pymoo 0.6.1.5.dev0 (cp313-cp313-macosx_10_13_universal2.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pymoo might be problematic.
- pymoo/__init__.py +3 -0
- pymoo/algorithms/__init__.py +0 -0
- pymoo/algorithms/base/__init__.py +0 -0
- pymoo/algorithms/base/bracket.py +38 -0
- pymoo/algorithms/base/genetic.py +109 -0
- pymoo/algorithms/base/line.py +62 -0
- pymoo/algorithms/base/local.py +39 -0
- pymoo/algorithms/base/meta.py +79 -0
- pymoo/algorithms/hyperparameters.py +89 -0
- pymoo/algorithms/moo/__init__.py +0 -0
- pymoo/algorithms/moo/age.py +310 -0
- pymoo/algorithms/moo/age2.py +194 -0
- pymoo/algorithms/moo/ctaea.py +298 -0
- pymoo/algorithms/moo/dnsga2.py +76 -0
- pymoo/algorithms/moo/kgb.py +446 -0
- pymoo/algorithms/moo/moead.py +183 -0
- pymoo/algorithms/moo/nsga2.py +113 -0
- pymoo/algorithms/moo/nsga3.py +358 -0
- pymoo/algorithms/moo/pinsga2.py +370 -0
- pymoo/algorithms/moo/rnsga2.py +188 -0
- pymoo/algorithms/moo/rnsga3.py +246 -0
- pymoo/algorithms/moo/rvea.py +214 -0
- pymoo/algorithms/moo/sms.py +195 -0
- pymoo/algorithms/moo/spea2.py +190 -0
- pymoo/algorithms/moo/unsga3.py +47 -0
- pymoo/algorithms/soo/__init__.py +0 -0
- pymoo/algorithms/soo/convex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/brkga.py +161 -0
- pymoo/algorithms/soo/nonconvex/cmaes.py +554 -0
- pymoo/algorithms/soo/nonconvex/de.py +279 -0
- pymoo/algorithms/soo/nonconvex/direct.py +149 -0
- pymoo/algorithms/soo/nonconvex/es.py +203 -0
- pymoo/algorithms/soo/nonconvex/g3pcx.py +94 -0
- pymoo/algorithms/soo/nonconvex/ga.py +93 -0
- pymoo/algorithms/soo/nonconvex/ga_niching.py +223 -0
- pymoo/algorithms/soo/nonconvex/isres.py +74 -0
- pymoo/algorithms/soo/nonconvex/nelder.py +251 -0
- pymoo/algorithms/soo/nonconvex/optuna.py +80 -0
- pymoo/algorithms/soo/nonconvex/pattern.py +183 -0
- pymoo/algorithms/soo/nonconvex/pso.py +399 -0
- pymoo/algorithms/soo/nonconvex/pso_ep.py +297 -0
- pymoo/algorithms/soo/nonconvex/random_search.py +25 -0
- pymoo/algorithms/soo/nonconvex/sres.py +56 -0
- pymoo/algorithms/soo/univariate/__init__.py +0 -0
- pymoo/algorithms/soo/univariate/backtracking.py +59 -0
- pymoo/algorithms/soo/univariate/exp.py +46 -0
- pymoo/algorithms/soo/univariate/golden.py +65 -0
- pymoo/algorithms/soo/univariate/quadr_interp.py +81 -0
- pymoo/algorithms/soo/univariate/wolfe.py +163 -0
- pymoo/config.py +33 -0
- pymoo/constraints/__init__.py +3 -0
- pymoo/constraints/adaptive.py +62 -0
- pymoo/constraints/as_obj.py +56 -0
- pymoo/constraints/as_penalty.py +41 -0
- pymoo/constraints/eps.py +26 -0
- pymoo/constraints/from_bounds.py +36 -0
- pymoo/core/__init__.py +0 -0
- pymoo/core/algorithm.py +394 -0
- pymoo/core/callback.py +38 -0
- pymoo/core/crossover.py +77 -0
- pymoo/core/decision_making.py +102 -0
- pymoo/core/decomposition.py +76 -0
- pymoo/core/duplicate.py +163 -0
- pymoo/core/evaluator.py +116 -0
- pymoo/core/indicator.py +34 -0
- pymoo/core/individual.py +784 -0
- pymoo/core/infill.py +64 -0
- pymoo/core/initialization.py +42 -0
- pymoo/core/mating.py +39 -0
- pymoo/core/meta.py +21 -0
- pymoo/core/mixed.py +165 -0
- pymoo/core/mutation.py +44 -0
- pymoo/core/operator.py +40 -0
- pymoo/core/parameters.py +134 -0
- pymoo/core/plot.py +210 -0
- pymoo/core/population.py +180 -0
- pymoo/core/problem.py +460 -0
- pymoo/core/recorder.py +99 -0
- pymoo/core/repair.py +23 -0
- pymoo/core/replacement.py +96 -0
- pymoo/core/result.py +52 -0
- pymoo/core/sampling.py +43 -0
- pymoo/core/selection.py +61 -0
- pymoo/core/solution.py +10 -0
- pymoo/core/survival.py +103 -0
- pymoo/core/termination.py +70 -0
- pymoo/core/variable.py +399 -0
- pymoo/cython/__init__.py +0 -0
- pymoo/cython/calc_perpendicular_distance.cpython-313-darwin.so +0 -0
- pymoo/cython/calc_perpendicular_distance.pyx +67 -0
- pymoo/cython/decomposition.cpython-313-darwin.so +0 -0
- pymoo/cython/decomposition.pyx +165 -0
- pymoo/cython/hv.cpython-313-darwin.so +0 -0
- pymoo/cython/hv.pyx +18 -0
- pymoo/cython/info.cpython-313-darwin.so +0 -0
- pymoo/cython/info.pyx +5 -0
- pymoo/cython/mnn.cpython-313-darwin.so +0 -0
- pymoo/cython/mnn.pyx +273 -0
- pymoo/cython/non_dominated_sorting.cpython-313-darwin.so +0 -0
- pymoo/cython/non_dominated_sorting.pyx +645 -0
- pymoo/cython/pruning_cd.cpython-313-darwin.so +0 -0
- pymoo/cython/pruning_cd.pyx +197 -0
- pymoo/cython/stochastic_ranking.cpython-313-darwin.so +0 -0
- pymoo/cython/stochastic_ranking.pyx +49 -0
- pymoo/cython/utils.pxd +129 -0
- pymoo/cython/vendor/__init__.py +0 -0
- pymoo/cython/vendor/hypervolume.cpp +1621 -0
- pymoo/cython/vendor/hypervolume.h +63 -0
- pymoo/decomposition/__init__.py +0 -0
- pymoo/decomposition/aasf.py +24 -0
- pymoo/decomposition/asf.py +10 -0
- pymoo/decomposition/pbi.py +13 -0
- pymoo/decomposition/perp_dist.py +13 -0
- pymoo/decomposition/tchebicheff.py +11 -0
- pymoo/decomposition/util.py +13 -0
- pymoo/decomposition/weighted_sum.py +8 -0
- pymoo/docs.py +187 -0
- pymoo/experimental/__init__.py +0 -0
- pymoo/experimental/algorithms/__init__.py +0 -0
- pymoo/experimental/algorithms/gde3.py +57 -0
- pymoo/gradient/__init__.py +21 -0
- pymoo/gradient/automatic.py +57 -0
- pymoo/gradient/grad_autograd.py +105 -0
- pymoo/gradient/grad_complex.py +35 -0
- pymoo/gradient/grad_jax.py +51 -0
- pymoo/gradient/toolbox/__init__.py +6 -0
- pymoo/indicators/__init__.py +0 -0
- pymoo/indicators/distance_indicator.py +55 -0
- pymoo/indicators/gd.py +7 -0
- pymoo/indicators/gd_plus.py +7 -0
- pymoo/indicators/hv/__init__.py +63 -0
- pymoo/indicators/hv/exact.py +71 -0
- pymoo/indicators/hv/exact_2d.py +102 -0
- pymoo/indicators/hv/monte_carlo.py +74 -0
- pymoo/indicators/igd.py +7 -0
- pymoo/indicators/igd_plus.py +7 -0
- pymoo/indicators/kktpm.py +151 -0
- pymoo/indicators/migd.py +55 -0
- pymoo/indicators/rmetric.py +203 -0
- pymoo/indicators/spacing.py +52 -0
- pymoo/mcdm/__init__.py +0 -0
- pymoo/mcdm/compromise_programming.py +19 -0
- pymoo/mcdm/high_tradeoff.py +40 -0
- pymoo/mcdm/pseudo_weights.py +32 -0
- pymoo/operators/__init__.py +0 -0
- pymoo/operators/control.py +187 -0
- pymoo/operators/crossover/__init__.py +0 -0
- pymoo/operators/crossover/binx.py +45 -0
- pymoo/operators/crossover/dex.py +122 -0
- pymoo/operators/crossover/erx.py +162 -0
- pymoo/operators/crossover/expx.py +51 -0
- pymoo/operators/crossover/hux.py +37 -0
- pymoo/operators/crossover/nox.py +13 -0
- pymoo/operators/crossover/ox.py +84 -0
- pymoo/operators/crossover/pcx.py +82 -0
- pymoo/operators/crossover/pntx.py +49 -0
- pymoo/operators/crossover/sbx.py +125 -0
- pymoo/operators/crossover/spx.py +5 -0
- pymoo/operators/crossover/ux.py +20 -0
- pymoo/operators/mutation/__init__.py +0 -0
- pymoo/operators/mutation/bitflip.py +17 -0
- pymoo/operators/mutation/gauss.py +58 -0
- pymoo/operators/mutation/inversion.py +42 -0
- pymoo/operators/mutation/nom.py +7 -0
- pymoo/operators/mutation/pm.py +94 -0
- pymoo/operators/mutation/rm.py +23 -0
- pymoo/operators/repair/__init__.py +0 -0
- pymoo/operators/repair/bounce_back.py +32 -0
- pymoo/operators/repair/bounds_repair.py +95 -0
- pymoo/operators/repair/inverse_penalty.py +89 -0
- pymoo/operators/repair/rounding.py +18 -0
- pymoo/operators/repair/to_bound.py +31 -0
- pymoo/operators/repair/vtype.py +11 -0
- pymoo/operators/sampling/__init__.py +0 -0
- pymoo/operators/sampling/lhs.py +73 -0
- pymoo/operators/sampling/rnd.py +50 -0
- pymoo/operators/selection/__init__.py +0 -0
- pymoo/operators/selection/rnd.py +72 -0
- pymoo/operators/selection/tournament.py +76 -0
- pymoo/operators/survival/__init__.py +0 -0
- pymoo/operators/survival/rank_and_crowding/__init__.py +1 -0
- pymoo/operators/survival/rank_and_crowding/classes.py +209 -0
- pymoo/operators/survival/rank_and_crowding/metrics.py +208 -0
- pymoo/optimize.py +72 -0
- pymoo/problems/__init__.py +157 -0
- pymoo/problems/dyn.py +47 -0
- pymoo/problems/dynamic/__init__.py +0 -0
- pymoo/problems/dynamic/cec2015.py +108 -0
- pymoo/problems/dynamic/df.py +452 -0
- pymoo/problems/dynamic/misc.py +167 -0
- pymoo/problems/functional.py +48 -0
- pymoo/problems/many/__init__.py +5 -0
- pymoo/problems/many/cdtlz.py +159 -0
- pymoo/problems/many/dcdtlz.py +88 -0
- pymoo/problems/many/dtlz.py +264 -0
- pymoo/problems/many/wfg.py +550 -0
- pymoo/problems/multi/__init__.py +14 -0
- pymoo/problems/multi/bnh.py +34 -0
- pymoo/problems/multi/carside.py +48 -0
- pymoo/problems/multi/clutch.py +104 -0
- pymoo/problems/multi/csi.py +55 -0
- pymoo/problems/multi/ctp.py +198 -0
- pymoo/problems/multi/dascmop.py +213 -0
- pymoo/problems/multi/kursawe.py +25 -0
- pymoo/problems/multi/modact.py +68 -0
- pymoo/problems/multi/mw.py +400 -0
- pymoo/problems/multi/omnitest.py +48 -0
- pymoo/problems/multi/osy.py +32 -0
- pymoo/problems/multi/srn.py +28 -0
- pymoo/problems/multi/sympart.py +94 -0
- pymoo/problems/multi/tnk.py +24 -0
- pymoo/problems/multi/truss2d.py +83 -0
- pymoo/problems/multi/welded_beam.py +41 -0
- pymoo/problems/multi/wrm.py +36 -0
- pymoo/problems/multi/zdt.py +151 -0
- pymoo/problems/multi_to_single.py +22 -0
- pymoo/problems/single/__init__.py +12 -0
- pymoo/problems/single/ackley.py +24 -0
- pymoo/problems/single/cantilevered_beam.py +34 -0
- pymoo/problems/single/flowshop_scheduling.py +112 -0
- pymoo/problems/single/g.py +874 -0
- pymoo/problems/single/griewank.py +18 -0
- pymoo/problems/single/himmelblau.py +15 -0
- pymoo/problems/single/knapsack.py +48 -0
- pymoo/problems/single/mopta08.py +26 -0
- pymoo/problems/single/multimodal.py +20 -0
- pymoo/problems/single/pressure_vessel.py +30 -0
- pymoo/problems/single/rastrigin.py +20 -0
- pymoo/problems/single/rosenbrock.py +22 -0
- pymoo/problems/single/schwefel.py +18 -0
- pymoo/problems/single/simple.py +13 -0
- pymoo/problems/single/sphere.py +19 -0
- pymoo/problems/single/traveling_salesman.py +79 -0
- pymoo/problems/single/zakharov.py +19 -0
- pymoo/problems/static.py +14 -0
- pymoo/problems/util.py +42 -0
- pymoo/problems/zero_to_one.py +27 -0
- pymoo/termination/__init__.py +23 -0
- pymoo/termination/collection.py +12 -0
- pymoo/termination/cv.py +48 -0
- pymoo/termination/default.py +45 -0
- pymoo/termination/delta.py +64 -0
- pymoo/termination/fmin.py +16 -0
- pymoo/termination/ftol.py +144 -0
- pymoo/termination/indicator.py +49 -0
- pymoo/termination/max_eval.py +14 -0
- pymoo/termination/max_gen.py +15 -0
- pymoo/termination/max_time.py +20 -0
- pymoo/termination/robust.py +34 -0
- pymoo/termination/xtol.py +33 -0
- pymoo/util/__init__.py +0 -0
- pymoo/util/archive.py +150 -0
- pymoo/util/cache.py +29 -0
- pymoo/util/clearing.py +82 -0
- pymoo/util/display/__init__.py +0 -0
- pymoo/util/display/column.py +52 -0
- pymoo/util/display/display.py +34 -0
- pymoo/util/display/multi.py +96 -0
- pymoo/util/display/output.py +53 -0
- pymoo/util/display/progress.py +54 -0
- pymoo/util/display/single.py +67 -0
- pymoo/util/dominator.py +67 -0
- pymoo/util/function_loader.py +129 -0
- pymoo/util/hv.py +23 -0
- pymoo/util/matlab_engine.py +39 -0
- pymoo/util/misc.py +460 -0
- pymoo/util/mnn.py +70 -0
- pymoo/util/nds/__init__.py +0 -0
- pymoo/util/nds/dominance_degree_non_dominated_sort.py +159 -0
- pymoo/util/nds/efficient_non_dominated_sort.py +152 -0
- pymoo/util/nds/fast_non_dominated_sort.py +70 -0
- pymoo/util/nds/naive_non_dominated_sort.py +36 -0
- pymoo/util/nds/non_dominated_sorting.py +67 -0
- pymoo/util/nds/tree_based_non_dominated_sort.py +133 -0
- pymoo/util/normalization.py +312 -0
- pymoo/util/optimum.py +42 -0
- pymoo/util/plotting.py +177 -0
- pymoo/util/pruning_cd.py +89 -0
- pymoo/util/randomized_argsort.py +60 -0
- pymoo/util/ref_dirs/__init__.py +24 -0
- pymoo/util/ref_dirs/construction.py +88 -0
- pymoo/util/ref_dirs/das_dennis.py +52 -0
- pymoo/util/ref_dirs/energy.py +319 -0
- pymoo/util/ref_dirs/energy_layer.py +119 -0
- pymoo/util/ref_dirs/genetic_algorithm.py +63 -0
- pymoo/util/ref_dirs/incremental.py +68 -0
- pymoo/util/ref_dirs/misc.py +128 -0
- pymoo/util/ref_dirs/optimizer.py +59 -0
- pymoo/util/ref_dirs/performance.py +162 -0
- pymoo/util/ref_dirs/reduction.py +85 -0
- pymoo/util/ref_dirs/sample_and_map.py +24 -0
- pymoo/util/reference_direction.py +260 -0
- pymoo/util/remote.py +55 -0
- pymoo/util/roulette.py +27 -0
- pymoo/util/running_metric.py +128 -0
- pymoo/util/sliding_window.py +25 -0
- pymoo/util/stochastic_ranking.py +32 -0
- pymoo/util/value_functions.py +719 -0
- pymoo/util/vectors.py +40 -0
- pymoo/util/vf_dominator.py +99 -0
- pymoo/vendor/__init__.py +0 -0
- pymoo/vendor/cec2018.py +398 -0
- pymoo/vendor/gta.py +617 -0
- pymoo/vendor/hv.py +267 -0
- pymoo/vendor/vendor_cmaes.py +412 -0
- pymoo/vendor/vendor_coco.py +81 -0
- pymoo/vendor/vendor_scipy.py +232 -0
- pymoo/version.py +1 -0
- pymoo/visualization/__init__.py +8 -0
- pymoo/visualization/fitness_landscape.py +127 -0
- pymoo/visualization/heatmap.py +123 -0
- pymoo/visualization/pcp.py +120 -0
- pymoo/visualization/petal.py +91 -0
- pymoo/visualization/radar.py +108 -0
- pymoo/visualization/radviz.py +68 -0
- pymoo/visualization/scatter.py +150 -0
- pymoo/visualization/star_coordinate.py +75 -0
- pymoo/visualization/util.py +123 -0
- pymoo/visualization/video/__init__.py +0 -0
- pymoo/visualization/video/callback_video.py +82 -0
- pymoo/visualization/video/one_var_one_obj.py +57 -0
- pymoo/visualization/video/two_var_one_obj.py +62 -0
- pymoo-0.6.1.5.dev0.dist-info/METADATA +187 -0
- pymoo-0.6.1.5.dev0.dist-info/RECORD +328 -0
- pymoo-0.6.1.5.dev0.dist-info/WHEEL +6 -0
- pymoo-0.6.1.5.dev0.dist-info/licenses/LICENSE +191 -0
- pymoo-0.6.1.5.dev0.dist-info/top_level.txt +1 -0
pymoo/algorithms/moo/kgb.py (new file)
@@ -0,0 +1,446 @@
+import numpy as np
+import random
+import json
+from pymoo.algorithms.moo.nsga2 import NSGA2
+from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
+from pymoo.core.population import Population
+try:
+    from sklearn.naive_bayes import GaussianNB
+except:
+    raise "Please install sklearn for KGB: pip install scikit-learn"
+
+
+def euclidean_distance(a, b):
+    a = np.array(a)
+    b = np.array(b)
+    return np.sqrt(np.sum((a - b) ** 2))
+
+
+class KGB(NSGA2):
+    def __init__(
+        self,
+        perc_detect_change=0.1,
+        perc_diversity=0.3,
+        c_size=13,
+        eps=0.0,
+        ps={},
+        perturb_dev=0.1,
+        save_ps=False,
+        **kwargs,
+    ):
+
+        super().__init__(**kwargs)
+        self.PERTURB_DEV = perturb_dev
+        self.PERC_DIVERSITY = perc_diversity
+        self.PERC_DETECT_CHANGE = perc_detect_change
+        self.EPS = eps
+        self.save_ps = save_ps
+
+        self.C_SIZE = c_size
+        self.ps = ps
+        self.nr_rand_solutions = 50 * self.pop_size
+        self.t = 0
+
+        self.rng = np.random.RandomState(self.seed)
+        random.seed(self.seed)
+
+    def setup(self, problem, **kwargs):
+        """
+        Set up the KGB-DMOEA algorithm.
+        :param problem: The optimization problem instance
+        :param kwargs: Additional keyword arguments
+        :return: The result of the superclass setup method
+        """
+        assert (
+            not problem.has_constraints()
+        ), "KGB-DMOEA only works for unconstrained problems."
+        return super().setup(problem, **kwargs)
+
+
+    def knowledge_reconstruction_examination(self):
+        """
+        Perform the knowledge reconstruction examination.
+        :return: Tuple containing the useful population, useless population, and cluster centroids
+        """
+        clusters = self.ps  # set historical PS set as clusters
+        Nc = self.C_SIZE  # set final nr of clusters
+        size = len(self.ps)  # set size iteration to length of cluster
+        run_counter = 0  # counter variable to give unique key
+
+        # while there are still clusters to be condensed
+        while size > Nc:
+
+            counter = 0
+            min_distance = None
+            min_distance_index = []
+
+            # get clusters that are closest to each other by calculating the euclidean distance
+            for keys_i in clusters.keys():
+                for keys_j in clusters.keys():
+                    if (
+                        clusters[keys_i]["solutions"]
+                        is not clusters[keys_j]["solutions"]
+                    ):
+
+                        dst = euclidean_distance(
+                            clusters[keys_i]["centroid"],
+                            clusters[keys_j]["centroid"],
+                        )
+
+                        if min_distance == None:
+                            min_distance = dst
+                            min_distance_index = [keys_i, keys_j]
+                        elif dst < min_distance:
+                            min_distance = dst
+
+                            min_distance_index = [keys_i, keys_j]
+
+                    counter += 1
+
+            # merge closest clusters
+            for solution in clusters[min_distance_index[1]]["solutions"]:
+                clusters[min_distance_index[0]]["solutions"].append(solution)
+
+            # calculate new centroid for merged cluster
+            clusters[min_distance_index[0]][
+                "centroid"
+            ] = self.calculate_cluster_centroid(
+                clusters[min_distance_index[0]]["solutions"]
+            )
+
+            # remove cluster that was merged
+            del clusters[min_distance_index[1]]
+
+            size -= 1
+            run_counter += 1
+
+        c = []  # list of centroids
+        pop_useful = []
+        pop_useless = []
+
+        # get centroids of clusters
+        for key in clusters.keys():
+            c.append(clusters[key]["centroid"])
+
+        # create pymoo population objected to evaluate centroid solutions
+        centroid_pop = Population.new("X", c)
+
+        # evaluate centroids
+        self.evaluator.eval(self.problem, centroid_pop)
+
+        # do non-dominated sorting on centroid solutions
+        ranking = NonDominatedSorting().do(centroid_pop.get("F"), return_rank=True)[-1]
+
+        # add the individuals from the clusters with the best objective values to the useful population the rest is useless :(
+
+        for idx, rank in enumerate(ranking):
+            if rank == 0:
+                for key in clusters.keys():
+                    if centroid_pop[idx].X == clusters[key]["centroid"]:
+                        for cluster_individual in clusters[key]["solutions"]:
+                            pop_useful.append(cluster_individual)
+            else:
+                for key in clusters.keys():
+                    if centroid_pop[idx].X == clusters[key]["centroid"]:
+                        for cluster_individual in clusters[key]["solutions"]:
+                            pop_useless.append(cluster_individual)
+
+        # return useful and useless population and the centroid solutions
+        return pop_useful, pop_useless, c
+
+
+    def naive_bayesian_classifier(self, pop_useful, pop_useless):
+        """
+        Train a naive Bayesian classifier using the useful and useless populations.
+        :param pop_useful: Useful population
+        :param pop_useless: Useless population
+        :return: Trained GaussianNB classifier
+        """
+        labeled_useful_solutions = []
+        labeled_useless_solutions = []
+
+        # add labels to solutions
+        for individual in pop_useful:
+            labeled_useful_solutions.append((individual, +1))
+
+        for individual in pop_useless:
+            labeled_useless_solutions.append((individual, -1))
+
+        x_train = []
+        y_train = []
+
+        for i in range(len(labeled_useful_solutions)):
+            x_train.append(labeled_useful_solutions[i][0])
+            y_train.append(labeled_useful_solutions[i][1])
+
+        for i in range(len(labeled_useless_solutions)):
+            x_train.append(labeled_useless_solutions[i][0])
+            y_train.append(labeled_useless_solutions[i][1])
+
+        x_train = np.asarray(x_train)
+        y_train = np.asarray(y_train)
+
+        # fit the naive bayesian classifier with the training data
+        model = GaussianNB()
+        model.fit(x_train, y_train)
+
+        return model
+
+    def add_to_ps(self):
+        """
+        Add the current Pareto optimal set (POS) to the Pareto set (PS) with individual keys.
+        """
+
+        PS_counter = 0
+
+        for individual in self.opt:
+
+            if isinstance(individual.X, list):
+                individual.X = np.asarray(individual.X)
+
+            centroid = self.calculate_cluster_centroid(individual.X)
+
+            self.ps[str(PS_counter) + "-" + str(self.t)] = {
+                "solutions": [individual.X.tolist()],
+                "centroid": centroid,
+            }
+
+            PS_counter += 1
+
+    def predicted_population(self, X_test, Y_test):
+        """
+        Create a predicted population from the test set with positive labels.
+        :param X_test: Test set of features
+        :param Y_test: Test set of labels
+        :return: Predicted population
+        """
+        predicted_pop = []
+        for i in range(len(Y_test)):
+            if Y_test[i] == 1:
+                predicted_pop.append(X_test[i])
+        return predicted_pop
+
+    def calculate_cluster_centroid(self, solution_cluster):
+        """
+        Calculate the centroid for a given cluster of solutions.
+        :param solution_cluster: List of solutions in the cluster
+        :return: Cluster centroid
+        """
+        # Get number of variable shape
+        try:
+            n_vars = len(solution_cluster[0])
+        except TypeError:
+            solution_cluster = np.array(solution_cluster)
+            return solution_cluster.tolist()
+
+        # TODO: this is lazy garbage fix whats coming in
+        cluster = []
+        for i in range(len(solution_cluster)):
+            # cluster.append(solution_cluster[i].tolist())
+            cluster.append(solution_cluster[i])
+        solution_cluster = np.asarray(cluster)
+
+        # Get number of solutions
+        length = solution_cluster.shape[0]
+
+        centroid_points = []
+
+        # calculate centroid for each variable, by taking mean of every variable of cluster
+        for i in range(n_vars):
+            # calculate sum over cluster
+            centroid_points.append(np.sum(solution_cluster[:, i]))
+
+        return [x / length for x in centroid_points]
+
+    def check_boundaries(self, pop):
+        """
+        Check and fix the boundaries of the given population.
+        :param pop: Population to check and fix boundaries
+        :return: Population with corrected boundaries
+        """
+        # check whether numpy array or pymoo population is given
+        if isinstance(pop, Population):
+            pop = pop.get("X")
+
+        # check if any solution is outside the bounds
+        for individual in pop:
+            for i in range(len(individual)):
+                if individual[i] > self.problem.xu[i]:
+                    individual[i] = self.problem.xu[i]
+                elif individual[i] < self.problem.xl[i]:
+                    individual[i] = self.problem.xl[i]
+        return pop
+
+    def random_strategy(self, N_r):
+        """
+        Generate a random population within the problem boundaries.
+        :param N_r: Number of random solutions to generate
+        :return: Randomly generated population
+        """
+        # generate a random population of size N_r
+        # TODO: Check boundaries
+        random_pop = np.random.random((N_r, self.problem.n_var))
+
+        # check if any solution is outside the bounds
+        for individual in random_pop:
+            for i in range(len(individual)):
+                if individual[i] > self.problem.xu[i]:
+                    individual[i] = self.problem.xu[i]
+                elif individual[i] < self.problem.xl[i]:
+                    individual[i] = self.problem.xl[i]
+
+        return random_pop
+
+    def diversify_population(self, pop):
+        """
+        Introduce diversity in the population by replacing a percentage of individuals.
+        :param pop: Population to diversify
+        :return: Diversified population
+        """
+        # find indices to be replaced (introduce diversity)
+        I = np.where(np.random.random(len(pop)) < self.PERC_DIVERSITY)[0]
+        # replace with randomly sampled individuals
+        pop[I] = self.initialization.sampling(self.problem, len(I))
+        return pop
+
+    def _advance(self, **kwargs):
+        """
+        Advance the optimization algorithm by one iteration.
+        """
+        pop = self.pop
+        X, F = pop.get("X", "F")
+
+        # the number of solutions to sample from the population to detect the change
+        n_samples = int(np.ceil(len(pop) * self.PERC_DETECT_CHANGE))
+
+        # choose randomly some individuals of the current population to test if there was a change
+        I = np.random.choice(np.arange(len(pop)), size=n_samples)
+        samples = self.evaluator.eval(self.problem, Population.new(X=X[I]))
+
+        # calculate the differences between the old and newly evaluated pop
+        delta = ((samples.get("F") - F[I]) ** 2).mean()
+
+        # archive the current POS
+        self.add_to_ps()
+
+        # if there is an average deviation bigger than eps -> we have a change detected
+        change_detected = delta > self.EPS
+
+        if change_detected:
+
+            # increase t counter for unique key of PS
+            self.t += 1
+
+            # conduct knowledge reconstruction examination
+            pop_useful, pop_useless, c = self.knowledge_reconstruction_examination()
+
+            # Train a naive bayesian classifier
+            model = self.naive_bayesian_classifier(pop_useful, pop_useless)
+
+            # generate a lot of random solutions with the dimensions of problem decision space
+            X_test = self.random_strategy(self.nr_rand_solutions)
+
+            # introduce noise to vary previously useful solutions
+            noise = np.random.normal(0, self.PERTURB_DEV, self.problem.n_var)
+            noisy_useful_history = np.asarray(pop_useful) + noise
+
+            # check whether solutions are within bounds
+            noisy_useful_history = self.check_boundaries(noisy_useful_history)
+
+            # add noisy useful history to randomly generated solutions
+            X_test = np.vstack((X_test, noisy_useful_history))
+
+            # predict whether random solutions are useful or useless
+            Y_test = model.predict(X_test)
+
+            # create list of useful predicted solutions
+            predicted_pop = self.predicted_population(X_test, Y_test)
+
+            # ------ POPULATION GENERATION --------
+            # take a random sample from predicted pop and known useful pop
+
+            nr_sampled_pop_useful = 0
+            nr_random_filler_solutions = 0
+
+            if len(predicted_pop) >= self.pop_size - self.C_SIZE:
+                init_pop = []
+                predicted_pop = random.sample(
+                    predicted_pop, self.pop_size - self.C_SIZE
+                )
+
+                # add sampled solutions to init_pop
+                for solution in predicted_pop:
+                    init_pop.append(solution)
+
+                # add cluster centroids to init_pop
+                for solution in c:
+                    init_pop.append(np.asarray(solution))
+
+            else:
+
+                # if not enough predicted solutions are available, add all predicted solutions to init_pop
+                init_pop = []
+
+                for solution in predicted_pop:
+                    init_pop.append(solution)
+
+                # add cluster centroids to init_pop
+                for solution in c:
+                    init_pop.append(np.asarray(solution))
+
+                # if there are still not enough solutions in init_pop randomly sample previously useful solutions directly without noise to init_pop
+                if len(init_pop) < self.pop_size:
+
+                    # fill up init_pop with randomly sampled solutions from pop_useful
+                    if len(pop_useful) >= self.pop_size - len(init_pop):
+
+                        nr_sampled_pop_useful = self.pop_size - len(init_pop)
+
+                        init_pop = np.vstack(
+                            (
+                                init_pop,
+                                random.sample(pop_useful, self.pop_size - len(init_pop)),
+                            )
+                        )
+                    else:
+                        # if not enough solutions are available, add all previously known useful solutions without noise to init_pop
+                        for solution in pop_useful:
+                            init_pop.append(solution)
+
+                        nr_sampled_pop_useful = len(pop_useful)
+
+                # if there are still not enough solutions in init_pop generate random solutions with the dimensions of problem decision space
+                if len(init_pop) < self.pop_size:
+
+                    nr_random_filler_solutions = self.pop_size - len(init_pop)
+
+                    # fill up with random solutions
+                    init_pop = np.vstack(
+                        (init_pop, self.random_strategy(self.pop_size - len(init_pop)))
+                    )
+
+            # recreate the current population without being evaluated
+            pop = Population.new(X=init_pop)
+
+            # reevaluate because we know there was a change
+            self.evaluator.eval(self.problem, pop)
+
+            # do a survival to recreate rank and crowding of all individuals
+            pop = self.survival.do(self.problem, pop, n_survive=len(pop))
+
+        # create the offsprings from the current population
+        off = self.mating.do(self.problem, pop, self.n_offsprings, algorithm=self)
+        self.evaluator.eval(self.problem, off)
+
+        # merge the parent population and offsprings
+        pop = Population.merge(pop, off)
+
+        # execute the survival to find the fittest solutions
+        self.pop = self.survival.do(
+            self.problem, pop, n_survive=self.pop_size, algorithm=self
+        )
+
+        # dump self.ps to file
+        if self.save_ps:
+            with open("ps.json", "w") as fp:
+                json.dump(self.ps, fp)
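The KGB class added above (KGB-DMOEA, a knowledge-guided dynamic extension of NSGA2) plugs into the usual pymoo workflow. The snippet below is a minimal usage sketch, not taken from this release: it assumes the DF1 dynamic benchmark from pymoo/problems/dynamic/df.py and the minimize helper from pymoo/optimize.py listed above, and all parameter values are illustrative.

# Hypothetical usage sketch for KGB (KGB-DMOEA); values are illustrative, not from this wheel.
from pymoo.algorithms.moo.kgb import KGB
from pymoo.optimize import minimize
from pymoo.problems.dynamic.df import DF1   # assumed unconstrained dynamic benchmark

problem = DF1()                             # KGB asserts the problem has no constraints

# KGB subclasses NSGA2, so NSGA2 arguments such as pop_size are accepted as well.
algorithm = KGB(
    pop_size=100,
    perc_detect_change=0.1,                 # fraction of the population re-evaluated to detect a change
    c_size=13,                              # number of clusters kept after knowledge reconstruction
    save_ps=False,                          # set True to dump the historical Pareto sets to ps.json
)

res = minimize(problem, algorithm, ("n_gen", 100), seed=1, verbose=False)
print(res.F)                                # objective values of the final non-dominated set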
pymoo/algorithms/moo/moead.py (new file)
@@ -0,0 +1,183 @@
+import numpy as np
+from scipy.spatial.distance import cdist
+
+from pymoo.algorithms.base.genetic import GeneticAlgorithm
+from pymoo.core.algorithm import LoopwiseAlgorithm
+from pymoo.core.duplicate import NoDuplicateElimination
+from pymoo.core.population import Population
+from pymoo.core.selection import Selection
+from pymoo.core.variable import Real, get
+from pymoo.docs import parse_doc_string
+from pymoo.operators.crossover.sbx import SBX
+from pymoo.operators.mutation.pm import PM
+from pymoo.operators.sampling.rnd import FloatRandomSampling
+from pymoo.util.display.multi import MultiObjectiveOutput
+from pymoo.util.reference_direction import default_ref_dirs
+
+
+class NeighborhoodSelection(Selection):
+
+    def __init__(self, prob=1.0) -> None:
+        super().__init__()
+        self.prob = Real(prob, bounds=(0.0, 1.0))
+
+    def _do(self, problem, pop, n_select, n_parents, neighbors=None, **kwargs):
+        assert n_select == len(neighbors)
+        P = np.full((n_select, n_parents), -1)
+
+        prob = get(self.prob, size=n_select)
+
+        for k in range(n_select):
+            if np.random.random() < prob[k]:
+                P[k] = np.random.choice(neighbors[k], n_parents, replace=False)
+            else:
+                P[k] = np.random.permutation(len(pop))[:n_parents]
+
+        return P
+
+
+# =========================================================================================================
+# Implementation
+# =========================================================================================================
+
+class MOEAD(LoopwiseAlgorithm, GeneticAlgorithm):
+
+    def __init__(self,
+                 ref_dirs=None,
+                 n_neighbors=20,
+                 decomposition=None,
+                 prob_neighbor_mating=0.9,
+                 sampling=FloatRandomSampling(),
+                 crossover=SBX(prob=1.0, eta=20),
+                 mutation=PM(prob_var=None, eta=20),
+                 output=MultiObjectiveOutput(),
+                 **kwargs):
+
+        # reference directions used for MOEAD
+        self.ref_dirs = ref_dirs
+
+        # the decomposition metric used
+        self.decomposition = decomposition
+
+        # the number of neighbors considered during mating
+        self.n_neighbors = n_neighbors
+
+        self.neighbors = None
+
+        self.selection = NeighborhoodSelection(prob=prob_neighbor_mating)
+
+        super().__init__(pop_size=len(ref_dirs),
+                         sampling=sampling,
+                         crossover=crossover,
+                         mutation=mutation,
+                         eliminate_duplicates=NoDuplicateElimination(),
+                         output=output,
+                         advance_after_initialization=False,
+                         **kwargs)
+
+    def _setup(self, problem, **kwargs):
+        assert not problem.has_constraints(), "This implementation of MOEAD does not support any constraints."
+
+        # if no reference directions have been provided get them and override the population size and other settings
+        if self.ref_dirs is None:
+            self.ref_dirs = default_ref_dirs(problem.n_obj)
+        self.pop_size = len(self.ref_dirs)
+
+        # neighbours includes the entry by itself intentionally for the survival method
+        self.neighbors = np.argsort(cdist(self.ref_dirs, self.ref_dirs), axis=1, kind='quicksort')[:, :self.n_neighbors]
+
+        # if the decomposition is not set yet, set the default
+        if self.decomposition is None:
+            self.decomposition = default_decomp(problem)
+
+    def _initialize_advance(self, infills=None, **kwargs):
+        super()._initialize_advance(infills, **kwargs)
+        self.ideal = np.min(self.pop.get("F"), axis=0)
+
+    def _next(self):
+        pop = self.pop
+
+        # iterate for each member of the population in random order
+        for k in np.random.permutation(len(pop)):
+            # get the parents using the neighborhood selection
+            P = self.selection.do(self.problem, pop, 1, self.mating.crossover.n_parents, neighbors=[self.neighbors[k]])
+
+            # perform a mating using the default operators - if more than one offspring just pick the first
+            off = np.random.choice(self.mating.do(self.problem, pop, 1, parents=P, n_max_iterations=1))
+
+            # evaluate the offspring
+            off = yield off
+
+            # update the ideal point
+            self.ideal = np.min(np.vstack([self.ideal, off.F]), axis=0)
+
+            # now actually do the replacement of the individual is better
+            self._replace(k, off)
+
+    def _replace(self, k, off):
+        pop = self.pop
+
+        # calculate the decomposed values for each neighbor
+        N = self.neighbors[k]
+        FV = self.decomposition.do(pop[N].get("F"), weights=self.ref_dirs[N, :], ideal_point=self.ideal)
+        off_FV = self.decomposition.do(off.F[None, :], weights=self.ref_dirs[N, :], ideal_point=self.ideal)
+
+        # this makes the algorithm to support constraints - not originally proposed though and not tested enough
+        # if self.problem.has_constraints():
+        #     CV, off_CV = pop[N].get("CV")[:, 0], np.full(len(off_FV), off.CV)
+        #     fmax = max(FV.max(), off_FV.max())
+        #     FV, off_FV = parameter_less(FV, CV, fmax=fmax), parameter_less(off_FV, off_CV, fmax=fmax)
+
+        # get the absolute index in F where offspring is better than the current F (decomposed space)
+        I = np.where(off_FV < FV)[0]
+        pop[N[I]] = off
+
+
+class ParallelMOEAD(MOEAD):
+
+    def __init__(self, ref_dirs, **kwargs):
+        super().__init__(ref_dirs, **kwargs)
+        self.indices = None
+
+    def _infill(self):
+        pop_size, cross_parents, cross_off = self.pop_size, self.mating.crossover.n_parents, self.mating.crossover.n_offsprings
+
+        # do the mating in a random order
+        indices = np.random.permutation(len(self.pop))[:self.n_offsprings]
+
+        # get the parents using the neighborhood selection
+        P = self.selection.do(self.problem, self.pop, self.n_offsprings, cross_parents,
+                              neighbors=self.neighbors[indices])
+
+        # do not any duplicates elimination - thus this results in exactly pop_size * n_offsprings offsprings
+        off = self.mating.do(self.problem, self.pop, 1e12, n_max_iterations=1, parents=P)
+
+        # select a random offspring from each mating
+        off = Population.create(*[np.random.choice(pool) for pool in np.reshape(off, (self.n_offsprings, -1))])
+
+        # store the indices because of the neighborhood matching in advance
+        self.indices = indices
+
+        return off
+
+    def _advance(self, infills=None, **kwargs):
+        assert len(self.indices) == len(infills), "Number of infills must be equal to the one created beforehand."
+
+        # update the ideal point before starting to replace
+        self.ideal = np.min(np.vstack([self.ideal, infills.get("F")]), axis=0)
+
+        # now do the replacements as in the loop-wise version
+        for k, off in enumerate(infills):
+            self._replace(self.indices[k], off)
+
+
+def default_decomp(problem):
+    if problem.n_obj <= 2:
+        from pymoo.decomposition.tchebicheff import Tchebicheff
+        return Tchebicheff()
+    else:
+        from pymoo.decomposition.pbi import PBI
+        return PBI()
+
+
+parse_doc_string(MOEAD.__init__)
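For MOEAD, the constructor expects one weight vector per subproblem and sets the population size to len(ref_dirs), as visible in the diff. A rough usage sketch follows; it assumes pymoo's get_problem and get_reference_directions convenience helpers (both shipped in this wheel per the file list) and an unconstrained problem, since _setup asserts that constraints are not supported. The chosen problem, partitioning, and generation budget are illustrative only.

# Hypothetical usage sketch for MOEAD; values are illustrative, not from this wheel.
from pymoo.algorithms.moo.moead import MOEAD
from pymoo.optimize import minimize
from pymoo.problems import get_problem
from pymoo.util.ref_dirs import get_reference_directions

problem = get_problem("dtlz2")              # 3-objective, unconstrained test problem

# Das-Dennis directions: with 3 objectives and 12 partitions this yields 91 weight vectors,
# so the MOEAD population will contain 91 individuals (pop_size = len(ref_dirs)).
ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)

algorithm = MOEAD(
    ref_dirs,
    n_neighbors=15,                         # size of the mating/replacement neighborhood
    prob_neighbor_mating=0.7,               # probability of selecting parents from the neighborhood
)

res = minimize(problem, algorithm, ("n_gen", 200), seed=1, verbose=False)
print(res.F.shape)                          # at most 91 non-dominated solutions, 3 objectives each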