pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydmoo/algorithms/base/__init__.py +20 -0
- pydmoo/algorithms/base/core/__init__.py +0 -0
- pydmoo/algorithms/base/core/algorithm.py +416 -0
- pydmoo/algorithms/base/core/genetic.py +129 -0
- pydmoo/algorithms/base/dmoo/__init__.py +0 -0
- pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
- pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
- pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
- pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
- pydmoo/algorithms/base/moo/__init__.py +0 -0
- pydmoo/algorithms/base/moo/moead.py +199 -0
- pydmoo/algorithms/base/moo/moeadde.py +105 -0
- pydmoo/algorithms/base/moo/mopso.py +0 -0
- pydmoo/algorithms/base/moo/nsga2.py +122 -0
- pydmoo/algorithms/modern/__init__.py +94 -0
- pydmoo/algorithms/modern/moead_imkt.py +161 -0
- pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
- pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
- pydmoo/algorithms/modern/moead_ktmm.py +112 -0
- pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
- pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
- pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
- pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
- pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
- pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
- pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
- pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
- pydmoo/algorithms/utils/__init__.py +0 -0
- pydmoo/algorithms/utils/utils.py +166 -0
- pydmoo/core/__init__.py +0 -0
- pydmoo/{response → core}/ar_model.py +4 -4
- pydmoo/{response → core}/bounds.py +35 -2
- pydmoo/core/distance.py +45 -0
- pydmoo/core/inverse.py +55 -0
- pydmoo/core/lstm/__init__.py +0 -0
- pydmoo/core/lstm/base.py +291 -0
- pydmoo/core/lstm/lstm.py +491 -0
- pydmoo/core/manifold.py +93 -0
- pydmoo/core/predictions.py +50 -0
- pydmoo/core/sample_gaussian.py +56 -0
- pydmoo/core/sample_uniform.py +63 -0
- pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
- pydmoo/problems/__init__.py +53 -49
- pydmoo/problems/dyn.py +94 -13
- pydmoo/problems/dynamic/cec2015.py +10 -5
- pydmoo/problems/dynamic/df.py +6 -3
- pydmoo/problems/dynamic/gts.py +69 -34
- pydmoo/problems/real_world/__init__.py +0 -0
- pydmoo/problems/real_world/dsrp.py +168 -0
- pydmoo/problems/real_world/dwbdp.py +189 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
- pydmoo-0.1.0.dist-info/RECORD +70 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
- pydmoo-0.0.18.dist-info/RECORD +0 -15
- /pydmoo/{response → algorithms}/__init__.py +0 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
import time
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from pymoo.core.population import Population
|
|
5
|
+
from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
|
|
6
|
+
|
|
7
|
+
from pydmoo.algorithms.base.moo.moeadde import MOEADDE
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DMOEADDE(MOEADDE):
    """Dynamic MOEA/D-DE: MOEA/D-DE extended with change detection and a
    pluggable change-response hook.

    Each iteration a fraction of the population is re-evaluated; if the mean
    squared difference between the stored and the freshly evaluated objective
    values exceeds ``eps``, an environment change is assumed and
    ``_response_change`` (overridden by subclasses) rebuilds the population.

    Parameters
    ----------
    perc_detect_change : float
        Fraction of the population re-evaluated to detect a change.
    eps : float
        Threshold on the mean squared objective deviation above which a
        change is reported.
    """

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 **kwargs):

        super().__init__(**kwargs)
        self.perc_detect_change = perc_detect_change
        self.eps = eps

    def setup(self, problem, **kwargs):
        # change detection compares raw objective values only, so
        # constrained problems are not supported
        assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
        return super().setup(problem, **kwargs)

    def _detect_change_sample_part_population(self):
        """Re-evaluate a random subset of the population and report whether
        its objectives deviate from the stored values by more than ``eps``."""
        pop = self.pop
        X, F = pop.get("X", "F")

        # the number of solutions to sample from the population to detect the change
        # (clamped so that sampling without replacement below is always valid)
        n_samples = min(int(np.ceil(len(pop) * self.perc_detect_change)), len(pop))

        # choose randomly some individuals of the current population to test if there was a change.
        # BUGFIX: sample WITHOUT replacement - the previous default (with
        # replacement) could re-evaluate the same individual several times,
        # wasting evaluations and weakening the detection signal.
        I = self.random_state.choice(np.arange(len(pop)), size=n_samples, replace=False)
        samples = self.evaluator.eval(self.problem, Population.new(X=X[I]))

        # calculate the differences between the old and newly evaluated pop
        delta = ((samples.get("F") - F[I]) ** 2).mean()

        # if there is an average deviation bigger than eps -> we have a change detected
        change_detected = delta > self.eps
        return change_detected

    def _next_static_dynamic(self):
        """Detect an environment change and, if one occurred, let the response
        strategy rebuild the population before the next iteration."""
        # for dynamic environment
        pop = self.pop

        # NOTE(review): self.state presumably gates detection to idle phases of
        # the loop-wise algorithm - confirm against LoopwiseAlgorithm
        if self.state is None:

            change_detected = self._detect_change_sample_part_population()

            if change_detected:

                start_time = time.time()

                # delegate to the concrete response strategy (subclass hook)
                pop = self._response_change()

                # reevaluate because we know there was a change
                self.evaluator.eval(self.problem, pop)

                if len(pop) > self.pop_size:
                    # do a survival to recreate rank and crowding of all individuals
                    # Modified by DynOpt on Dec 21, 2025
                    # n_survive=len(pop) -> n_survive=self.pop_size
                    pop = RankAndCrowding().do(self.problem, pop, n_survive=self.pop_size, random_state=self.random_state)

                self.pop = pop

                # record how long the change response took
                self.data["response_duration"] = time.time() - start_time

        return pop

    def _response_change(self):
        """Hook invoked after a change is detected; subclasses return the
        responded (unevaluated) population."""
        pass
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class DMOEADDEA(DMOEADDE):
    """DMOEA/D-DE variant A: responds to a detected change by re-seeding a
    random fraction of the population with freshly sampled individuals."""

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 perc_diversity=0.3,
                 **kwargs):
        super().__init__(perc_detect_change=perc_detect_change,
                         eps=eps,
                         **kwargs)

        # fraction of the population replaced by new random samples on a change
        self.perc_diversity = perc_diversity

    def _response_change(self):
        # rebuild the population from decision variables only (unevaluated)
        unevaluated = Population.new(X=self.pop.get("X"))

        # draw one uniform number per individual; those below the diversity
        # threshold are the slots to be replaced
        idx = np.flatnonzero(self.random_state.random(len(unevaluated)) < self.perc_diversity)

        # inject diversity: overwrite the selected slots with random samples
        unevaluated[idx] = self.initialization.sampling(self.problem, len(idx), random_state=self.random_state)

        return unevaluated
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class DMOEADDEB(DMOEADDE):
    """DMOEA/D-DE variant B: responds to a detected change by mutating a
    random fraction of the existing population."""

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 perc_diversity=0.3,
                 **kwargs):
        super().__init__(perc_detect_change=perc_detect_change,
                         eps=eps,
                         **kwargs)

        # fraction of the population perturbed by mutation on a change
        self.perc_diversity = perc_diversity

    def _response_change(self):
        # rebuild the population from decision variables only (unevaluated)
        unevaluated = Population.new(X=self.pop.get("X"))

        # draw one uniform number per individual; those below the diversity
        # threshold are the slots to be perturbed
        idx = np.flatnonzero(self.random_state.random(len(unevaluated)) < self.perc_diversity)

        # perturb the chosen individuals via the mating mutation operator
        # (the operator is applied in place on the selected slice)
        self.mating.mutation(self.problem, unevaluated[idx])

        return unevaluated
|
|
File without changes
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
|
|
3
|
+
|
|
4
|
+
Sources:
|
|
5
|
+
- [dnsga2.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/algorithms/moo/dnsga2.py).
|
|
6
|
+
|
|
7
|
+
Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
|
|
8
|
+
|
|
9
|
+
The framework was adapted to enable efficient aggregation of change response strategies.
|
|
10
|
+
"""
|
|
11
|
+
import time
|
|
12
|
+
|
|
13
|
+
import numpy as np
|
|
14
|
+
from pymoo.core.population import Population
|
|
15
|
+
|
|
16
|
+
from pydmoo.algorithms.base.moo.nsga2 import NSGA2
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class DNSGA2(NSGA2):
    """Dynamic NSGA-II: NSGA-II extended with change detection and a
    pluggable change-response hook.

    Each iteration a fraction of the population is re-evaluated; if the mean
    squared difference between the stored and the freshly evaluated objective
    values exceeds ``eps``, an environment change is assumed and
    ``_response_change`` (overridden by subclasses) rebuilds the population.

    Parameters
    ----------
    perc_detect_change : float
        Fraction of the population re-evaluated to detect a change.
    eps : float
        Threshold on the mean squared objective deviation above which a
        change is reported.
    """

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 **kwargs):

        super().__init__(**kwargs)
        self.perc_detect_change = perc_detect_change
        self.eps = eps

    def setup(self, problem, **kwargs):
        # change detection compares raw objective values only, so
        # constrained problems are not supported
        assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
        return super().setup(problem, **kwargs)

    def _detect_change_sample_part_population(self):
        """Re-evaluate a random subset of the population and report whether
        its objectives deviate from the stored values by more than ``eps``."""
        pop = self.pop
        X, F = pop.get("X", "F")

        # the number of solutions to sample from the population to detect the change
        # (clamped so that sampling without replacement below is always valid)
        n_samples = min(int(np.ceil(len(pop) * self.perc_detect_change)), len(pop))

        # choose randomly some individuals of the current population to test if there was a change.
        # BUGFIX: sample WITHOUT replacement - the previous default (with
        # replacement) could re-evaluate the same individual several times,
        # wasting evaluations and weakening the detection signal.
        I = self.random_state.choice(np.arange(len(pop)), size=n_samples, replace=False)
        samples = self.evaluator.eval(self.problem, Population.new(X=X[I]))

        # calculate the differences between the old and newly evaluated pop
        delta = ((samples.get("F") - F[I]) ** 2).mean()

        # if there is an average deviation bigger than eps -> we have a change detected
        change_detected = delta > self.eps
        return change_detected

    def _infill_static_dynamic(self):
        """Detect an environment change and, if one occurred, let the response
        strategy rebuild the population before the next infill."""
        # for dynamic environment
        pop = self.pop

        change_detected = self._detect_change_sample_part_population()

        if change_detected:

            start_time = time.time()

            # delegate to the concrete response strategy (subclass hook)
            pop = self._response_change()

            # reevaluate because we know there was a change
            self.evaluator.eval(self.problem, pop)

            # do a survival to recreate rank and crowding of all individuals
            # Modified by DynOpt on Dec 21, 2025
            # n_survive=len(pop) -> n_survive=self.pop_size
            pop = self.survival.do(self.problem, pop, n_survive=self.pop_size, random_state=self.random_state)

            self.pop = pop

            # record how long the change response took
            self.data["response_duration"] = time.time() - start_time

        return pop

    def _response_change(self):
        """Hook invoked after a change is detected; subclasses return the
        responded (unevaluated) population."""
        pass
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class DNSGA2A(DNSGA2):
    """DNSGA-II variant A: responds to a detected change by re-seeding a
    random fraction of the population with freshly sampled individuals."""

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 perc_diversity=0.3,
                 **kwargs):
        super().__init__(perc_detect_change=perc_detect_change,
                         eps=eps,
                         **kwargs)

        # fraction of the population replaced by new random samples on a change
        self.perc_diversity = perc_diversity

    def _response_change(self):
        # rebuild the population from decision variables only (unevaluated)
        unevaluated = Population.new(X=self.pop.get("X"))

        # draw one uniform number per individual; those below the diversity
        # threshold are the slots to be replaced
        idx = np.flatnonzero(self.random_state.random(len(unevaluated)) < self.perc_diversity)

        # inject diversity: overwrite the selected slots with random samples
        unevaluated[idx] = self.initialization.sampling(self.problem, len(idx), random_state=self.random_state)

        return unevaluated
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class DNSGA2B(DNSGA2):
    """DNSGA-II variant B: responds to a detected change by mutating a
    random fraction of the existing population."""

    def __init__(self,
                 perc_detect_change=0.1,
                 eps=0.0,
                 perc_diversity=0.3,
                 **kwargs):
        super().__init__(perc_detect_change=perc_detect_change,
                         eps=eps,
                         **kwargs)

        # fraction of the population perturbed by mutation on a change
        self.perc_diversity = perc_diversity

    def _response_change(self):
        # rebuild the population from decision variables only (unevaluated)
        unevaluated = Population.new(X=self.pop.get("X"))

        # draw one uniform number per individual; those below the diversity
        # threshold are the slots to be perturbed
        idx = np.flatnonzero(self.random_state.random(len(unevaluated)) < self.perc_diversity)

        # perturb the chosen individuals via the mating mutation operator
        # (the operator is applied in place on the selected slice)
        self.mating.mutation(self.problem, unevaluated[idx])

        return unevaluated
|
|
File without changes
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
|
|
3
|
+
|
|
4
|
+
Sources:
|
|
5
|
+
- [moead.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/algorithms/moo/moead.py).
|
|
6
|
+
|
|
7
|
+
Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
|
|
8
|
+
|
|
9
|
+
Add a method `_next_static_dynamic` for dynamic multi-objective optimization.
|
|
10
|
+
"""
|
|
11
|
+
import numpy as np
|
|
12
|
+
from pymoo.core.duplicate import NoDuplicateElimination
|
|
13
|
+
from pymoo.core.population import Population
|
|
14
|
+
from pymoo.core.selection import Selection
|
|
15
|
+
from pymoo.core.variable import Real, get
|
|
16
|
+
from pymoo.docs import parse_doc_string
|
|
17
|
+
from pymoo.operators.crossover.sbx import SBX
|
|
18
|
+
from pymoo.operators.mutation.pm import PM
|
|
19
|
+
from pymoo.operators.sampling.rnd import FloatRandomSampling
|
|
20
|
+
from pymoo.util.display.multi import MultiObjectiveOutput
|
|
21
|
+
from pymoo.util.reference_direction import default_ref_dirs
|
|
22
|
+
from scipy.spatial.distance import cdist
|
|
23
|
+
|
|
24
|
+
from pydmoo.algorithms.base.core.algorithm import LoopwiseAlgorithm
|
|
25
|
+
from pydmoo.algorithms.base.core.genetic import GeneticAlgorithm
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class NeighborhoodSelection(Selection):
    """Mating selection that, with probability ``prob``, draws parents from a
    solution's neighborhood and otherwise from the whole population."""

    def __init__(self, prob=1.0) -> None:
        super().__init__()
        # probability of restricting mating to the neighborhood
        self.prob = Real(prob, bounds=(0.0, 1.0))

    def _do(self, problem, pop, n_select, n_parents, neighbors=None, random_state=None, **kwargs):
        # one neighborhood must be supplied per selection slot
        assert n_select == len(neighbors)

        parents = np.full((n_select, n_parents), -1)
        probs = get(self.prob, size=n_select)

        for row in range(n_select):
            use_neighborhood = random_state.random() < probs[row]
            if use_neighborhood:
                # sample distinct parents from this slot's neighborhood
                parents[row] = random_state.choice(neighbors[row], n_parents, replace=False)
            else:
                # global fallback: distinct parents from the whole population
                parents[row] = random_state.permutation(len(pop))[:n_parents]

        return parents
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
# =========================================================================================================
|
|
50
|
+
# Implementation
|
|
51
|
+
# =========================================================================================================
|
|
52
|
+
|
|
53
|
+
class MOEAD(LoopwiseAlgorithm, GeneticAlgorithm):
    """MOEA/D with a ``_next_static_dynamic`` hook for dynamic variants.

    Parameters
    ----------
    ref_dirs : np.ndarray or None
        Reference directions; if None, defaults are derived in ``_setup``.
    n_neighbors : int
        Number of neighboring reference directions considered during mating.
    decomposition : object or None
        Decomposition metric; if None, a default is chosen in ``_setup``.
    prob_neighbor_mating : float
        Probability of restricting mating to the neighborhood.
    """

    def __init__(self,
                 ref_dirs=None,
                 n_neighbors=20,
                 decomposition=None,
                 prob_neighbor_mating=0.9,
                 sampling=FloatRandomSampling(),
                 crossover=SBX(prob=1.0, eta=20),
                 mutation=PM(prob_var=None, eta=20),
                 output=MultiObjectiveOutput(),
                 **kwargs):

        # reference directions used for MOEAD
        self.ref_dirs = ref_dirs

        # the decomposition metric used
        self.decomposition = decomposition

        # the number of neighbors considered during mating
        self.n_neighbors = n_neighbors

        # neighborhood index matrix, computed in _setup
        self.neighbors = None

        self.selection = NeighborhoodSelection(prob=prob_neighbor_mating)

        # BUGFIX: len(ref_dirs) raised a TypeError when ref_dirs was left at
        # its default of None; defer the population size to _setup in that case
        super().__init__(pop_size=len(ref_dirs) if ref_dirs is not None else None,
                         sampling=sampling,
                         crossover=crossover,
                         mutation=mutation,
                         eliminate_duplicates=NoDuplicateElimination(),
                         output=output,
                         advance_after_initialization=False,
                         **kwargs)

    def _setup(self, problem, **kwargs):
        assert not problem.has_constraints(), "This implementation of MOEAD does not support any constraints."

        # if no reference directions have been provided get them and override the population size and other settings
        if self.ref_dirs is None:
            self.ref_dirs = default_ref_dirs(problem.n_obj)
            self.pop_size = len(self.ref_dirs)

        # neighbours includes the entry by itself intentionally for the survival method
        self.neighbors = np.argsort(cdist(self.ref_dirs, self.ref_dirs), axis=1, kind='quicksort')[:, :self.n_neighbors]

        # if the decomposition is not set yet, set the default
        if self.decomposition is None:
            self.decomposition = default_decomp(problem)

    def _initialize_advance(self, infills=None, **kwargs):
        super()._initialize_advance(infills, **kwargs)
        # initialize the ideal point from the initial population
        self.ideal = np.min(self.pop.get("F"), axis=0)

    def _next(self):
        """Generator performing one MOEA/D iteration, yielding each offspring
        for evaluation."""
        pop = self._next_static_dynamic()

        # iterate for each member of the population in random order
        for k in self.random_state.permutation(len(pop)):
            # get the parents using the neighborhood selection
            P = self.selection.do(self.problem, pop, 1, self.mating.crossover.n_parents, neighbors=[self.neighbors[k]], random_state=self.random_state)

            # perform a mating using the default operators - if more than one offspring just pick the first
            off = self.random_state.choice(self.mating.do(self.problem, pop, 1, parents=P, n_max_iterations=1, random_state=self.random_state))

            # evaluate the offspring
            off = yield off

            # update the ideal point
            self.ideal = np.min(np.vstack([self.ideal, off.F]), axis=0)

            # now actually do the replacement if the individual is better
            self._replace(k, off)

    # Added by DynOpt on Dec 21, 2025
    def _next_static_dynamic(self):
        """Hook for dynamic subclasses; the static algorithm simply returns
        the current population unchanged."""
        pop = self.pop

        return pop

    def _replace(self, k, off):
        """Replace every neighbor of index ``k`` whose decomposed fitness is
        worse than the offspring's."""
        pop = self.pop

        # calculate the decomposed values for each neighbor
        N = self.neighbors[k]
        FV = self.decomposition.do(pop[N].get("F"), weights=self.ref_dirs[N, :], ideal_point=self.ideal)
        off_FV = self.decomposition.do(off.F[None, :], weights=self.ref_dirs[N, :], ideal_point=self.ideal)

        # this makes the algorithm to support constraints - not originally proposed though and not tested enough
        # if self.problem.has_constraints():
        #     CV, off_CV = pop[N].get("CV")[:, 0], np.full(len(off_FV), off.CV)
        #     fmax = max(FV.max(), off_FV.max())
        #     FV, off_FV = parameter_less(FV, CV, fmax=fmax), parameter_less(off_FV, off_CV, fmax=fmax)

        # get the absolute index in F where offspring is better than the current F (decomposed space)
        I = np.where(off_FV < FV)[0]
        pop[N[I]] = off
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class ParallelMOEAD(MOEAD):
    """Batched MOEA/D: creates all offspring of an iteration at once in
    ``_infill`` and applies the neighborhood replacements in ``_advance``."""

    def __init__(self, ref_dirs, **kwargs):
        super().__init__(ref_dirs, **kwargs)
        # population indices matching the most recent infill batch; set by
        # _infill and consumed by _advance
        self.indices = None

    def _infill(self):
        """Create ``n_offsprings`` offspring, one for each randomly chosen
        population member, and remember which member each belongs to."""
        pop_size, cross_parents, cross_off = self.pop_size, self.mating.crossover.n_parents, self.mating.crossover.n_offsprings

        # do the mating in a random order
        indices = self.random_state.permutation(len(self.pop))[:self.n_offsprings]

        # get the parents using the neighborhood selection
        P = self.selection.do(self.problem, self.pop, self.n_offsprings, cross_parents,
                              neighbors=self.neighbors[indices], random_state=self.random_state)

        # do not any duplicates elimination - thus this results in exactly pop_size * n_offsprings offsprings
        off = self.mating.do(self.problem, self.pop, 1e12, n_max_iterations=1, parents=P, random_state=self.random_state)

        # select a random offspring from each mating
        off = Population.create(*[self.random_state.choice(pool) for pool in np.reshape(off, (self.n_offsprings, -1))])

        # store the indices because of the neighborhood matching in advance
        self.indices = indices

        return off

    def _advance(self, infills=None, **kwargs):
        """Update the ideal point and apply the per-offspring neighborhood
        replacements for the batch created by ``_infill``."""
        assert len(self.indices) == len(infills), "Number of infills must be equal to the one created beforehand."

        # update the ideal point before starting to replace
        self.ideal = np.min(np.vstack([self.ideal, infills.get("F")]), axis=0)

        # now do the replacements as in the loop-wise version
        for k, off in enumerate(infills):
            self._replace(self.indices[k], off)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def default_decomp(problem):
    """Return a sensible default decomposition for *problem*: Tchebicheff
    for up to two objectives, PBI otherwise."""
    # imports are local so the dependency is only paid when the default is used
    if problem.n_obj <= 2:
        from pymoo.decomposition.tchebicheff import Tchebicheff
        return Tchebicheff()

    from pymoo.decomposition.pbi import PBI
    return PBI()
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
# attach the parsed parameter documentation to MOEAD's constructor docstring
parse_doc_string(MOEAD.__init__)
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from pymoo.core.population import Population
|
|
3
|
+
from pymoo.decomposition.tchebicheff import Tchebicheff
|
|
4
|
+
from pymoo.operators.mutation.pm import PM
|
|
5
|
+
|
|
6
|
+
from pydmoo.algorithms.base.moo.moead import MOEAD
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class MOEADDE(MOEAD):
    """MOEA/D-DE (Updated by Cao).

    It is worth noting that there is a distinct modification in line 28 compared with the original framework of MOEA/D-DE.
    The newly generated solution competes with each member from the corresponding mating neighborhood (denoted as Pool in Algorithm 2).
    But in the original MOEA/D-DE framework, it only competes with two members from the corresponding mating neighborhood.
    This modification expands the replacement neighborhood to enhance the exploitation capability that is extremely important in dealing with DMOPs.

    Cao, L., Xu, L., Goodman, E. D., Bao, C., and Zhu, S. (2020).
    Evolutionary dynamic multiobjective optimization assisted by a support vector regression predictor.
    IEEE Transactions on Evolutionary Computation, 24(2), 305–319.
    https://doi.org/10.1109/TEVC.2019.2925722

    Hui Li and Qingfu Zhang. (2009).
    Multiobjective optimization problems with complicated pareto sets, MOEA/D and NSGA-II.
    IEEE Transactions on Evolutionary Computation, 13(2), 284–302.
    https://doi.org/10.1109/TEVC.2008.925798
    """

    def __init__(self, decomposition=Tchebicheff(), prob_neighbor_mating=0.8, **kwargs):
        super().__init__(decomposition=decomposition, prob_neighbor_mating=prob_neighbor_mating, **kwargs)

        # Neighborhood Selection
        # the number of neighbors considered during mating
        self.n_neighbors = 20
        self._delta = 0.8  # 0.9 (prob_neighbor_mating)

        # DE crossover
        self._cr = 0.5  # 1
        self._F_ = 0.5  # 0.5

        # Polynomial mutation
        self._eta = 20
        # self._pm = 1/d  # d is the number of variables
        self.mutation = PM(eta=self._eta)  # prob_var=1/d, default is min(0.5, 1/d)

    def _next(self):
        """Generator performing one MOEA/D-DE iteration (DE/rand/1/bin plus
        polynomial mutation), yielding each offspring for evaluation."""
        pop = self._next_static_dynamic()

        # iterate for each member of the population in random order
        for k in self.random_state.permutation(len(pop)):
            # Step 2.1 Selection of Mating/Update Range:
            # Select parents - use neighborhood with probability delta, else global selection
            pp = self.neighbors[k] if self.random_state.random() < self._delta else list(range(self.pop_size))

            # Step 2.2 Reproduction
            # Randomly select three distinct parents from the chosen pool.
            # BUGFIX: replace=True could draw duplicate parents, which collapses
            # the DE difference vector F*(x2 - x3) to zero; sample without
            # replacement so the three parents really are distinct.
            a, b, c = self.random_state.choice(pp, size=3, replace=False)

            # Initialize problem parameters
            n_var = self.problem.n_var
            xl, xu = self.problem.xl, self.problem.xu

            X = pop.get("X")  # Position critical

            # mutation operator
            # Differential evolution mutation: v = x1 + F*(x2 - x3)
            donor = X[a] + self._F_ * (X[b] - X[c])

            # crossover operator
            # Create mask for crossover operations (CR probability)
            mask = self.random_state.random(n_var) < self._cr

            # Combine mutation with target vector based on crossover probability
            V = np.where(mask, donor, X[k])

            # mutation operator
            # polynomial mutation operator
            r = self.random_state.random(n_var)
            delta = np.where(
                self.random_state.random(n_var) < 0.5,
                np.power(2*r, 1/(1+self._eta)) - 1,  # First perturbation formula
                1 - np.power(2-2*r, 1/(1+self._eta))  # Alternative perturbation formula
            )
            perturb_mask = self.random_state.random(n_var) < (1/n_var)  # self._pm
            V[perturb_mask] += delta[perturb_mask] * (xu[perturb_mask] - xl[perturb_mask])

            # Step 2.3 Repair: out-of-bounds elements are clipped back to the
            # boundary (NOTE: the referenced paper describes random
            # re-initialization instead; clipping is what is implemented here)
            V = np.clip(V, xl, xu)

            # wrap the repaired vector as a single unevaluated individual
            off = Population.new(X=np.array([V]))[0]

            # evaluate the offspring
            off = yield off

            # Step 2.4 Update
            # update the ideal point
            self.ideal = np.min(np.vstack([self.ideal, off.F]), axis=0)

            # Step 2.5 Update of Solutions
            # The offspring competes with every member of the neighborhood
            # (expanded replacement - see the class docstring for the rationale)
            self._replace(k, off)
|
|
File without changes
|