pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydmoo/algorithms/base/__init__.py +20 -0
- pydmoo/algorithms/base/core/__init__.py +0 -0
- pydmoo/algorithms/base/core/algorithm.py +416 -0
- pydmoo/algorithms/base/core/genetic.py +129 -0
- pydmoo/algorithms/base/dmoo/__init__.py +0 -0
- pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
- pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
- pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
- pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
- pydmoo/algorithms/base/moo/__init__.py +0 -0
- pydmoo/algorithms/base/moo/moead.py +199 -0
- pydmoo/algorithms/base/moo/moeadde.py +105 -0
- pydmoo/algorithms/base/moo/mopso.py +0 -0
- pydmoo/algorithms/base/moo/nsga2.py +122 -0
- pydmoo/algorithms/modern/__init__.py +94 -0
- pydmoo/algorithms/modern/moead_imkt.py +161 -0
- pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
- pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
- pydmoo/algorithms/modern/moead_ktmm.py +112 -0
- pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
- pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
- pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
- pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
- pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
- pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
- pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
- pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
- pydmoo/algorithms/utils/__init__.py +0 -0
- pydmoo/algorithms/utils/utils.py +166 -0
- pydmoo/core/__init__.py +0 -0
- pydmoo/{response → core}/ar_model.py +4 -4
- pydmoo/{response → core}/bounds.py +35 -2
- pydmoo/core/distance.py +45 -0
- pydmoo/core/inverse.py +55 -0
- pydmoo/core/lstm/__init__.py +0 -0
- pydmoo/core/lstm/base.py +291 -0
- pydmoo/core/lstm/lstm.py +491 -0
- pydmoo/core/manifold.py +93 -0
- pydmoo/core/predictions.py +50 -0
- pydmoo/core/sample_gaussian.py +56 -0
- pydmoo/core/sample_uniform.py +63 -0
- pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
- pydmoo/problems/__init__.py +53 -49
- pydmoo/problems/dyn.py +94 -13
- pydmoo/problems/dynamic/cec2015.py +10 -5
- pydmoo/problems/dynamic/df.py +6 -3
- pydmoo/problems/dynamic/gts.py +69 -34
- pydmoo/problems/real_world/__init__.py +0 -0
- pydmoo/problems/real_world/dsrp.py +168 -0
- pydmoo/problems/real_world/dwbdp.py +189 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
- pydmoo-0.1.0.dist-info/RECORD +70 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
- pydmoo-0.0.18.dist-info/RECORD +0 -15
- /pydmoo/{response → algorithms}/__init__.py +0 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
pydmoo/algorithms/modern/nsga2_imkt_lstm.py
@@ -0,0 +1,248 @@
+import numpy as np
+from pymoo.core.population import Population
+
+from pydmoo.algorithms.modern.nsga2_imkt import NSGA2IMKT
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.lstm.lstm import LSTMpredictor
+from pydmoo.core.sample_gaussian import univariate_gaussian_sample
+
+
+class NSGA2IMLSTM(NSGA2IMKT):
+    """Inverse Modeling with LSTM (IMLSTM).
+
+    Inverse Modeling for Dynamic Multiobjective Optimization with LSTM prediction In objective Space.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+        self._n_timesteps = 10
+        self._sequence_length = 5  # Use 5 historical time steps to predict next step
+        self._incremental_learning = False
+
+    def _setup(self, problem, **kwargs):
+        super()._setup(problem, **kwargs)
+
+        # Must be here
+        self._lstm = LSTMpredictor(
+            self._sequence_length,
+            hidden_dim=64,
+            num_layers=1,
+            epochs=50,
+            batch_size=32,
+            lr=0.001,
+            device="cpu",  # for fair comparison
+            patience=5,
+            seed=self.seed,
+            model_type="lstm",
+            incremental_learning=self._incremental_learning,
+        )
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")
+
+        # Check if sufficient historical data is available for LSTM prediction
+        if len(means_stds) > self._n_timesteps:
+            # Update pool
+            self.data["means_stds"] = means_stds[self._n_timesteps:]
+
+            # Prepare time series data from historical means and standard deviations
+            time_series_data = prepare_data_means_std(self._n_timesteps, means_stds)
+
+            # Initialize predictor and generate prediction for next time step
+            next_prediction = self._lstm.convert_train_predict(time_series_data)
+
+            # Convert prediction tensor to numpy array for further processing
+            next_prediction = next_prediction.numpy()
+
+            # Split prediction into mean and standard deviation components
+            # First n_obj elements represent mean values, remaining elements represent standard deviations
+            mean_new, std_new = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
+            std_new = np.abs(std_new)
+
+        else:
+            mean_new, std_new = self._select_means_stds(means_stds, mean, std)
+
+        # sample self.pop_size individuals in objective space
+        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+
+def prepare_data_means_std(n_timesteps, means_stds):
+    """Prepare time series data from means and standard deviations.
+
+    This function converts a sequence of mean vectors and standard deviation vectors
+    into a time series format suitable for machine learning models. It concatenates
+    mean values and standard deviation values to create feature vectors for each time step.
+
+    Parameters
+    ----------
+    means_stds : list of tuples
+        List containing (mean, std, n_iter) pairs for each time step, where:
+        - mean: 1D numpy array of mean values
+        - std: 1D numpy array of standard deviation values
+        - n_iter: number of iterations (not used in feature extraction)
+
+    Returns
+    -------
+    time_series_data : list
+        Combined feature data with shape (n_timesteps, n_features)
+        Each row represents a time step containing:
+        [mean_1, mean_2, ..., mean_n, std_1, std_2, ..., std_n]
+    """
+    # Create time series data
+    time_series_data = []  # shape: (n_timesteps, n_features)
+
+    # Process only the most recent n_timesteps
+    for m, s, _ in means_stds[-n_timesteps:]:
+        # Combine mean vector and standard deviation vector
+        # [*m] unpacks all mean values
+        # [*s] unpacks all standard deviation values
+        feature_vector = [*m, *s]
+
+        time_series_data.append(feature_vector)
+
+    return time_series_data
+
+
+class NSGA2IMiLSTM(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+        self._n_timesteps = 10
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1003(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+        self._n_timesteps = 10
+        self._sequence_length = 3
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1005(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+        self._n_timesteps = 10
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1007(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+        self._n_timesteps = 10
+        self._sequence_length = 7
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1009(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+        self._n_timesteps = 10
+        self._sequence_length = 9
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1503(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 3
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1505(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1507(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 7
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1509(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 9
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1511(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 11
+        self._incremental_learning = True
+
+
+class NSGA2IMiLSTM1513(NSGA2IMLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.size_pool = 15
+        self.denominator = 0.5
+        self._n_timesteps = 15
+        self._sequence_length = 13
+        self._incremental_learning = True
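As a quick orientation to the helper above: prepare_data_means_std flattens each (mean, std, n_iter) record into one feature vector per time step. A minimal sketch of how it could be exercised, with made-up two-objective data (the arrays are illustrative, not from the package):

import numpy as np

from pydmoo.algorithms.modern.nsga2_imkt_lstm import prepare_data_means_std

# Three historical records for a two-objective problem:
# (mean vector, std vector, iteration index).
means_stds = [
    (np.array([1.0, 2.0]), np.array([0.1, 0.2]), 1),
    (np.array([1.1, 2.1]), np.array([0.1, 0.2]), 2),
    (np.array([1.2, 2.2]), np.array([0.2, 0.3]), 3),
]

# Keep only the most recent two steps; each row is
# [mean_1, mean_2, std_1, std_2].
data = prepare_data_means_std(2, means_stds)
# data == [[1.1, 2.1, 0.1, 0.2], [1.2, 2.2, 0.2, 0.3]]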
pydmoo/algorithms/modern/nsga2_imkt_n.py
@@ -0,0 +1,117 @@
+import numpy as np
+from pymoo.core.population import Population
+
+from pydmoo.algorithms.modern.nsga2_imkt import NSGA2IMKT
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.distance import norm_mean_frobenius_distance
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
+
+
+class NSGA2IMKTN(NSGA2IMKT):
+    """Inverse Modeling with Knowledge Transfer.
+
+    Inverse Modeling for Dynamic Multiobjective Optimization with Knowledge Transfer In objective Space.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
+        mean_new, cov_new = self._select_means_covs(means_stds, mean, cov)
+
+        # sample self.pop_size individuals in objective space
+        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # Bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+    def _in_decision_or_objective_space_nd(self, samples, decision_or_objective="decision_space"):
+        # decision space or objective space
+        flag = "X" if decision_or_objective == "decision_space" else "F"
+
+        means_covs = self.data.get("means_covs", [])
+
+        flag_value = self.opt.get(flag)
+        if len(flag_value) <= 1:
+            flag_value = self.pop.get(flag)
+            flag_value = flag_value[:2]
+
+        m, c = np.mean(flag_value, axis=0), np.cov(flag_value.T)
+        means_covs.append((m, 0.5 * (c.T + c), self.n_iter - 1))  # 1-based
+        self.data["means_covs"] = means_covs
+
+        flag_value = samples.get(flag)
+        mean, cov = np.mean(flag_value, axis=0), np.cov(flag_value.T)
+        return means_covs, mean, 0.5 * (cov.T + cov)
+
+    def _select_means_covs(self, means_covs, mean_new, cov_new):
+        # Unpack means and stds
+        means = np.array([m[0] for m in means_covs])
+        covs = np.array([m[1] for m in means_covs])
+
+        # Calculate distances
+        distances = np.array([
+            norm_mean_frobenius_distance(mean, cov, mean_new, cov_new) for mean, cov in zip(means, covs)
+        ])
+
+        # Get top K closest
+        top_k_idx = np.argsort(distances)[:self.size_pool]
+        top_k_dist = distances[top_k_idx]
+        top_k_means = means[top_k_idx]
+        top_k_covs = covs[top_k_idx]
+
+        # Update pool
+        self._update_means_covs_pool(means_covs, top_k_idx)
+
+        # Calculate weights
+        weights = 1 / (top_k_dist + 1e-8)  # Add small epsilon to avoid division by zero
+        weights = weights / (np.sum(weights) + self.denominator)
+
+        # Weighted combination
+        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
+        cov_new = (1 - np.sum(weights)) * cov_new + np.sum(weights[:, None, None] * top_k_covs, axis=0)
+
+        # Symmetric matrix
+        cov_new = 0.5 * (cov_new.T + cov_new)
+        return mean_new, cov_new
+
+    def _update_means_covs_pool(self, means_covs, top_k_idx) -> None:
+        self.data["means_covs"] = [means_covs[i] for i in top_k_idx]
+        return None
+
+
+class NSGA2IMKTN0(NSGA2IMKTN):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
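The core of _select_means_covs above is an inverse-distance blend: the size_pool closest historical (mean, cov) pairs each get weight 1/(d + 1e-8), the weights are normalized against their sum plus self.denominator (so they total less than 1), and the residual weight stays on the new estimate. A standalone sketch of just that arithmetic, with toy scalar means and precomputed distances standing in for norm_mean_frobenius_distance:

import numpy as np

top_k_dist = np.array([0.5, 1.0, 2.0])  # toy distances to 3 pool entries
denominator = 0.5                       # damping term from __init__

weights = 1 / (top_k_dist + 1e-8)       # inverse-distance weights: ~[2, 1, 0.5]
weights = weights / (np.sum(weights) + denominator)  # ~[0.5, 0.25, 0.125]

top_k_means = np.array([0.9, 1.1, 1.4])  # toy historical means (scalars)
mean_new = 1.0
# The new estimate keeps the residual weight 1 - sum(weights) = 0.125.
mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights * top_k_means)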
pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py
@@ -0,0 +1,146 @@
+import numpy as np
+from pymoo.core.population import Population
+from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+from pydmoo.algorithms.modern.nsga2_imkt_n import NSGA2IMKTN
+from pydmoo.algorithms.modern.nsga2_imkt_n_lstm import prepare_data_mean_cov
+from pydmoo.algorithms.utils.utils import make_semidefinite, reconstruct_covariance_from_triu
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.lstm.lstm import LSTMpredictor
+from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
+
+
+class NSGA2IMNcLSTM(NSGA2IMKTN):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self._n_timesteps = 10
+        self._sequence_length = 5  # Use 5 historical time steps to predict next step
+        self._incremental_learning = False
+
+    def _setup(self, problem, **kwargs):
+        super()._setup(problem, **kwargs)
+
+        # Must be here
+        self._lstm = LSTMpredictor(
+            self._sequence_length,
+            hidden_dim=64,
+            num_layers=1,
+            epochs=50,
+            batch_size=32,
+            lr=0.001,
+            device="cpu",  # for fair comparison
+            patience=5,
+            seed=self.seed,
+            model_type="lstm",
+            incremental_learning=self._incremental_learning,
+        )
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_covs, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
+
+        # Check if sufficient historical data is available for LSTM prediction
+        if len(means_covs) > self._n_timesteps:
+            # Update pool
+            self.data["means_covs"] = means_covs[self._n_timesteps:]
+
+            # Prepare time series data from historical means and covariance matrices
+            time_series_data = prepare_data_mean_cov(self._n_timesteps, means_covs)
+
+            # Initialize predictor and generate prediction for next time step
+            next_prediction = self._lstm.convert_train_predict(time_series_data)
+
+            # Convert prediction tensor to numpy array for further processing
+            next_prediction = next_prediction.numpy()
+
+            # Split prediction into mean and covariance components
+            # First n_obj elements represent the mean vector, Remaining elements represent the flattened covariance matrix values
+            mean_new, cov_new_ = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
+            cov_new = reconstruct_covariance_from_triu(cov_new_, len(mean_new))
+            cov_new = make_semidefinite(cov_new)
+
+        else:
+            mean_new, cov_new = self._select_means_covs(means_covs, mean, cov)
+
+        # sample self.pop_size individuals in objective space
+        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+    def sampling_new_pop(self):
+        ps = self.opt.get("X")
+        X = self.pop.get("X")
+
+        if not self.problem.has_constraints():
+
+            last_ps = self.data.get("last_ps", [])
+            if len(last_ps) == 0:
+                last_ps = ps
+            self.data["last_ps"] = ps
+
+            d = np.mean(ps, axis=0) - np.mean(last_ps, axis=0)
+
+            radius = max(np.linalg.norm(d) / self.problem.n_obj, 0.1)
+
+            X = X + d + self.random_state.uniform(low=-radius, high=radius, size=X.shape)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        samples = Population.new(X=X)
+        samples = self.evaluator.eval(self.problem, samples)
+
+        # do a survival to recreate rank and crowding of all individuals
+        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
+        return samples
+
+    def _select_means_covs(self, means_covs, mean_new, cov_new):
+        # Unpack means and stds
+        means = np.array([m[0] for m in means_covs])
+        covs = np.array([m[1] for m in means_covs])
+
+        # Weighted combination
+        mean_new = 0.5 * mean_new + 0.5 * means[-1]
+        cov_new = 0.5 * cov_new + 0.5 * covs[-1]
+
+        return mean_new, cov_new
+
+
+class NSGA2IMNicLSTM(NSGA2IMNcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 5  # Use 5 historical time steps to predict next step
+        self._incremental_learning = True
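reconstruct_covariance_from_triu and make_semidefinite come from pydmoo.algorithms.utils.utils and are not part of this diff. Going only by their names and call sites above, they presumably rebuild a symmetric covariance matrix from its predicted upper-triangular entries and then repair it to be positive semidefinite. The sketch below is a guess at that behavior under those assumptions, not the package's actual implementation:

import numpy as np

def reconstruct_covariance_from_triu_sketch(triu_values, n):
    # Place the n*(n+1)/2 predicted values into the upper triangle
    # (diagonal included), then mirror to get a symmetric matrix.
    cov = np.zeros((n, n))
    cov[np.triu_indices(n)] = triu_values
    return cov + np.triu(cov, k=1).T

def make_semidefinite_sketch(cov, eps=1e-10):
    # Clip negative eigenvalues so sampling from N(mean, cov) is valid.
    vals, vecs = np.linalg.eigh(cov)
    return vecs @ np.diag(np.clip(vals, eps, None)) @ vecs.T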
pydmoo/algorithms/modern/nsga2_imkt_n_igp.py
@@ -0,0 +1,57 @@
+from pymoo.core.population import Population
+
+from pydmoo.algorithms.modern.nsga2_imkt_n import NSGA2IMKTN
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.predictions import igp_based_predictor
+from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
+
+
+class NSGA2IMKTNIGP(NSGA2IMKTN):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+        self.delta_s = 0.01
+        self.sigma_n = 0.01
+        self.sigma_n_2 = self.sigma_n ** 2
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
+        mean_new, cov_new = self._select_means_covs(means_stds, mean, cov)
+
+        # sample self.pop_size individuals in objective space
+        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        X = igp_based_predictor(samples.get("X"), samples.get("F"), F, self.sigma_n_2)
+
+        # Bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+
+class NSGA2IMKTNIGP0(NSGA2IMKTNIGP):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
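A note on the inverse mapping these classes share: the recurring "X = FB" comments suggest closed_form_solution(X, F) fits a linear model B with X ≈ F·B, whose ordinary least-squares solution is B = (F^T F)^(-1) F^T X, while igp_based_predictor appears to replace that linear map with an inverse Gaussian-process predictor parameterized by the noise variance sigma_n_2. A least-squares sketch under that reading, with hypothetical toy data (not the package's code):

import numpy as np

def closed_form_solution_sketch(X, F):
    # Fit B minimizing ||F @ B - X||, i.e. B = (F^T F)^(-1) F^T X,
    # computed via the pseudo-inverse for numerical stability.
    return np.linalg.pinv(F) @ X

# Usage: learn B from evaluated samples, then map freshly sampled
# objective vectors back into decision space.
rng = np.random.default_rng(0)
X_s, F_s = rng.random((20, 5)), rng.random((20, 2))  # toy samples
B = closed_form_solution_sketch(X_s, F_s)            # shape (2, 5)
X_new = rng.random((10, 2)) @ B                      # predicted decisions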