pydmoo 0.1.3-py3-none-any.whl → 0.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. pydmoo/algorithms/classic/moead_ae.py +1 -1
  2. pydmoo/algorithms/classic/moead_pps.py +1 -1
  3. pydmoo/algorithms/classic/moead_svr.py +1 -1
  4. pydmoo/algorithms/classic/moeadde_ae.py +1 -1
  5. pydmoo/algorithms/classic/moeadde_pps.py +1 -1
  6. pydmoo/algorithms/classic/moeadde_svr.py +1 -1
  7. pydmoo/algorithms/classic/nsga2_ae.py +1 -1
  8. pydmoo/algorithms/classic/nsga2_pps.py +1 -1
  9. pydmoo/algorithms/knowledge/__init__.py +0 -0
  10. pydmoo/algorithms/knowledge/moead_kgb.py +407 -0
  11. pydmoo/algorithms/knowledge/moead_ktmm.py +115 -0
  12. pydmoo/algorithms/knowledge/moeadde_kgb.py +407 -0
  13. pydmoo/algorithms/knowledge/moeadde_ktmm.py +115 -0
  14. pydmoo/algorithms/knowledge/nsga2_kgb.py +378 -0
  15. pydmoo/algorithms/knowledge/nsga2_ktmm.py +115 -0
  16. pydmoo/algorithms/learning/__init__.py +0 -0
  17. pydmoo/algorithms/learning/moead_tr.py +1 -1
  18. pydmoo/algorithms/learning/moeadde_tr.py +1 -1
  19. pydmoo/algorithms/learning/nsga2_tr.py +1 -1
  20. pydmoo/algorithms/modern/moead_imkt.py +1 -1
  21. pydmoo/algorithms/modern/moead_imkt_igp.py +1 -1
  22. pydmoo/algorithms/modern/moead_imkt_lstm.py +1 -1
  23. pydmoo/algorithms/modern/moead_imkt_n.py +1 -1
  24. pydmoo/algorithms/modern/moead_imkt_n_igp.py +1 -1
  25. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +1 -1
  26. pydmoo/algorithms/modern/moead_ktmm.py +1 -1
  27. pydmoo/algorithms/modern/moeadde_imkt.py +1 -1
  28. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +1 -1
  29. pydmoo/algorithms/modern/moeadde_imkt_igp.py +1 -1
  30. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +1 -1
  31. pydmoo/algorithms/modern/moeadde_imkt_n.py +1 -1
  32. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +1 -1
  33. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +1 -1
  34. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +1 -1
  35. pydmoo/algorithms/modern/moeadde_ktmm.py +1 -1
  36. pydmoo/algorithms/modern/nsga2_imkt.py +1 -1
  37. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +1 -1
  38. pydmoo/algorithms/modern/nsga2_imkt_igp.py +1 -1
  39. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +1 -1
  40. pydmoo/algorithms/modern/nsga2_imkt_n.py +1 -1
  41. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +1 -1
  42. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +1 -1
  43. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +1 -1
  44. pydmoo/algorithms/modern/nsga2_ktmm.py +1 -1
  45. {pydmoo-0.1.3.dist-info → pydmoo-0.1.5.dist-info}/METADATA +2 -1
  46. pydmoo-0.1.5.dist-info/RECORD +90 -0
  47. pydmoo-0.1.3.dist-info/RECORD +0 -82
  48. {pydmoo-0.1.3.dist-info → pydmoo-0.1.5.dist-info}/WHEEL +0 -0
  49. {pydmoo-0.1.3.dist-info → pydmoo-0.1.5.dist-info}/licenses/LICENSE +0 -0
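The largest additions in 0.1.5 are the new knowledge-based algorithm variants (NSGA2KGB, NSGA2KTMM and their MOEA/D counterparts). As orientation before the diffs below, a hypothetical construction of the two NSGA-II variants is sketched here; the module paths and the constructor defaults are taken from the diff itself, while everything else (how the algorithm object is then run) is an assumption based on the usual pymoo-style workflow rather than documented pydmoo API.

# Hypothetical usage sketch (not from the package docs): module paths and the
# c_size / perturb_dev defaults come from the diff below; running the algorithm
# is assumed to follow pymoo conventions.
from pydmoo.algorithms.knowledge.nsga2_kgb import NSGA2KGB
from pydmoo.algorithms.knowledge.nsga2_ktmm import NSGA2KTMM

kgb = NSGA2KGB(c_size=13, perturb_dev=0.1)   # Knowledge Guided Bayesian response
ktmm = NSGA2KTMM()                           # Knowledge Transfer with Mixture Model response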
pydmoo/algorithms/knowledge/nsga2_kgb.py (new file)
@@ -0,0 +1,378 @@
+ import random
+
+ import numpy as np
+ from pymoo.core.population import Population
+ from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
+ from sklearn.naive_bayes import GaussianNB
+
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
+
+
+ def euclidean_distance(a, b):
+     a = np.array(a)
+     b = np.array(b)
+     return np.sqrt(np.sum((a - b) ** 2))
+
+
+ class NSGA2KGB(DNSGA2):
+     """Knowledge Guided Bayesian (KGB).
+
+     References
+     ----------
+     Ye, Y., Li, L., Lin, Q., Wong, K.-C., Li, J., and Ming, Z. (2022).
+     Knowledge guided bayesian classification for dynamic multi-objective optimization.
+     Knowledge-Based Systems, 250, 109173.
+     https://doi.org/10.1016/j.knosys.2022.109173
+     """
+     def __init__(
+         self,
+         c_size=13,
+         perturb_dev=0.1,
+         ps={},
+         save_ps=False,
+         **kwargs,
+     ):
+
+         super().__init__(**kwargs)
+
+         self.C_SIZE = c_size
+         self.PERTURB_DEV = perturb_dev
+
+         self.ps = ps
+         self.save_ps = save_ps
+
+         self.nr_rand_solutions = 50 * self.pop_size
+         self.t = 0
+
+     def _infill_static_dynamic(self):
+         # archive the current POS
+         self.add_to_ps()
+
+         return super()._infill_static_dynamic()
+
+     def _response_mechanism(self):
+         """Response mechanism."""
+         # increase t counter for unique key of PS
+         self.t += 1
+
+         # conduct knowledge reconstruction examination
+         pop_useful, pop_useless, c = self.knowledge_reconstruction_examination()
+
+         # Train a naive bayesian classifier
+         model = self.naive_bayesian_classifier(pop_useful, pop_useless)
+
+         # generate a lot of random solutions with the dimensions of problem decision space
+         X_test = self.random_strategy(self.nr_rand_solutions)
+
+         # introduce noise to vary previously useful solutions
+         noise = self.random_state.normal(0, self.PERTURB_DEV, self.problem.n_var)
+         noisy_useful_history = np.asarray(pop_useful) + noise
+
+         # check whether solutions are within bounds
+         noisy_useful_history = self.check_boundaries(noisy_useful_history)
+
+         # add noisy useful history to randomly generated solutions
+         X_test = np.vstack((X_test, noisy_useful_history))
+
+         # predict whether random solutions are useful or useless
+         Y_test = model.predict(X_test)
+
+         # create list of useful predicted solutions
+         predicted_pop = self.predicted_population(X_test, Y_test)
+
+         # ------ POPULATION GENERATION --------
+         # take a random sample from predicted pop and known useful pop
+         if len(predicted_pop) >= self.pop_size - self.C_SIZE:
+             init_pop = []
+             predicted_pop = random.sample(
+                 predicted_pop, self.pop_size - self.C_SIZE
+             )
+
+             # add sampled solutions to init_pop
+             for solution in predicted_pop:
+                 init_pop.append(solution)
+
+             # add cluster centroids to init_pop
+             for solution in c:
+                 init_pop.append(np.asarray(solution))
+
+         else:
+
+             # if not enough predicted solutions are available, add all predicted solutions to init_pop
+             init_pop = []
+
+             for solution in predicted_pop:
+                 init_pop.append(solution)
+
+             # add cluster centroids to init_pop
+             for solution in c:
+                 init_pop.append(np.asarray(solution))
+
+             # if there are still not enough solutions in init_pop randomly sample previously useful solutions directly without noise to init_pop
+             if len(init_pop) < self.pop_size:
+
+                 # fill up init_pop with randomly sampled solutions from pop_useful
+                 if len(pop_useful) >= self.pop_size - len(init_pop):
+
+                     init_pop = np.vstack(
+                         (
+                             init_pop,
+                             random.sample(pop_useful, self.pop_size - len(init_pop)),
+                         )
+                     )
+                 else:
+                     # if not enough solutions are available, add all previously known useful solutions without noise to init_pop
+                     for solution in pop_useful:
+                         init_pop.append(solution)
+
+             # if there are still not enough solutions in init_pop generate random solutions with the dimensions of problem decision space
+             if len(init_pop) < self.pop_size:
+
+                 # fill up with random solutions
+                 init_pop = np.vstack(
+                     (init_pop, self.random_strategy(self.pop_size - len(init_pop)))
+                 )
+
+         # recreate the current population without being evaluated
+         pop = Population.new(X=init_pop)
+
+         # reevaluate because we know there was a change
+         self.evaluator.eval(self.problem, pop)
+
+         # do a survival to recreate rank and crowding of all individuals
+         pop = self.survival.do(self.problem, pop, n_survive=len(pop), random_state=self.random_state)
+
+         return pop
+
+     def knowledge_reconstruction_examination(self):
+         """
+         Perform the knowledge reconstruction examination.
+         :return: Tuple containing the useful population, useless population, and cluster centroids
+         """
+         clusters = self.ps # set historical PS set as clusters
+         Nc = self.C_SIZE # set final nr of clusters
+         size = len(self.ps) # set size iteration to length of cluster
+         run_counter = 0 # counter variable to give unique key
+
+         # while there are still clusters to be condensed
+         while size > Nc:
+
+             counter = 0
+             min_distance = None
+             min_distance_index = []
+
+             # get clusters that are closest to each other by calculating the euclidean distance
+             for keys_i in clusters.keys():
+                 for keys_j in clusters.keys():
+                     if (
+                         clusters[keys_i]["solutions"]
+                         is not clusters[keys_j]["solutions"]
+                     ):
+
+                         dst = euclidean_distance(
+                             clusters[keys_i]["centroid"],
+                             clusters[keys_j]["centroid"],
+                         )
+
+                         if min_distance == None:
+                             min_distance = dst
+                             min_distance_index = [keys_i, keys_j]
+                         elif dst < min_distance:
+                             min_distance = dst
+
+                             min_distance_index = [keys_i, keys_j]
+
+                     counter += 1
+
+             # merge closest clusters
+             for solution in clusters[min_distance_index[1]]["solutions"]:
+                 clusters[min_distance_index[0]]["solutions"].append(solution)
+
+             # calculate new centroid for merged cluster
+             clusters[min_distance_index[0]][
+                 "centroid"
+             ] = self.calculate_cluster_centroid(
+                 clusters[min_distance_index[0]]["solutions"]
+             )
+
+             # remove cluster that was merged
+             del clusters[min_distance_index[1]]
+
+             size -= 1
+             run_counter += 1
+
+         c = [] # list of centroids
+         pop_useful = []
+         pop_useless = []
+
+         # get centroids of clusters
+         for key in clusters.keys():
+             c.append(clusters[key]["centroid"])
+
+         # create pymoo population objected to evaluate centroid solutions
+         centroid_pop = Population.new("X", c)
+
+         # evaluate centroids
+         self.evaluator.eval(self.problem, centroid_pop)
+
+         # do non-dominated sorting on centroid solutions
+         ranking = NonDominatedSorting().do(centroid_pop.get("F"), return_rank=True)[-1]
+
+         # add the individuals from the clusters with the best objective values to the useful population the rest is useless :(
+
+         for idx, rank in enumerate(ranking):
+             if rank == 0:
+                 for key in clusters.keys():
+                     if centroid_pop[idx].X == clusters[key]["centroid"]:
+                         for cluster_individual in clusters[key]["solutions"]:
+                             pop_useful.append(cluster_individual)
+             else:
+                 for key in clusters.keys():
+                     if centroid_pop[idx].X == clusters[key]["centroid"]:
+                         for cluster_individual in clusters[key]["solutions"]:
+                             pop_useless.append(cluster_individual)
+
+         # return useful and useless population and the centroid solutions
+         return pop_useful, pop_useless, c
+
+     def naive_bayesian_classifier(self, pop_useful, pop_useless):
+         """
+         Train a naive Bayesian classifier using the useful and useless populations.
+         :param pop_useful: Useful population
+         :param pop_useless: Useless population
+         :return: Trained GaussianNB classifier
+         """
+         labeled_useful_solutions = []
+         labeled_useless_solutions = []
+
+         # add labels to solutions
+         for individual in pop_useful:
+             labeled_useful_solutions.append((individual, +1))
+
+         for individual in pop_useless:
+             labeled_useless_solutions.append((individual, -1))
+
+         x_train = []
+         y_train = []
+
+         for i in range(len(labeled_useful_solutions)):
+             x_train.append(labeled_useful_solutions[i][0])
+             y_train.append(labeled_useful_solutions[i][1])
+
+         for i in range(len(labeled_useless_solutions)):
+             x_train.append(labeled_useless_solutions[i][0])
+             y_train.append(labeled_useless_solutions[i][1])
+
+         x_train = np.asarray(x_train)
+         y_train = np.asarray(y_train)
+
+         # fit the naive bayesian classifier with the training data
+         model = GaussianNB()
+         model.fit(x_train, y_train)
+
+         return model
+
+     def add_to_ps(self):
+         """
+         Add the current Pareto optimal set (POS) to the Pareto set (PS) with individual keys.
+         """
+
+         PS_counter = 0
+
+         for individual in self.opt:
+
+             if isinstance(individual.X, list):
+                 individual.X = np.asarray(individual.X)
+
+             centroid = self.calculate_cluster_centroid(individual.X)
+
+             self.ps[str(PS_counter) + "-" + str(self.t)] = {
+                 "solutions": [individual.X.tolist()],
+                 "centroid": centroid,
+             }
+
+             PS_counter += 1
+
+     def predicted_population(self, X_test, Y_test):
+         """
+         Create a predicted population from the test set with positive labels.
+         :param X_test: Test set of features
+         :param Y_test: Test set of labels
+         :return: Predicted population
+         """
+         predicted_pop = []
+         for i in range(len(Y_test)):
+             if Y_test[i] == 1:
+                 predicted_pop.append(X_test[i])
+         return predicted_pop
+
+     def calculate_cluster_centroid(self, solution_cluster):
+         """
+         Calculate the centroid for a given cluster of solutions.
+         :param solution_cluster: List of solutions in the cluster
+         :return: Cluster centroid
+         """
+         # Get number of variable shape
+         try:
+             n_vars = len(solution_cluster[0])
+         except TypeError:
+             solution_cluster = np.array(solution_cluster)
+             return solution_cluster.tolist()
+
+         # TODO: this is lazy garbage fix whats coming in
+         cluster = []
+         for i in range(len(solution_cluster)):
+             # cluster.append(solution_cluster[i].tolist())
+             cluster.append(solution_cluster[i])
+         solution_cluster = np.asarray(cluster)
+
+         # Get number of solutions
+         length = solution_cluster.shape[0]
+
+         centroid_points = []
+
+         # calculate centroid for each variable, by taking mean of every variable of cluster
+         for i in range(n_vars):
+             # calculate sum over cluster
+             centroid_points.append(np.sum(solution_cluster[:, i]))
+
+         return [x / length for x in centroid_points]
+
+     def check_boundaries(self, pop):
+         """
+         Check and fix the boundaries of the given population.
+         :param pop: Population to check and fix boundaries
+         :return: Population with corrected boundaries
+         """
+         # check whether numpy array or pymoo population is given
+         if isinstance(pop, Population):
+             pop = pop.get("X")
+
+         # check if any solution is outside the bounds
+         for individual in pop:
+             for i in range(len(individual)):
+                 if individual[i] > self.problem.xu[i]:
+                     individual[i] = self.problem.xu[i]
+                 elif individual[i] < self.problem.xl[i]:
+                     individual[i] = self.problem.xl[i]
+         return pop
+
+     def random_strategy(self, N_r):
+         """
+         Generate a random population within the problem boundaries.
+         :param N_r: Number of random solutions to generate
+         :return: Randomly generated population
+         """
+         # generate a random population of size N_r
+         # TODO: Check boundaries
+         random_pop = self.random_state.random((N_r, self.problem.n_var))
+
+         # check if any solution is outside the bounds
+         for individual in random_pop:
+             for i in range(len(individual)):
+                 if individual[i] > self.problem.xu[i]:
+                     individual[i] = self.problem.xu[i]
+                 elif individual[i] < self.problem.xl[i]:
+                     individual[i] = self.problem.xl[i]
+
+         return random_pop
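The core of the KGB response above is a binary usefulness filter: archived Pareto-set members are labelled +1 (useful) or -1 (useless), a Gaussian naive Bayes model is fitted on them, and only candidate solutions predicted as useful are kept for the reseeded population. A minimal standalone sketch of just that step follows (plain numpy/scikit-learn, independent of the class above; the function name and toy data are illustrative only).

import numpy as np
from sklearn.naive_bayes import GaussianNB

def filter_candidates(useful, useless, candidates):
    """Keep only candidates that a Gaussian naive Bayes model labels as useful (+1)."""
    X = np.vstack((useful, useless))
    y = np.concatenate((np.ones(len(useful)), -np.ones(len(useless))))
    model = GaussianNB().fit(X, y)
    return candidates[model.predict(candidates) == 1]

# toy run: "useful" points cluster near 0.2, "useless" near 0.8 in a 3-variable space
rng = np.random.default_rng(0)
useful = rng.normal(0.2, 0.05, size=(20, 3))
useless = rng.normal(0.8, 0.05, size=(20, 3))
candidates = rng.random((500, 3))
print(filter_candidates(useful, useless, candidates).shape)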
pydmoo/algorithms/knowledge/nsga2_ktmm.py (new file)
@@ -0,0 +1,115 @@
+ import numpy as np
+ from pymoo.core.population import Population
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
+
+
+ class NSGA2KTMM(DNSGA2):
+     """Knowledge Transfer with Mixture Model.
+
+     References
+     ----------
+     Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
+     Knowledge transfer with mixture model in dynamic multi-objective optimization.
+     IEEE Transactions on Evolutionary Computation, in press.
+     https://doi.org/10.1109/TEVC.2025.3566481
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+         self.size_pool = 14 # the size of knowledge pool
+         self.denominator = 0.5
+
+     def _response_mechanism(self) -> Population:
+         """Response mechanism."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without being evaluated
+         pop = Population.new(X=X)
+
+         # sample self.pop_size solutions in decision space
+         samples_old = self.sampling_new_pop()
+
+         # select self.pop_size/2 individuals with better convergence and diversity
+         samples = samples_old[:int(len(samples_old)/2)]
+
+         # knowledge in decision space
+         means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
+         mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)
+
+         # sample self.pop_size solutions in decision space
+         X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
+
+         # bounds
+         if self.problem.has_bounds():
+             xl, xu = self.problem.bounds()
+             X = np.clip(X, xl, xu) # not provided in the original reference literature
+
+         # merge
+         pop = Population.merge(samples_old, Population.new(X=X))
+
+         return pop
+
+     def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
+         # decision space or objective space
+         flag = "X" if decision_or_objective == "decision_space" else "F"
+
+         means_stds = self.data.get("means_stds", [])
+
+         flag_value = self.opt.get(flag)
+         if len(flag_value) <= 1:
+             flag_value = self.pop.get(flag)
+             flag_value = flag_value[:2]
+
+         means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1)) # 1-based
+         self.data["means_stds"] = means_stds
+
+         flag_value = samples.get(flag)
+         mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
+         return means_stds, mean, std
+
+     def sampling_new_pop(self):
+         samples = self.initialization.sampling(self.problem, self.pop_size)
+         samples = self.evaluator.eval(self.problem, samples)
+
+         # do a survival to recreate rank and crowding of all individuals
+         samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
+         return samples
+
+     def _select_means_stds(self, means_stds, mean_new, std_new):
+         # Unpack means and stds
+         means = np.array([m[0] for m in means_stds])
+         stds = np.array([m[1] for m in means_stds])
+
+         # Calculate distances
+         mean_diffs = means - mean_new
+         std_diffs = stds - std_new
+
+         distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))
+
+         # Get top K closest
+         top_k_idx = np.argsort(distances)[:self.size_pool]
+         top_k_dist = distances[top_k_idx]
+         top_k_means = means[top_k_idx]
+         top_k_stds = stds[top_k_idx]
+
+         # Update pool
+         self._update_means_stds_pool(means_stds, top_k_idx)
+
+         # Calculate weights
+         weights = 1 / (top_k_dist + 1e-8) # Add small epsilon to avoid division by zero
+         weights = weights / (np.sum(weights) + self.denominator)
+
+         # Weighted combination
+         mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
+         std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
+         return mean_new, std_new
+
+     def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
+         self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
+         return None
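The KTMM response mechanism blends the mean and standard deviation of the freshly sampled population with the closest historical (mean, std) pairs before drawing new solutions, as done by `_select_means_stds` above. A minimal numpy sketch of that distance-weighted blending follows (the function name and toy pool are illustrative; the real class keeps the pool in `self.data["means_stds"]` and samples via `univariate_gaussian_sample`).

import numpy as np

def blend_with_pool(mean_new, std_new, pool, k=14, denominator=0.5):
    """Blend the current (mean, std) with the k closest historical pairs,
    weighting each by the inverse of its distance, as in _select_means_stds."""
    means = np.array([m for m, _ in pool])
    stds = np.array([s for _, s in pool])
    d = np.sqrt(np.sum((means - mean_new) ** 2, axis=1) + np.sum((stds - std_new) ** 2, axis=1))
    idx = np.argsort(d)[:k]
    w = 1.0 / (d[idx] + 1e-8)
    w = w / (w.sum() + denominator)  # weights sum to less than 1, so the current estimate keeps some mass
    mean_b = (1 - w.sum()) * mean_new + (w[:, None] * means[idx]).sum(axis=0)
    std_b = (1 - w.sum()) * std_new + (w[:, None] * stds[idx]).sum(axis=0)
    return mean_b, std_b

# toy run with a pool of three historical (mean, std) pairs in a 2-variable space
pool = [(np.array([0.1, 0.2]), np.array([0.05, 0.05])),
        (np.array([0.4, 0.4]), np.array([0.10, 0.10])),
        (np.array([0.9, 0.8]), np.array([0.20, 0.20]))]
m, s = blend_with_pool(np.array([0.3, 0.3]), np.array([0.08, 0.08]), pool)
print(m, s)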
@@ -30,7 +30,7 @@ class MOEADTr(DMOEAD):
          #
          self._maxiter = max(self.pop_size, 100) # default is 1000

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X, F = pop.get("X", "F")
@@ -30,7 +30,7 @@ class MOEADDETr(DMOEADDE):
          #
          self._maxiter = max(self.pop_size, 100) # default is 1000

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X, F = pop.get("X", "F")
@@ -30,7 +30,7 @@ class NSGA2Tr(DNSGA2):
          #
          self._maxiter = max(self.pop_size, 100) # default is 1000

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X, F = pop.get("X", "F")
@@ -19,7 +19,7 @@ class MOEADIMKT(MOEADKTMM):
          self.size_pool = 10
          self.denominator = 0.5

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -16,7 +16,7 @@ class MOEADIMKTIGP(MOEADIMKT):
          self.sigma_n = 0.01
          self.sigma_n_2 = self.sigma_n ** 2

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -42,7 +42,7 @@ class MOEADIMLSTM(MOEADIMKT):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -19,7 +19,7 @@ class MOEADIMKTN(MOEADIMKT):
          self.size_pool = 10
          self.denominator = 0.5

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -16,7 +16,7 @@ class MOEADIMKTNIGP(MOEADIMKTN):
          self.sigma_n = 0.01
          self.sigma_n_2 = self.sigma_n ** 2

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -43,7 +43,7 @@ class MOEADIMNLSTM(MOEADIMKTN):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -22,7 +22,7 @@ class MOEADKTMM(DMOEAD):
          self.size_pool = 14 # the size of knowledge pool
          self.denominator = 0.5

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -19,7 +19,7 @@ class MOEADDEIMKT(MOEADDEKTMM):
          self.size_pool = 10
          self.denominator = 0.5

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -36,7 +36,7 @@ class MOEADDEIMcLSTM(MOEADDEIMKT):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -16,7 +16,7 @@ class MOEADDEIMKTIGP(MOEADDEIMKT):
          self.sigma_n = 0.01
          self.sigma_n_2 = self.sigma_n ** 2

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -42,7 +42,7 @@ class MOEADDEIMLSTM(MOEADDEIMKT):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -19,7 +19,7 @@ class MOEADDEIMKTN(MOEADDEIMKT):
          self.size_pool = 10
          self.denominator = 0.5

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -37,7 +37,7 @@ class MOEADDEIMNcLSTM(MOEADDEIMKTN):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -16,7 +16,7 @@ class MOEADDEIMKTNIGP(MOEADDEIMKTN):
          self.sigma_n = 0.01
          self.sigma_n_2 = self.sigma_n ** 2

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")
@@ -43,7 +43,7 @@ class MOEADDEIMNLSTM(MOEADDEIMKTN):
              incremental_learning=self._incremental_learning,
          )

-     def _response_mechanism(self):
+     def _response_mechanism(self) -> Population:
          """Response mechanism."""
          pop = self.pop
          X = pop.get("X")