pydmoo 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. pydmoo/algorithms/classic/moead_ae.py +1 -1
  2. pydmoo/algorithms/classic/moead_pps.py +1 -1
  3. pydmoo/algorithms/classic/moead_svr.py +1 -1
  4. pydmoo/algorithms/classic/moeadde_ae.py +1 -1
  5. pydmoo/algorithms/classic/moeadde_pps.py +1 -1
  6. pydmoo/algorithms/classic/moeadde_svr.py +1 -1
  7. pydmoo/algorithms/classic/nsga2_ae.py +1 -1
  8. pydmoo/algorithms/classic/nsga2_pps.py +1 -1
  9. pydmoo/algorithms/knowledge/__init__.py +0 -0
  10. pydmoo/algorithms/knowledge/moead_kgb.py +407 -0
  11. pydmoo/algorithms/knowledge/moead_ktmm.py +3 -1
  12. pydmoo/algorithms/knowledge/moeadde_kgb.py +407 -0
  13. pydmoo/algorithms/knowledge/moeadde_ktmm.py +3 -1
  14. pydmoo/algorithms/knowledge/nsga2_kgb.py +378 -0
  15. pydmoo/algorithms/knowledge/nsga2_ktmm.py +3 -1
  16. pydmoo/algorithms/learning/moead_tr.py +1 -1
  17. pydmoo/algorithms/learning/moeadde_tr.py +1 -1
  18. pydmoo/algorithms/learning/nsga2_tr.py +1 -1
  19. pydmoo/algorithms/modern/moead_imkt.py +1 -1
  20. pydmoo/algorithms/modern/moead_imkt_igp.py +1 -1
  21. pydmoo/algorithms/modern/moead_imkt_lstm.py +1 -1
  22. pydmoo/algorithms/modern/moead_imkt_n.py +1 -1
  23. pydmoo/algorithms/modern/moead_imkt_n_igp.py +1 -1
  24. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +1 -1
  25. pydmoo/algorithms/modern/moead_ktmm.py +1 -1
  26. pydmoo/algorithms/modern/moeadde_imkt.py +1 -1
  27. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +1 -1
  28. pydmoo/algorithms/modern/moeadde_imkt_igp.py +1 -1
  29. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +1 -1
  30. pydmoo/algorithms/modern/moeadde_imkt_n.py +1 -1
  31. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +1 -1
  32. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +1 -1
  33. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +1 -1
  34. pydmoo/algorithms/modern/moeadde_ktmm.py +1 -1
  35. pydmoo/algorithms/modern/nsga2_imkt.py +1 -1
  36. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +1 -1
  37. pydmoo/algorithms/modern/nsga2_imkt_igp.py +1 -1
  38. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +1 -1
  39. pydmoo/algorithms/modern/nsga2_imkt_n.py +1 -1
  40. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +1 -1
  41. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +1 -1
  42. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +1 -1
  43. pydmoo/algorithms/modern/nsga2_ktmm.py +1 -1
  44. {pydmoo-0.1.4.dist-info → pydmoo-0.1.5.dist-info}/METADATA +1 -1
  45. pydmoo-0.1.5.dist-info/RECORD +90 -0
  46. pydmoo-0.1.4.dist-info/RECORD +0 -86
  47. {pydmoo-0.1.4.dist-info → pydmoo-0.1.5.dist-info}/WHEEL +0 -0
  48. {pydmoo-0.1.4.dist-info → pydmoo-0.1.5.dist-info}/licenses/LICENSE +0 -0
@@ -21,7 +21,7 @@ class MOEADAE(DMOEAD):
21
21
 
22
22
  super().__init__(**kwargs)
23
23
 
24
- def _response_mechanism(self):
24
+ def _response_mechanism(self) -> Population:
25
25
  """Response mechanism."""
26
26
  pop = self.pop
27
27
  X = pop.get("X")
@@ -25,7 +25,7 @@ class MOEADPPS(DMOEAD):
25
25
  self.p = 3 # the order of the AR model
26
26
  self.M = 23 # the length of history mean point series
27
27
 
28
- def _response_mechanism(self):
28
+ def _response_mechanism(self) -> Population:
29
29
  """Response mechanism."""
30
30
  pop = self.pop
31
31
  X = pop.get("X")
@@ -29,7 +29,7 @@ class MOEADSVR(DMOEAD):
29
29
  self._epsilon = 0.05 # the insensitive tube size in SVR model
30
30
  # self._gamma = 1/d # the Gaussian RBF kernel parameter used in SVR model, and d is the number of variables
31
31
 
32
- def _response_mechanism(self):
32
+ def _response_mechanism(self) -> Population:
33
33
  """Response mechanism."""
34
34
  pop = self.pop
35
35
  X = pop.get("X")
@@ -21,7 +21,7 @@ class MOEADDEAE(DMOEADDE):
21
21
 
22
22
  super().__init__(**kwargs)
23
23
 
24
- def _response_mechanism(self):
24
+ def _response_mechanism(self) -> Population:
25
25
  """Response mechanism."""
26
26
  pop = self.pop
27
27
  X = pop.get("X")
@@ -25,7 +25,7 @@ class MOEADDEPPS(DMOEADDE):
25
25
  self.p = 3 # the order of the AR model
26
26
  self.M = 23 # the length of history mean point series
27
27
 
28
- def _response_mechanism(self):
28
+ def _response_mechanism(self) -> Population:
29
29
  """Response mechanism."""
30
30
  pop = self.pop
31
31
  X = pop.get("X")
@@ -29,7 +29,7 @@ class MOEADDESVR(DMOEADDE):
29
29
  self._epsilon = 0.05 # the insensitive tube size in SVR model
30
30
  # self._gamma = 1/d # the Gaussian RBF kernel parameter used in SVR model, and d is the number of variables
31
31
 
32
- def _response_mechanism(self):
32
+ def _response_mechanism(self) -> Population:
33
33
  """Response mechanism."""
34
34
  pop = self.pop
35
35
  X = pop.get("X")
@@ -20,7 +20,7 @@ class NSGA2AE(DNSGA2):
20
20
 
21
21
  super().__init__(**kwargs)
22
22
 
23
- def _response_mechanism(self):
23
+ def _response_mechanism(self) -> Population:
24
24
  """Response mechanism."""
25
25
  pop = self.pop
26
26
  X = pop.get("X")
@@ -25,7 +25,7 @@ class NSGA2PPS(DNSGA2):
25
25
  self.p = 3 # the order of the AR model
26
26
  self.M = 23 # the length of history mean point series
27
27
 
28
- def _response_mechanism(self):
28
+ def _response_mechanism(self) -> Population:
29
29
  """Response mechanism."""
30
30
  pop = self.pop
31
31
  X = pop.get("X")
File without changes
@@ -0,0 +1,407 @@
1
+ import random
2
+ import time
3
+
4
+ import numpy as np
5
+ from pymoo.core.population import Population
6
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
7
+ from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
8
+ from sklearn.naive_bayes import GaussianNB
9
+
10
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
11
+
12
+
13
+ def euclidean_distance(a, b):
14
+ a = np.array(a)
15
+ b = np.array(b)
16
+ return np.sqrt(np.sum((a - b) ** 2))
17
+
18
+
19
class MOEADKGB(DMOEAD):
    """Knowledge Guided Bayesian (KGB) response mechanism on top of MOEA/D.

    References
    ----------
    Ye, Y., Li, L., Lin, Q., Wong, K.-C., Li, J., and Ming, Z. (2022).
    Knowledge guided bayesian classification for dynamic multi-objective optimization.
    Knowledge-Based Systems, 250, 109173.
    https://doi.org/10.1016/j.knosys.2022.109173
    """

    def __init__(
        self,
        c_size=13,
        perturb_dev=0.1,
        ps=None,
        save_ps=False,
        **kwargs,
    ):
        """Initialize the KGB variant of MOEA/D.

        Parameters
        ----------
        c_size : int
            Number of clusters kept by the knowledge reconstruction examination.
        perturb_dev : float
            Standard deviation of the Gaussian noise used to perturb
            previously useful solutions.
        ps : dict, optional
            Historical Pareto-set archive. Defaults to a fresh empty dict per
            instance. (The previous ``ps={}`` default was a mutable default
            argument, silently shared by every MOEADKGB instance.)
        save_ps : bool
            Whether the PS archive should be saved.
        """
        super().__init__(**kwargs)

        self.C_SIZE = c_size
        self.PERTURB_DEV = perturb_dev

        # avoid the shared mutable-default bug: each instance gets its own dict
        self.ps = {} if ps is None else ps
        self.save_ps = save_ps

        # number of random candidate solutions fed to the classifier
        self.nr_rand_solutions = 50 * self.pop_size
        # environment/time counter, used to build unique PS archive keys
        self.t = 0
48
+
49
def _next_static_dynamic(self):
    """Advance one step in a (possibly) dynamic environment.

    Archives the current Pareto-optimal set, checks for an environment
    change and, when one is detected, runs the KGB response mechanism,
    re-evaluates the populations and trims the result back to the
    population size.

    :return: the population to continue the search with
    """
    pop = self.pop

    # a non-None state means no dynamic handling this step
    if self.state is not None:
        return pop

    # archive the current POS
    self.add_to_ps()

    if not self._detect_change_sample_part_population():
        return pop

    began = time.time()

    pop = self._response_mechanism()

    # the environment changed: force re-evaluation of the old population ...
    self.evaluator.eval(self.problem, self.pop, skip_already_evaluated=False, algorithm=self)
    # ... and evaluate the population produced by the response mechanism
    self.evaluator.eval(self.problem, pop)

    if len(pop) > self.pop_size:
        # survival recreates rank/crowding and trims back to pop_size
        # Modified by DynOpt on Dec 21, 2025
        # n_survive=len(pop) -> n_survive=self.pop_size
        pop = RankAndCrowding().do(
            self.problem, pop, n_survive=self.pop_size, random_state=self.random_state
        )  # len(pop)

    self.data["response_duration"] = time.time() - began
    return pop
81
+
82
def _response_mechanism(self) -> Population:
    """Response mechanism.

    Builds a new initial population after an environment change: the
    useful/useless historical solutions train a naive Bayes classifier,
    which filters randomly generated candidates; the accepted candidates,
    the cluster centroids and (if needed) historical/random fill-ins form
    the new population.

    Returns
    -------
    Population
        Evaluated population with rank and crowding assigned.
    """
    # increase t counter for unique key of PS
    self.t += 1

    # conduct knowledge reconstruction examination
    pop_useful, pop_useless, centroids = self.knowledge_reconstruction_examination()

    # Train a naive bayesian classifier
    model = self.naive_bayesian_classifier(pop_useful, pop_useless)

    # generate a lot of random solutions with the dimensions of problem decision space
    X_test = self.random_strategy(self.nr_rand_solutions)

    # introduce noise to vary previously useful solutions
    noise = self.random_state.normal(0, self.PERTURB_DEV, self.problem.n_var)
    noisy_useful_history = np.asarray(pop_useful) + noise

    # check whether solutions are within bounds
    noisy_useful_history = self.check_boundaries(noisy_useful_history)

    # add noisy useful history to randomly generated solutions
    X_test = np.vstack((X_test, noisy_useful_history))

    # predict whether random solutions are useful or useless
    Y_test = model.predict(X_test)

    # create list of useful predicted solutions
    predicted_pop = self.predicted_population(X_test, Y_test)

    n_sample = self.pop_size - self.C_SIZE

    # ------ POPULATION GENERATION --------
    if len(predicted_pop) >= n_sample:
        # enough predicted solutions: subsample them.  Use the seeded
        # self.random_state (rather than the global `random` module, which
        # ignores the algorithm's seed) so runs stay reproducible.
        picked = self.random_state.choice(len(predicted_pop), n_sample, replace=False)
        init_pop = [predicted_pop[i] for i in picked]
    else:
        # if not enough predicted solutions are available, take them all
        init_pop = list(predicted_pop)

    # add cluster centroids to init_pop (both branches did this identically)
    for solution in centroids:
        init_pop.append(np.asarray(solution))

    # if there are still not enough solutions, sample previously useful
    # solutions directly (without noise) into init_pop
    if len(init_pop) < self.pop_size:
        missing = self.pop_size - len(init_pop)
        if len(pop_useful) >= missing:
            picked = self.random_state.choice(len(pop_useful), missing, replace=False)
            init_pop = np.vstack((init_pop, [pop_useful[i] for i in picked]))
        else:
            # not enough useful history either: add all of it
            init_pop.extend(pop_useful)

    # finally, top up with random solutions in the decision space
    if len(init_pop) < self.pop_size:
        init_pop = np.vstack(
            (init_pop, self.random_strategy(self.pop_size - len(init_pop)))
        )

    # recreate the current population without being evaluated
    pop = Population.new(X=init_pop)

    # reevaluate because we know there was a change
    self.evaluator.eval(self.problem, pop)

    # do a survival to recreate rank and crowding of all individuals
    pop = RankAndCrowding().do(self.problem, pop, n_survive=len(pop), random_state=self.random_state)  # self.pop_size

    return pop
175
+
176
def knowledge_reconstruction_examination(self):
    """Perform the knowledge reconstruction examination.

    Hierarchically merges the historical PS clusters down to ``self.C_SIZE``
    clusters, evaluates the cluster centroids and splits the archived
    solutions into useful (solutions of non-dominated centroids) and
    useless ones.

    NOTE(review): ``clusters`` aliases ``self.ps``, so the merging condenses
    the archive in place — presumably intended, mirrors the reference code.

    :return: Tuple containing the useful population, useless population, and cluster centroids
    """
    clusters = self.ps  # set historical PS set as clusters
    Nc = self.C_SIZE  # set final nr of clusters
    size = len(self.ps)  # set size iteration to length of cluster

    # while there are still clusters to be condensed
    while size > Nc:

        min_distance = None
        min_distance_index = []

        # find the pair of clusters whose centroids are closest
        for keys_i in clusters.keys():
            for keys_j in clusters.keys():
                if clusters[keys_i]["solutions"] is clusters[keys_j]["solutions"]:
                    continue

                dst = euclidean_distance(
                    clusters[keys_i]["centroid"],
                    clusters[keys_j]["centroid"],
                )

                # `is None` (identity), not `== None`; both original update
                # branches were identical, so they are collapsed here
                if min_distance is None or dst < min_distance:
                    min_distance = dst
                    min_distance_index = [keys_i, keys_j]

        keep, merged = min_distance_index

        # merge closest clusters
        for solution in clusters[merged]["solutions"]:
            clusters[keep]["solutions"].append(solution)

        # calculate new centroid for merged cluster
        clusters[keep]["centroid"] = self.calculate_cluster_centroid(
            clusters[keep]["solutions"]
        )

        # remove cluster that was merged
        del clusters[merged]

        size -= 1

    c = []  # list of centroids
    pop_useful = []
    pop_useless = []

    # get centroids of clusters
    for key in clusters.keys():
        c.append(clusters[key]["centroid"])

    # create pymoo population object to evaluate centroid solutions
    centroid_pop = Population.new("X", c)

    # evaluate centroids
    self.evaluator.eval(self.problem, centroid_pop)

    # do non-dominated sorting on centroid solutions
    ranking = NonDominatedSorting().do(centroid_pop.get("F"), return_rank=True)[-1]

    # solutions from rank-0 (non-dominated) centroids are useful, the rest useless
    for idx, rank in enumerate(ranking):
        target = pop_useful if rank == 0 else pop_useless
        for key in clusters.keys():
            # np.array_equal instead of `X == centroid`: the latter is an
            # element-wise array comparison whose truth value is ambiguous
            # (raises ValueError) whenever n_var > 1
            if np.array_equal(centroid_pop[idx].X, clusters[key]["centroid"]):
                for cluster_individual in clusters[key]["solutions"]:
                    target.append(cluster_individual)

    # return useful and useless population and the centroid solutions
    return pop_useful, pop_useless, c
266
+
267
def naive_bayesian_classifier(self, pop_useful, pop_useless):
    """Train a naive Bayesian classifier using the useful and useless populations.

    Useful solutions are labeled +1, useless ones -1.

    :param pop_useful: Useful population
    :param pop_useless: Useless population
    :return: Trained GaussianNB classifier
    """
    # features: useful solutions first, then useless ones (same order as labels)
    x_train = np.asarray(list(pop_useful) + list(pop_useless))

    # labels: +1 for every useful solution, -1 for every useless one
    y_train = np.asarray([+1] * len(pop_useful) + [-1] * len(pop_useless))

    # fit the naive bayesian classifier with the training data
    model = GaussianNB()
    model.fit(x_train, y_train)

    return model
303
+
304
def add_to_ps(self):
    """Add the current Pareto optimal set (POS) to the PS archive.

    Each optimal individual becomes its own single-solution cluster,
    keyed by the unique string ``"<index>-<t>"``.
    """
    for counter, individual in enumerate(self.opt):

        x = individual.X
        if isinstance(x, list):
            x = np.asarray(x)
            individual.X = x

        self.ps[f"{counter}-{self.t}"] = {
            "solutions": [x.tolist()],
            "centroid": self.calculate_cluster_centroid(x),
        }
324
+
325
def predicted_population(self, X_test, Y_test):
    """Collect the test samples that were classified as useful.

    :param X_test: Test set of features
    :param Y_test: Predicted labels, aligned with ``X_test``
    :return: List of solutions whose label is +1
    """
    return [x for x, label in zip(X_test, Y_test) if label == 1]
337
+
338
def calculate_cluster_centroid(self, solution_cluster):
    """Calculate the centroid for a given cluster of solutions.

    A flat solution vector (whose first element is a bare number) is
    returned unchanged as a plain list; otherwise the per-variable mean
    over all solutions of the cluster is computed.

    :param solution_cluster: List of solutions in the cluster
    :return: Cluster centroid
    """
    # probe the first element: a scalar means this is one flat solution
    try:
        n_vars = len(solution_cluster[0])
    except TypeError:
        return np.array(solution_cluster).tolist()

    # normalize to a 2-D matrix of shape (n_solutions, n_vars)
    matrix = np.asarray(list(solution_cluster))
    n_solutions = matrix.shape[0]

    # centroid = column sum divided by the number of solutions
    return [matrix[:, j].sum() / n_solutions for j in range(n_vars)]
369
+
370
def check_boundaries(self, pop):
    """Check and fix the boundaries of the given population.

    Every variable of every solution is clamped (in place) into the
    problem's ``[xl, xu]`` box.

    :param pop: Population to check and fix boundaries
    :return: Population with corrected boundaries
    """
    # accept either a pymoo Population or a raw design matrix
    if isinstance(pop, Population):
        pop = pop.get("X")

    # clamp each solution against the lower/upper bounds, in place
    for individual in pop:
        np.clip(individual, self.problem.xl, self.problem.xu, out=individual)

    return pop
388
+
389
def random_strategy(self, N_r):
    """Generate a random population within the problem boundaries.

    Solutions are drawn uniformly from [0, 1) per variable, then clamped
    into the problem's ``[xl, xu]`` box.

    :param N_r: Number of random solutions to generate
    :return: Randomly generated (N_r, n_var) population
    """
    # generate a random population of size N_r
    random_pop = self.random_state.random((N_r, self.problem.n_var))

    # clamp each solution against the decision-space bounds, in place
    for individual in random_pop:
        np.clip(individual, self.problem.xl, self.problem.xu, out=individual)

    return random_pop
@@ -9,6 +9,8 @@ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
9
9
  class MOEADKTMM(DMOEAD):
10
10
  """Knowledge Transfer with Mixture Model.
11
11
 
12
+ References
13
+ ----------
12
14
  Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
13
15
  Knowledge transfer with mixture model in dynamic multi-objective optimization.
14
16
  IEEE Transactions on Evolutionary Computation, in press.
@@ -22,7 +24,7 @@ class MOEADKTMM(DMOEAD):
22
24
  self.size_pool = 14 # the size of knowledge pool
23
25
  self.denominator = 0.5
24
26
 
25
- def _response_mechanism(self):
27
+ def _response_mechanism(self) -> Population:
26
28
  """Response mechanism."""
27
29
  pop = self.pop
28
30
  X = pop.get("X")