pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69):
  1. pydmoo/algorithms/base/__init__.py +20 -0
  2. pydmoo/algorithms/base/core/__init__.py +0 -0
  3. pydmoo/algorithms/base/core/algorithm.py +416 -0
  4. pydmoo/algorithms/base/core/genetic.py +129 -0
  5. pydmoo/algorithms/base/dmoo/__init__.py +0 -0
  6. pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
  7. pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
  8. pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
  9. pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
  10. pydmoo/algorithms/base/moo/__init__.py +0 -0
  11. pydmoo/algorithms/base/moo/moead.py +199 -0
  12. pydmoo/algorithms/base/moo/moeadde.py +105 -0
  13. pydmoo/algorithms/base/moo/mopso.py +0 -0
  14. pydmoo/algorithms/base/moo/nsga2.py +122 -0
  15. pydmoo/algorithms/modern/__init__.py +94 -0
  16. pydmoo/algorithms/modern/moead_imkt.py +161 -0
  17. pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
  18. pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
  19. pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
  20. pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
  21. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
  22. pydmoo/algorithms/modern/moead_ktmm.py +112 -0
  23. pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
  24. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
  25. pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
  26. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
  27. pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
  28. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
  29. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
  30. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
  31. pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
  32. pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
  33. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
  34. pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
  35. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
  36. pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
  37. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
  38. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
  39. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
  40. pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
  41. pydmoo/algorithms/utils/__init__.py +0 -0
  42. pydmoo/algorithms/utils/utils.py +166 -0
  43. pydmoo/core/__init__.py +0 -0
  44. pydmoo/{response → core}/ar_model.py +4 -4
  45. pydmoo/{response → core}/bounds.py +35 -2
  46. pydmoo/core/distance.py +45 -0
  47. pydmoo/core/inverse.py +55 -0
  48. pydmoo/core/lstm/__init__.py +0 -0
  49. pydmoo/core/lstm/base.py +291 -0
  50. pydmoo/core/lstm/lstm.py +491 -0
  51. pydmoo/core/manifold.py +93 -0
  52. pydmoo/core/predictions.py +50 -0
  53. pydmoo/core/sample_gaussian.py +56 -0
  54. pydmoo/core/sample_uniform.py +63 -0
  55. pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
  56. pydmoo/problems/__init__.py +53 -49
  57. pydmoo/problems/dyn.py +94 -13
  58. pydmoo/problems/dynamic/cec2015.py +10 -5
  59. pydmoo/problems/dynamic/df.py +6 -3
  60. pydmoo/problems/dynamic/gts.py +69 -34
  61. pydmoo/problems/real_world/__init__.py +0 -0
  62. pydmoo/problems/real_world/dsrp.py +168 -0
  63. pydmoo/problems/real_world/dwbdp.py +189 -0
  64. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
  65. pydmoo-0.1.0.dist-info/RECORD +70 -0
  66. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
  67. pydmoo-0.0.18.dist-info/RECORD +0 -15
  68. /pydmoo/{response → algorithms}/__init__.py +0 -0
  69. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,122 @@
1
+ """
2
+ Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
3
+
4
+ Sources:
5
+ - [nsga2.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/algorithms/moo/nsga2.py).
6
+
7
+ Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
8
+ """
9
+
10
+ import warnings
11
+
12
+ import numpy as np
13
+ from pymoo.docs import parse_doc_string
14
+ from pymoo.operators.crossover.sbx import SBX
15
+ from pymoo.operators.mutation.pm import PM
16
+ from pymoo.operators.sampling.rnd import FloatRandomSampling
17
+ from pymoo.operators.selection.tournament import TournamentSelection, compare
18
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
19
+ from pymoo.termination.default import DefaultMultiObjectiveTermination
20
+ from pymoo.util.display.multi import MultiObjectiveOutput
21
+ from pymoo.util.dominator import Dominator
22
+ from pymoo.util.misc import has_feasible
23
+
24
+ from pydmoo.algorithms.base.core.genetic import GeneticAlgorithm
25
+
26
+ # ---------------------------------------------------------------------------------------------------------
27
+ # Binary Tournament Selection Function
28
+ # ---------------------------------------------------------------------------------------------------------
29
+
30
+
31
def binary_tournament(pop, P, algorithm, **kwargs):
    """Binary tournament selection for NSGA-II (adapted from pymoo).

    Parameters
    ----------
    pop : Population
        Current population. Individuals expose ``CV`` (constraint violation),
        ``F`` (objective values) and the ``rank``/``crowding`` attributes
        assigned by the survival operator.
    P : ndarray of shape (n_tournaments, 2)
        Indices into ``pop`` of the two competitors per tournament.
    algorithm : object
        Supplies ``tournament_type`` and ``random_state`` (used for random
        tie-breaking).

    Returns
    -------
    ndarray of shape (n_tournaments, 1)
        Index of the winner of each tournament, as ints.
    """
    n_tournaments, n_parents = P.shape

    if n_parents != 2:
        raise ValueError("Only implemented for binary tournament!")

    tournament_type = algorithm.tournament_type
    # NaN marks "undecided"; resolved by the crowding tie-break below.
    S = np.full(n_tournaments, np.nan)

    for i in range(n_tournaments):

        a, b = P[i, 0], P[i, 1]
        a_cv, a_f, b_cv, b_f = pop[a].CV[0], pop[a].F, pop[b].CV[0], pop[b].F
        rank_a, cd_a = pop[a].get("rank", "crowding")
        rank_b, cd_b = pop[b].get("rank", "crowding")

        # if at least one solution is infeasible, the less-violating one wins
        if a_cv > 0.0 or b_cv > 0.0:
            S[i] = compare(a, a_cv, b, b_cv, method='smaller_is_better', return_random_if_equal=True, random_state=algorithm.random_state)

        # both solutions are feasible
        else:

            if tournament_type == 'comp_by_dom_and_crowding':
                rel = Dominator.get_relation(a_f, b_f)
                if rel == 1:
                    S[i] = a
                elif rel == -1:
                    S[i] = b

            elif tournament_type == 'comp_by_rank_and_crowding':
                S[i] = compare(a, rank_a, b, rank_b, method='smaller_is_better')

            else:
                raise Exception("Unknown tournament type.")

            # if rank or domination relation didn't make a decision compare by crowding
            if np.isnan(S[i]):
                S[i] = compare(a, cd_a, b, cd_b, method='larger_is_better', return_random_if_equal=True, random_state=algorithm.random_state)

    return S[:, None].astype(int, copy=False)
72
+
73
+
74
+ # ---------------------------------------------------------------------------------------------------------
75
+ # Survival Selection
76
+ # ---------------------------------------------------------------------------------------------------------
77
+
78
+
79
class RankAndCrowdingSurvival(RankAndCrowding):
    # Thin compatibility alias for pymoo's historical class name; all
    # behavior comes from RankAndCrowding.

    def __init__(self, nds=None, crowding_func="cd"):
        super().__init__(nds, crowding_func)
83
+
84
+ # =========================================================================================================
85
+ # Implementation
86
+ # =========================================================================================================
87
+
88
+
89
class NSGA2(GeneticAlgorithm):
    """NSGA-II built on pydmoo's GeneticAlgorithm base (adapted from pymoo).

    Uses binary tournament selection, SBX crossover, polynomial mutation
    and rank-and-crowding survival.
    """

    def __init__(self,
                 pop_size=100,
                 sampling=FloatRandomSampling(),
                 selection=TournamentSelection(func_comp=binary_tournament),
                 crossover=SBX(eta=15, prob=0.9),
                 mutation=PM(eta=20),
                 survival=RankAndCrowding(),
                 output=MultiObjectiveOutput(),
                 **kwargs):
        # NOTE(review): the operator defaults above are instantiated once at
        # definition time and shared across NSGA2 instances (pymoo's own
        # convention); pass fresh operators if per-instance state matters.

        super().__init__(
            pop_size=pop_size,
            sampling=sampling,
            selection=selection,
            crossover=crossover,
            mutation=mutation,
            survival=survival,
            output=output,
            advance_after_initial_infill=True,
            **kwargs)

        self.termination = DefaultMultiObjectiveTermination()
        # Feasible tournaments decide by Pareto dominance first, crowding second.
        self.tournament_type = 'comp_by_dom_and_crowding'

    def _set_optimum(self, **kwargs):
        # No feasible solution yet: report the least-infeasible individual;
        # otherwise report the current first non-dominated front (rank 0).
        if not has_feasible(self.pop):
            self.opt = self.pop[[np.argmin(self.pop.get("CV"))]]
        else:
            self.opt = self.pop[self.pop.get("rank") == 0]
120
+
121
+
122
+ parse_doc_string(NSGA2.__init__)
@@ -0,0 +1,94 @@
1
+ from pydmoo.algorithms.modern.moeadde_imkt_clstm import (
2
+ MOEADDEIMicLSTM1003,
3
+ MOEADDEIMicLSTM1005,
4
+ MOEADDEIMicLSTM1007,
5
+ MOEADDEIMicLSTM1009,
6
+ MOEADDEIMicLSTM1503,
7
+ MOEADDEIMicLSTM1505,
8
+ MOEADDEIMicLSTM1507,
9
+ MOEADDEIMicLSTM1509,
10
+ MOEADDEIMicLSTM1511,
11
+ MOEADDEIMicLSTM1513,
12
+ )
13
+ from pydmoo.algorithms.modern.moeadde_imkt_lstm import (
14
+ MOEADDEIMiLSTM1003,
15
+ MOEADDEIMiLSTM1005,
16
+ MOEADDEIMiLSTM1007,
17
+ MOEADDEIMiLSTM1009,
18
+ MOEADDEIMiLSTM1503,
19
+ MOEADDEIMiLSTM1505,
20
+ MOEADDEIMiLSTM1507,
21
+ MOEADDEIMiLSTM1509,
22
+ MOEADDEIMiLSTM1511,
23
+ MOEADDEIMiLSTM1513,
24
+ )
25
+ from pydmoo.algorithms.modern.nsga2_imkt_clstm import (
26
+ NSGA2IMicLSTM1003,
27
+ NSGA2IMicLSTM1005,
28
+ NSGA2IMicLSTM1007,
29
+ NSGA2IMicLSTM1009,
30
+ NSGA2IMicLSTM1503,
31
+ NSGA2IMicLSTM1505,
32
+ NSGA2IMicLSTM1507,
33
+ NSGA2IMicLSTM1509,
34
+ NSGA2IMicLSTM1511,
35
+ NSGA2IMicLSTM1513,
36
+ )
37
+ from pydmoo.algorithms.modern.nsga2_imkt_lstm import (
38
+ NSGA2IMiLSTM1003,
39
+ NSGA2IMiLSTM1005,
40
+ NSGA2IMiLSTM1007,
41
+ NSGA2IMiLSTM1009,
42
+ NSGA2IMiLSTM1503,
43
+ NSGA2IMiLSTM1505,
44
+ NSGA2IMiLSTM1507,
45
+ NSGA2IMiLSTM1509,
46
+ NSGA2IMiLSTM1511,
47
+ NSGA2IMiLSTM1513,
48
+ )
49
+
50
# Registry of ablation-study algorithm variants, keyed by class name.
# The numeric suffix (e.g. "1003") appears to encode the variant's
# hyper-parameter configuration -- TODO confirm against the defining modules.
ALGORITHMS_ABLATION = {
    "MOEADDEIMicLSTM1003": MOEADDEIMicLSTM1003,
    "MOEADDEIMicLSTM1005": MOEADDEIMicLSTM1005,
    "MOEADDEIMicLSTM1007": MOEADDEIMicLSTM1007,
    "MOEADDEIMicLSTM1009": MOEADDEIMicLSTM1009,
    "MOEADDEIMicLSTM1503": MOEADDEIMicLSTM1503,
    "MOEADDEIMicLSTM1505": MOEADDEIMicLSTM1505,
    "MOEADDEIMicLSTM1507": MOEADDEIMicLSTM1507,
    "MOEADDEIMicLSTM1509": MOEADDEIMicLSTM1509,
    "MOEADDEIMicLSTM1511": MOEADDEIMicLSTM1511,
    "MOEADDEIMicLSTM1513": MOEADDEIMicLSTM1513,

    "MOEADDEIMiLSTM1003": MOEADDEIMiLSTM1003,
    "MOEADDEIMiLSTM1005": MOEADDEIMiLSTM1005,
    "MOEADDEIMiLSTM1007": MOEADDEIMiLSTM1007,
    "MOEADDEIMiLSTM1009": MOEADDEIMiLSTM1009,
    "MOEADDEIMiLSTM1503": MOEADDEIMiLSTM1503,
    "MOEADDEIMiLSTM1505": MOEADDEIMiLSTM1505,
    "MOEADDEIMiLSTM1507": MOEADDEIMiLSTM1507,
    "MOEADDEIMiLSTM1509": MOEADDEIMiLSTM1509,
    "MOEADDEIMiLSTM1511": MOEADDEIMiLSTM1511,
    "MOEADDEIMiLSTM1513": MOEADDEIMiLSTM1513,

    "NSGA2IMicLSTM1003": NSGA2IMicLSTM1003,
    "NSGA2IMicLSTM1005": NSGA2IMicLSTM1005,
    "NSGA2IMicLSTM1007": NSGA2IMicLSTM1007,
    "NSGA2IMicLSTM1009": NSGA2IMicLSTM1009,
    "NSGA2IMicLSTM1503": NSGA2IMicLSTM1503,
    "NSGA2IMicLSTM1505": NSGA2IMicLSTM1505,
    "NSGA2IMicLSTM1507": NSGA2IMicLSTM1507,
    "NSGA2IMicLSTM1509": NSGA2IMicLSTM1509,
    "NSGA2IMicLSTM1511": NSGA2IMicLSTM1511,
    "NSGA2IMicLSTM1513": NSGA2IMicLSTM1513,

    "NSGA2IMiLSTM1003": NSGA2IMiLSTM1003,
    "NSGA2IMiLSTM1005": NSGA2IMiLSTM1005,
    "NSGA2IMiLSTM1007": NSGA2IMiLSTM1007,
    "NSGA2IMiLSTM1009": NSGA2IMiLSTM1009,
    "NSGA2IMiLSTM1503": NSGA2IMiLSTM1503,
    "NSGA2IMiLSTM1505": NSGA2IMiLSTM1505,
    "NSGA2IMiLSTM1507": NSGA2IMiLSTM1507,
    "NSGA2IMiLSTM1509": NSGA2IMiLSTM1509,
    "NSGA2IMiLSTM1511": NSGA2IMiLSTM1511,
    "NSGA2IMiLSTM1513": NSGA2IMiLSTM1513,
}
@@ -0,0 +1,161 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.modern.moead_ktmm import MOEADKTMM
6
+ from pydmoo.core.bounds import clip_and_randomize
7
+ from pydmoo.core.inverse import closed_form_solution
8
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
9
+
10
+
11
class MOEADIMKT(MOEADKTMM):
    """Inverse Modeling with Knowledge Transfer (IM-KT).

    Inverse modeling for dynamic multiobjective optimization with knowledge
    transfer in objective space: after an environment change, objective
    vectors are sampled from a transferred univariate Gaussian model and
    mapped back to decision space through a closed-form linear inverse model.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Maximum number of historical (mean, std) entries used for transfer.
        self.size_pool = 10
        # Additive term in the transfer-weight normalization; keeps the
        # historical knowledge from fully overriding the current estimate.
        self.denominator = 0.5

    def _response_change(self):
        """Build the new population after a detected environment change.

        Returns
        -------
        Population
            The decision-space samples merged with the inversely-mapped
            objective-space samples (unevaluated).
        """
        # (dead pop/X recomputation removed: the previous version rebuilt
        # Population.new(X=self.pop.get("X")) and never used it)

        # sample self.pop_size individuals in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and
        # diversity (sampling_new_pop returns them sorted by rank/crowding)
        samples = samples_old[:int(len(samples_old) / 2)]

        # knowledge in objective space
        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")
        mean_new, std_new = self._select_means_stds(means_stds, mean, std)

        # sample self.pop_size individuals in objective space
        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # TODO
        # inverse mapping: solve X = F @ B in the least-squares sense
        B = closed_form_solution(samples.get("X"), samples.get("F"))
        X = np.dot(F, B)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        # merge decision-space samples with the inversely-mapped individuals
        return Population.merge(samples_old, Population.new(X=X))

    def sampling_new_pop(self):
        """Sample a new population in decision space by translating the old one.

        Returns
        -------
        Population
            Evaluated on the (changed) problem and sorted by non-dominated
            rank and crowding distance.
        """
        X = self.pop.get("X")

        if not self.problem.has_constraints():
            # previous population's decision vectors (empty on the first change)
            last_X = self.data.get("last_X", [])
            if len(last_X) == 0:
                last_X = X
            self.data["last_X"] = X

            # mean movement direction between the two most recent populations
            d = np.mean(X - last_X, axis=0)

            # perturbation radius proportional to the movement magnitude,
            # floored at 0.1 to retain diversity when the movement is tiny
            radius = max(np.linalg.norm(d) / self.problem.n_obj, 0.1)

            X = X + d + self.random_state.uniform(low=-radius, high=radius, size=X.shape)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        samples = Population.new(X=X)
        samples = self.evaluator.eval(self.problem, samples)

        # do a survival to recreate rank and crowding of all individuals
        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
        return samples
87
+
88
+
89
# ---------------------------------------------------------------------------
# Ablation variants of MOEADIMKT: each fixes a different transfer-pool size
# (size_pool = 2, 4, ..., 20) with denominator = 0.5. MOEADIMKT0 keeps the
# parent's defaults (size_pool=10); note MOEADIMKT5 duplicates that setting.
# ---------------------------------------------------------------------------


class MOEADIMKT0(MOEADIMKT):
    # Parent defaults: size_pool=10, denominator=0.5.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class MOEADIMKT1(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 2
        self.denominator = 0.5


class MOEADIMKT2(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 4
        self.denominator = 0.5


class MOEADIMKT3(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 6
        self.denominator = 0.5


class MOEADIMKT4(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 8
        self.denominator = 0.5


class MOEADIMKT5(MOEADIMKT):
    # Same configuration as MOEADIMKT0 / the parent defaults.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 10
        self.denominator = 0.5


class MOEADIMKT6(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 12
        self.denominator = 0.5


class MOEADIMKT7(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 14
        self.denominator = 0.5


class MOEADIMKT8(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 16
        self.denominator = 0.5


class MOEADIMKT9(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 18
        self.denominator = 0.5


class MOEADIMKT10(MOEADIMKT):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 20
        self.denominator = 0.5
@@ -0,0 +1,56 @@
1
+ from pymoo.core.population import Population
2
+
3
+ from pydmoo.algorithms.modern.moead_imkt import MOEADIMKT
4
+ from pydmoo.core.bounds import clip_and_randomize
5
+ from pydmoo.core.predictions import igp_based_predictor
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
class MOEADIMKTIGP(MOEADIMKT):
    """MOEADIMKT variant with a GP-based inverse mapping.

    Replaces the parent's closed-form linear inverse model with
    ``igp_based_predictor`` when mapping sampled objective vectors back to
    decision space.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 10
        self.denominator = 0.5

        # GP hyper-parameters: sigma_n is the assumed observation-noise
        # standard deviation; its square is what the predictor consumes.
        # delta_s is kept for reference -- TODO confirm intended use.
        self.delta_s = 0.01
        self.sigma_n = 0.01
        self.sigma_n_2 = self.sigma_n ** 2

    def _response_change(self):
        """Build the new population after a detected environment change.

        Returns
        -------
        Population
            The decision-space samples merged with the GP-inverse-mapped
            objective-space samples (unevaluated).
        """
        # (dead pop/X recomputation removed: the previous version rebuilt
        # Population.new(X=self.pop.get("X")) and never used it)

        # sample self.pop_size individuals in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and diversity
        samples = samples_old[:int(len(samples_old) / 2)]

        # knowledge in objective space
        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")
        mean_new, std_new = self._select_means_stds(means_stds, mean, std)

        # sample self.pop_size individuals in objective space
        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # TODO
        # inverse mapping (GP-based instead of the parent's linear model)
        X = igp_based_predictor(samples.get("X"), samples.get("F"), F, self.sigma_n_2)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        # merge
        return Population.merge(samples_old, Population.new(X=X))
52
+
53
+
54
class MOEADIMKTIGP0(MOEADIMKTIGP):
    """Baseline ablation variant; inherits every setting from MOEADIMKTIGP.

    The parent constructor (taking ``**kwargs``) is inherited unchanged, so
    instances behave identically to MOEADIMKTIGP under its default ablation
    naming.
    """
@@ -0,0 +1,109 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.modern.moead_imkt import MOEADIMKT
5
+ from pydmoo.algorithms.modern.nsga2_imkt_lstm import prepare_data_means_std
6
+ from pydmoo.core.bounds import clip_and_randomize
7
+ from pydmoo.core.inverse import closed_form_solution
8
+ from pydmoo.core.lstm.lstm import LSTMpredictor
9
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
10
+
11
+
12
class MOEADIMLSTM(MOEADIMKT):
    """Inverse Modeling with LSTM (IMLSTM).

    Inverse modeling for dynamic multiobjective optimization with LSTM
    prediction in objective space: once enough (mean, std) history has
    accumulated, an LSTM forecasts the next Gaussian model; otherwise the
    parent's knowledge-transfer selection is used as a fallback.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_pool = 10
        self.denominator = 0.5

        # Minimum history length before the LSTM forecast is used.
        self._n_timesteps = 10
        self._sequence_length = 5  # Use 5 historical time steps to predict next step
        # Incremental retraining flag; flipped to True by the subclass below.
        self._incremental_learning = False

    def _setup(self, problem, **kwargs):
        super()._setup(problem, **kwargs)

        # Must be here: constructing the predictor during _setup (after the
        # base setup ran) lets subclasses override _incremental_learning in
        # __init__ first, and self.seed is available by now.
        self._lstm = LSTMpredictor(
            self._sequence_length,
            hidden_dim=64,
            num_layers=1,
            epochs=50,
            batch_size=32,
            lr=0.001,
            device="cpu",  # for fair comparison
            patience=5,
            seed=self.seed,
            model_type="lstm",
            incremental_learning=self._incremental_learning,
        )

    def _response_change(self):
        """Build the new population after a detected environment change."""
        # NOTE(review): pop/X below are overwritten before use; these first
        # three statements appear to be dead code -- confirm before removing.
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        pop = Population.new(X=X)

        # sample self.pop_size individuals in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and diversity
        samples = samples_old[:int(len(samples_old)/2)]

        # knowledge in objective space
        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")

        # Check if sufficient historical data is available for LSTM prediction
        if len(means_stds) > self._n_timesteps:
            # Update pool: drop the oldest _n_timesteps entries -- presumably
            # to bound the history used next time; TODO confirm intent.
            self.data["means_stds"] = means_stds[self._n_timesteps:]

            # Prepare time series data from historical means and standard deviations
            time_series_data = prepare_data_means_std(self._n_timesteps, means_stds)

            # Initialize predictor and generate prediction for next time step
            next_prediction = self._lstm.convert_train_predict(time_series_data)

            # Convert prediction tensor to numpy array for further processing
            next_prediction = next_prediction.numpy()

            # Split prediction into mean and standard deviation components
            # First n_obj elements represent mean values, remaining elements represent standard deviations
            mean_new, std_new = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
            # The network output is unconstrained; force a valid (>= 0) std.
            std_new = np.abs(std_new)

        else:
            # Not enough history: fall back to knowledge-transfer selection.
            mean_new, std_new = self._select_means_stds(means_stds, mean, std)

        # sample self.pop_size individuals in objective space
        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # TODO
        # inverse mapping
        # X = FB
        B = closed_form_solution(samples.get("X"), samples.get("F"))

        # X = FB
        X = np.dot(F, B)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        # merge
        pop = Population.merge(samples_old, Population.new(X=X))

        return pop
103
+
104
+
105
class MOEADIMiLSTM(MOEADIMLSTM):
    """MOEADIMLSTM variant with incremental LSTM learning enabled."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        # Flipped before _setup() runs: the LSTM predictor is constructed
        # there and reads this flag at construction time.
        self._incremental_learning = True
@@ -0,0 +1,117 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.modern.moead_imkt import MOEADIMKT
5
+ from pydmoo.core.bounds import clip_and_randomize
6
+ from pydmoo.core.distance import norm_mean_frobenius_distance
7
+ from pydmoo.core.inverse import closed_form_solution
8
+ from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
9
+
10
+
11
class MOEADIMKTN(MOEADIMKT):
    """Inverse Modeling with Knowledge Transfer (multivariate variant).

    Like MOEADIMKT, but models the objective-space knowledge with a full
    multivariate Gaussian (mean vector + covariance matrix) instead of
    per-dimension univariate statistics.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Maximum number of historical (mean, cov) entries used for transfer.
        self.size_pool = 10
        # Additive term in the weight normalization (see _select_means_covs).
        self.denominator = 0.5

    def _response_change(self):
        """Build the new population after a detected environment change."""
        # NOTE(review): pop/X below are overwritten before use; these first
        # three statements appear to be dead code -- confirm before removing.
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        pop = Population.new(X=X)

        # sample self.pop_size individuals in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and diversity
        samples = samples_old[:int(len(samples_old)/2)]

        # knowledge in objective space
        means_stds, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
        mean_new, cov_new = self._select_means_covs(means_stds, mean, cov)

        # sample self.pop_size individuals in objective space
        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)

        # TODO
        # inverse mapping
        # X = FB
        B = closed_form_solution(samples.get("X"), samples.get("F"))

        # X = FB
        X = np.dot(F, B)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        # merge
        pop = Population.merge(samples_old, Population.new(X=X))

        return pop

    def _in_decision_or_objective_space_nd(self, samples, decision_or_objective="decision_space"):
        """Record the current optimum's (mean, cov) and return sample statistics.

        Appends the current optimum set's statistics to the ``means_covs``
        history pool, then returns the pool together with the mean and
        (symmetrized) covariance of *samples* in the chosen space.
        """
        # decision space or objective space
        flag = "X" if decision_or_objective == "decision_space" else "F"

        means_covs = self.data.get("means_covs", [])

        flag_value = self.opt.get(flag)
        if len(flag_value) <= 1:
            # too few optimal solutions for a covariance estimate: fall back
            # to the first two population members
            flag_value = self.pop.get(flag)
            flag_value = flag_value[:2]

        # 0.5 * (c.T + c) enforces numerical symmetry of the covariance
        m, c = np.mean(flag_value, axis=0), np.cov(flag_value.T)
        means_covs.append((m, 0.5 * (c.T + c), self.n_iter - 1))  # 1-based
        self.data["means_covs"] = means_covs

        flag_value = samples.get(flag)
        mean, cov = np.mean(flag_value, axis=0), np.cov(flag_value.T)
        return means_covs, mean, 0.5 * (cov.T + cov)

    def _select_means_covs(self, means_covs, mean_new, cov_new):
        """Blend the current Gaussian estimate with the closest historical ones.

        Distances are computed by ``norm_mean_frobenius_distance``; the
        ``size_pool`` closest historical entries contribute with weights
        inversely proportional to their distance.
        """
        # Unpack means and covs from the (mean, cov, iteration) tuples
        means = np.array([m[0] for m in means_covs])
        covs = np.array([m[1] for m in means_covs])

        # Calculate distances
        distances = np.array([
            norm_mean_frobenius_distance(mean, cov, mean_new, cov_new) for mean, cov in zip(means, covs)
        ])

        # Get top K closest
        top_k_idx = np.argsort(distances)[:self.size_pool]
        top_k_dist = distances[top_k_idx]
        top_k_means = means[top_k_idx]
        top_k_covs = covs[top_k_idx]

        # Update pool
        self._update_means_covs_pool(means_covs, top_k_idx)

        # Calculate weights
        weights = 1 / (top_k_dist + 1e-8)  # Add small epsilon to avoid division by zero
        # denominator > 0 keeps sum(weights) < 1, so the current estimate
        # always retains some weight in the blend below
        weights = weights / (np.sum(weights) + self.denominator)

        # Weighted combination
        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
        cov_new = (1 - np.sum(weights)) * cov_new + np.sum(weights[:, None, None] * top_k_covs, axis=0)

        # Symmetric matrix (re-symmetrize after the weighted sum)
        cov_new = 0.5 * (cov_new.T + cov_new)
        return mean_new, cov_new

    def _update_means_covs_pool(self, means_covs, top_k_idx) -> None:
        # Keep only the top-k closest historical entries for the next change.
        self.data["means_covs"] = [means_covs[i] for i in top_k_idx]
        return None
113
+
114
+
115
class MOEADIMKTN0(MOEADIMKTN):
    """Baseline ablation variant; identical to MOEADIMKTN's default settings.

    The parent constructor (taking ``**kwargs``) is inherited unchanged.
    """