pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. pydmoo/algorithms/base/__init__.py +20 -0
  2. pydmoo/algorithms/base/core/__init__.py +0 -0
  3. pydmoo/algorithms/base/core/algorithm.py +416 -0
  4. pydmoo/algorithms/base/core/genetic.py +129 -0
  5. pydmoo/algorithms/base/dmoo/__init__.py +0 -0
  6. pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
  7. pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
  8. pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
  9. pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
  10. pydmoo/algorithms/base/moo/__init__.py +0 -0
  11. pydmoo/algorithms/base/moo/moead.py +199 -0
  12. pydmoo/algorithms/base/moo/moeadde.py +105 -0
  13. pydmoo/algorithms/base/moo/mopso.py +0 -0
  14. pydmoo/algorithms/base/moo/nsga2.py +122 -0
  15. pydmoo/algorithms/modern/__init__.py +94 -0
  16. pydmoo/algorithms/modern/moead_imkt.py +161 -0
  17. pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
  18. pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
  19. pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
  20. pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
  21. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
  22. pydmoo/algorithms/modern/moead_ktmm.py +112 -0
  23. pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
  24. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
  25. pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
  26. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
  27. pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
  28. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
  29. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
  30. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
  31. pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
  32. pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
  33. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
  34. pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
  35. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
  36. pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
  37. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
  38. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
  39. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
  40. pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
  41. pydmoo/algorithms/utils/__init__.py +0 -0
  42. pydmoo/algorithms/utils/utils.py +166 -0
  43. pydmoo/core/__init__.py +0 -0
  44. pydmoo/{response → core}/ar_model.py +4 -4
  45. pydmoo/{response → core}/bounds.py +35 -2
  46. pydmoo/core/distance.py +45 -0
  47. pydmoo/core/inverse.py +55 -0
  48. pydmoo/core/lstm/__init__.py +0 -0
  49. pydmoo/core/lstm/base.py +291 -0
  50. pydmoo/core/lstm/lstm.py +491 -0
  51. pydmoo/core/manifold.py +93 -0
  52. pydmoo/core/predictions.py +50 -0
  53. pydmoo/core/sample_gaussian.py +56 -0
  54. pydmoo/core/sample_uniform.py +63 -0
  55. pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
  56. pydmoo/problems/__init__.py +53 -49
  57. pydmoo/problems/dyn.py +94 -13
  58. pydmoo/problems/dynamic/cec2015.py +10 -5
  59. pydmoo/problems/dynamic/df.py +6 -3
  60. pydmoo/problems/dynamic/gts.py +69 -34
  61. pydmoo/problems/real_world/__init__.py +0 -0
  62. pydmoo/problems/real_world/dsrp.py +168 -0
  63. pydmoo/problems/real_world/dwbdp.py +189 -0
  64. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
  65. pydmoo-0.1.0.dist-info/RECORD +70 -0
  66. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
  67. pydmoo-0.0.18.dist-info/RECORD +0 -15
  68. /pydmoo/{response → algorithms}/__init__.py +0 -0
  69. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
pydmoo/algorithms/modern/moead_imkt_n_igp.py
@@ -0,0 +1,56 @@
+from pymoo.core.population import Population
+
+from pydmoo.algorithms.modern.moead_imkt_n import MOEADIMKTN
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.predictions import igp_based_predictor
+from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
+
+
+class MOEADIMKTNIGP(MOEADIMKTN):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+        self.delta_s = 0.01
+        self.sigma_n = 0.01
+        self.sigma_n_2 = self.sigma_n ** 2
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
+        mean_new, cov_new = self._select_means_covs(means_stds, mean, cov)
+
+        # sample self.pop_size individuals in objective space
+        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        X = igp_based_predictor(samples.get("X"), samples.get("F"), F, self.sigma_n_2)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+
+class MOEADIMKTNIGP0(MOEADIMKTNIGP):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
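
igp_based_predictor comes from pydmoo/core/predictions.py (+50 in this release) and is not shown in this diff. Given the sigma_n_2 noise term above, it plausibly performs Gaussian-process regression from objective vectors back to decision vectors; the following is a minimal sketch under that assumption, using an RBF kernel. The hypothetical igp_sketch below is illustrative only, not pydmoo's actual implementation:

import numpy as np

def igp_sketch(X_train, F_train, F_new, sigma_n_2, length_scale=1.0):
    """Hypothetical GP regression mapping objective vectors F to decision vectors X."""
    def rbf(A, B):
        # squared Euclidean distances between rows of A and rows of B
        d2 = np.sum(A**2, axis=1)[:, None] + np.sum(B**2, axis=1)[None, :] - 2 * A @ B.T
        return np.exp(-0.5 * d2 / length_scale**2)

    K = rbf(F_train, F_train) + sigma_n_2 * np.eye(len(F_train))  # noisy train kernel
    K_s = rbf(F_new, F_train)                                     # cross-kernel
    return K_s @ np.linalg.solve(K, X_train)                      # GP posterior mean for X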
pydmoo/algorithms/modern/moead_imkt_n_lstm.py
@@ -0,0 +1,111 @@
+import numpy as np
+from pymoo.core.population import Population
+
+from pydmoo.algorithms.modern.moead_imkt_n import MOEADIMKTN
+from pydmoo.algorithms.modern.nsga2_imkt_n_lstm import prepare_data_mean_cov
+from pydmoo.algorithms.utils.utils import make_semidefinite, reconstruct_covariance_from_triu
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.lstm.lstm import LSTMpredictor
+from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
+
+
+class MOEADIMNLSTM(MOEADIMKTN):
+    """Inverse Modeling with LSTM (IMNLSTM).
+
+    Inverse Modeling for Dynamic Multiobjective Optimization with Knowledge Transfer in Objective Space.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+        self._n_timesteps = 10
+        self._sequence_length = 5  # use 5 historical time steps to predict the next step
+        self._incremental_learning = False
+
+    def _setup(self, problem, **kwargs):
+        super()._setup(problem, **kwargs)
+
+        # must be initialized here
+        self._lstm = LSTMpredictor(
+            self._sequence_length,
+            hidden_dim=64,
+            num_layers=1,
+            epochs=50,
+            batch_size=32,
+            lr=0.001,
+            device="cpu",  # for fair comparison
+            patience=5,
+            seed=self.seed,
+            model_type="lstm",
+            incremental_learning=self._incremental_learning,
+        )
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_covs, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")
+
+        # Check if sufficient historical data is available for LSTM prediction
+        if len(means_covs) > self._n_timesteps:
+            # Update pool
+            self.data["means_covs"] = means_covs[self._n_timesteps:]
+
+            # Prepare time series data from historical means and covariance matrices
+            time_series_data = prepare_data_mean_cov(self._n_timesteps, means_covs)
+
+            # Train the predictor and generate a prediction for the next time step
+            next_prediction = self._lstm.convert_train_predict(time_series_data)
+
+            # Convert prediction tensor to numpy array for further processing
+            next_prediction = next_prediction.numpy()
+
+            # Split prediction into mean and covariance components:
+            # first n_obj elements are the mean vector; the rest are the flattened covariance values
+            mean_new, cov_new_ = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
+            cov_new = reconstruct_covariance_from_triu(cov_new_, len(mean_new))
+            cov_new = make_semidefinite(cov_new)
+
+        else:
+            mean_new, cov_new = self._select_means_covs(means_covs, mean, cov)
+
+        # sample self.pop_size individuals in objective space
+        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+
+class MOEADIMNiLSTM(MOEADIMNLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+
+        self._incremental_learning = True
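
reconstruct_covariance_from_triu and make_semidefinite live in pydmoo/algorithms/utils/utils.py (+166, not shown in this diff). Their names suggest the LSTM predicts the n_obj mean values plus the n_obj*(n_obj+1)/2 upper-triangular covariance entries, which are then rebuilt into a symmetric matrix and repaired to be positive semidefinite before Gaussian sampling. A sketch under that reading (hypothetical helpers, not the package's verbatim code):

import numpy as np

def reconstruct_covariance_from_triu_sketch(triu_values, n):
    """Rebuild a symmetric n x n matrix from its flattened upper triangle (n*(n+1)/2 values)."""
    cov = np.zeros((n, n))
    iu = np.triu_indices(n)
    cov[iu] = triu_values
    return cov + cov.T - np.diag(np.diag(cov))  # mirror without doubling the diagonal

def make_semidefinite_sketch(cov, eps=1e-10):
    """Clip negative eigenvalues so the matrix is a valid covariance for sampling."""
    w, v = np.linalg.eigh(cov)
    return (v * np.clip(w, eps, None)) @ v.T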
pydmoo/algorithms/modern/moead_ktmm.py
@@ -0,0 +1,112 @@
+import numpy as np
+from pymoo.core.population import Population
+from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
+from pydmoo.core.sample_gaussian import univariate_gaussian_sample
+
+
+class MOEADKTMM(DMOEAD):
+    """Knowledge Transfer with Mixture Model.
+
+    Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
+    Knowledge transfer with mixture model in dynamic multi-objective optimization.
+    IEEE Transactions on Evolutionary Computation, in press.
+    https://doi.org/10.1109/TEVC.2025.3566481
+    """
+
+    def __init__(self, **kwargs):
+
+        super().__init__(**kwargs)
+
+        self.size_pool = 14  # the size of the knowledge pool
+        self.denominator = 0.5
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size solutions in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in decision space
+        means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
+        mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)
+
+        # sample self.pop_size solutions in decision space
+        X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = np.clip(X, xl, xu)  # not provided in the original reference literature
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+    def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
+        # decision space or objective space
+        flag = "X" if decision_or_objective == "decision_space" else "F"
+
+        means_stds = self.data.get("means_stds", [])
+
+        flag_value = self.opt.get(flag)
+        if len(flag_value) <= 1:
+            flag_value = self.pop.get(flag)
+            flag_value = flag_value[:2]
+
+        means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1))  # 1-based
+        self.data["means_stds"] = means_stds
+
+        flag_value = samples.get(flag)
+        mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
+        return means_stds, mean, std
+
+    def sampling_new_pop(self):
+        samples = self.initialization.sampling(self.problem, self.pop_size)
+        samples = self.evaluator.eval(self.problem, samples)
+
+        # do a survival to recreate rank and crowding of all individuals
+        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
+        return samples
+
+    def _select_means_stds(self, means_stds, mean_new, std_new):
+        # Unpack means and stds
+        means = np.array([m[0] for m in means_stds])
+        stds = np.array([m[1] for m in means_stds])
+
+        # Calculate distances
+        mean_diffs = means - mean_new
+        std_diffs = stds - std_new
+
+        distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))
+
+        # Get top K closest
+        top_k_idx = np.argsort(distances)[:self.size_pool]
+        top_k_dist = distances[top_k_idx]
+        top_k_means = means[top_k_idx]
+        top_k_stds = stds[top_k_idx]
+
+        # Update pool
+        self._update_means_stds_pool(means_stds, top_k_idx)
+
+        # Calculate weights
+        weights = 1 / (top_k_dist + 1e-8)  # add small epsilon to avoid division by zero
+        weights = weights / (np.sum(weights) + self.denominator)
+
+        # Weighted combination
+        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
+        std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
+        return mean_new, std_new
+
+    def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
+        self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
+        return None
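
The weighting scheme in _select_means_stds is worth unpacking: because the weights are normalized by sum(weights) + self.denominator, they always sum to less than 1, so the result is a blend that never fully discards the freshly sampled mean/std. A small worked example with hypothetical numbers (scalar statistics for readability):

import numpy as np

# hypothetical pool: two historical means at distances 0.1 and 0.4 from the new estimate
top_k_dist = np.array([0.1, 0.4])
top_k_means = np.array([[1.0], [2.0]])
mean_new = np.array([1.5])
denominator = 0.5  # as in MOEADKTMM

weights = 1 / (top_k_dist + 1e-8)                    # closer pool entries get larger weight
weights = weights / (np.sum(weights) + denominator)  # denominator keeps sum(weights) < 1
mixed = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
print(mixed)  # approximately [1.21]: pulled toward the closer pool entry at 1.0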
pydmoo/algorithms/modern/moeadde_imkt.py
@@ -0,0 +1,161 @@
+import numpy as np
+from pymoo.core.population import Population
+from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+from pydmoo.algorithms.modern.moeadde_ktmm import MOEADDEKTMM
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.sample_gaussian import univariate_gaussian_sample
+
+
+class MOEADDEIMKT(MOEADDEKTMM):
+    """Inverse Modeling with Knowledge Transfer.
+
+    Inverse Modeling for Dynamic Multiobjective Optimization with Knowledge Transfer in Objective Space.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")
+        mean_new, std_new = self._select_means_stds(means_stds, mean, std)
+
+        # sample self.pop_size individuals in objective space
+        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+    def sampling_new_pop(self):
+        X = self.pop.get("X")
+
+        if not self.problem.has_constraints():
+
+            last_X = self.data.get("last_X", [])
+            if len(last_X) == 0:
+                last_X = X
+            self.data["last_X"] = X
+
+            d = np.mean(X - last_X, axis=0)
+
+            radius = max(np.linalg.norm(d) / self.problem.n_obj, 0.1)
+
+            X = X + d + self.random_state.uniform(low=-radius, high=radius, size=X.shape)
+
+            # bounds
+            if self.problem.has_bounds():
+                xl, xu = self.problem.bounds()
+                X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        samples = Population.new(X=X)
+        samples = self.evaluator.eval(self.problem, samples)
+
+        # do a survival to recreate rank and crowding of all individuals
+        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
+        return samples
+
+
+class MOEADDEIMKT0(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+
+class MOEADDEIMKT1(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 2
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT2(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 4
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT3(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 6
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT4(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 8
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT5(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 10
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT6(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 12
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT7(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 14
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT8(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 16
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT9(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 18
+        self.denominator = 0.5
+
+
+class MOEADDEIMKT10(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.size_pool = 20
+        self.denominator = 0.5
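
closed_form_solution (pydmoo/core/inverse.py, +55 in this release) is not shown in this diff, but the "X = FB" comments indicate it returns the matrix B of a linear inverse model mapping objective vectors to decision vectors. A least-squares sketch consistent with those comments (an assumption, not the package's verbatim code):

import numpy as np

def closed_form_solution_sketch(X, F):
    """Least-squares B minimizing ||F @ B - X||, so that X is approximately F @ B.

    F: (n_samples, n_obj) objective vectors; X: (n_samples, n_var) decision vectors.
    """
    B, *_ = np.linalg.lstsq(F, X, rcond=None)
    return B  # shape (n_obj, n_var)

Sampled objective vectors F of shape (pop_size, n_obj) then map to decision vectors via np.dot(F, B), exactly as in _response_change above.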
pydmoo/algorithms/modern/moeadde_imkt_clstm.py
@@ -0,0 +1,223 @@
+import numpy as np
+from pymoo.core.population import Population
+from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+from pydmoo.algorithms.modern.moeadde_imkt import MOEADDEIMKT
+from pydmoo.algorithms.modern.nsga2_imkt_lstm import prepare_data_means_std
+from pydmoo.core.bounds import clip_and_randomize
+from pydmoo.core.inverse import closed_form_solution
+from pydmoo.core.lstm.lstm import LSTMpredictor
+from pydmoo.core.sample_gaussian import univariate_gaussian_sample
+
+
+class MOEADDEIMcLSTM(MOEADDEIMKT):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self._n_timesteps = 10
+        self._sequence_length = 5  # use 5 historical time steps to predict the next step
+        self._incremental_learning = False
+
+    def _setup(self, problem, **kwargs):
+        super()._setup(problem, **kwargs)
+
+        # must be initialized here
+        self._lstm = LSTMpredictor(
+            self._sequence_length,
+            hidden_dim=64,
+            num_layers=1,
+            epochs=50,
+            batch_size=32,
+            lr=0.001,
+            device="cpu",  # for fair comparison
+            patience=5,
+            seed=self.seed,
+            model_type="lstm",
+            incremental_learning=self._incremental_learning,
+        )
+
+    def _response_change(self):
+        pop = self.pop
+        X = pop.get("X")
+
+        # recreate the current population without being evaluated
+        pop = Population.new(X=X)
+
+        # sample self.pop_size individuals in decision space
+        samples_old = self.sampling_new_pop()
+
+        # select self.pop_size/2 individuals with better convergence and diversity
+        samples = samples_old[:int(len(samples_old)/2)]
+
+        # knowledge in objective space
+        means_stds, mean, std = self._in_decision_or_objective_space_1d(samples, "objective_space")
+
+        # Check if sufficient historical data is available for LSTM prediction
+        if len(means_stds) > self._n_timesteps:
+            # Update pool
+            self.data["means_stds"] = means_stds[self._n_timesteps:]
+
+            # Prepare time series data from historical means and standard deviations
+            time_series_data = prepare_data_means_std(self._n_timesteps, means_stds)
+
+            # Train the predictor and generate a prediction for the next time step
+            next_prediction = self._lstm.convert_train_predict(time_series_data)
+
+            # Convert prediction tensor to numpy array for further processing
+            next_prediction = next_prediction.numpy()
+
+            # Split prediction into mean and standard deviation components:
+            # first n_obj elements are the means; the remaining elements are the standard deviations
+            mean_new, std_new = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
+            std_new = np.abs(std_new)
+
+        else:
+            mean_new, std_new = self._select_means_stds(means_stds, mean, std)
+
+        # sample self.pop_size individuals in objective space
+        F = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
+
+        # TODO
+        # inverse mapping
+        # X = FB
+        B = closed_form_solution(samples.get("X"), samples.get("F"))
+
+        # X = FB
+        X = np.dot(F, B)
+
+        # bounds
+        if self.problem.has_bounds():
+            xl, xu = self.problem.bounds()
+            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        # merge
+        pop = Population.merge(samples_old, Population.new(X=X))
+
+        return pop
+
+    def sampling_new_pop(self):
+        ps = self.opt.get("X")
+        X = self.pop.get("X")
+
+        if not self.problem.has_constraints():
+
+            last_ps = self.data.get("last_ps", [])
+            if len(last_ps) == 0:
+                last_ps = ps
+            self.data["last_ps"] = ps
+
+            d = np.mean(ps, axis=0) - np.mean(last_ps, axis=0)
+
+            radius = max(np.linalg.norm(d) / self.problem.n_obj, 0.1)
+
+            X = X + d + self.random_state.uniform(low=-radius, high=radius, size=X.shape)
+
+            # bounds
+            if self.problem.has_bounds():
+                xl, xu = self.problem.bounds()
+                X = clip_and_randomize(X, xl, xu, random_state=self.random_state)
+
+        samples = Population.new(X=X)
+        samples = self.evaluator.eval(self.problem, samples)
+
+        # do a survival to recreate rank and crowding of all individuals
+        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
+        return samples
+
+    def _select_means_stds(self, means_stds, mean_new, std_new):
+        # Unpack means and stds
+        means = np.array([m[0] for m in means_stds])
+        stds = np.array([m[1] for m in means_stds])
+
+        # Weighted combination
+        mean_new = 0.5 * mean_new + 0.5 * means[-1]
+        std_new = 0.5 * std_new + 0.5 * stds[-1]
+        return mean_new, std_new
+
+
+class MOEADDEIMicLSTM(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1003(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 3
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1005(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1007(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 7
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1009(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 10
+        self._sequence_length = 9
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1503(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 3
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1505(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 5
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1507(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 7
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1509(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 9
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1511(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 11
+        self._incremental_learning = True
+
+
+class MOEADDEIMicLSTM1513(MOEADDEIMcLSTM):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._n_timesteps = 15
+        self._sequence_length = 13
+        self._incremental_learning = True
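
clip_and_randomize comes from pydmoo/core/bounds.py (moved from pydmoo/response and extended in this release, +35 -2) and is used throughout the classes above. One plausible reading of the name: entries that fall outside [xl, xu] are resampled uniformly inside the bounds rather than pinned to the boundary, which preserves diversity after the linear inverse mapping. A sketch under that assumption, not the package's actual implementation:

import numpy as np

def clip_and_randomize_sketch(X, xl, xu, random_state):
    """Hypothetical behavior: keep in-bounds values, resample violators uniformly in [xl, xu]."""
    rand = random_state.uniform(xl, xu, size=X.shape)  # fresh candidates within bounds
    out_of_bounds = (X < xl) | (X > xu)
    return np.where(out_of_bounds, rand, X)

# usage with a numpy Generator, mirroring random_state in the classes above:
# rng = np.random.default_rng(0)
# X = clip_and_randomize_sketch(X, xl, xu, rng)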