pydmoo 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,7 +8,21 @@ from pydmoo.algorithms.base.moo.moead import MOEAD
8
8
 
9
9
 
10
10
  class DMOEAD(MOEAD):
11
-
11
+ """
12
+ Dynamic MOEA/D (DMOEAD).
13
+
14
+ Extension of MOEAD for dynamic optimization problems.
15
+
16
+ Parameters
17
+ ----------
18
+ perc_detect_change : float, default=0.1
19
+ Percentage of population to sample for change detection (0 to 1).
20
+ eps : float, default=0.0
21
+ Threshold for change detection. Change is detected when mean squared
22
+ difference exceeds this value.
23
+ **kwargs
24
+ Additional arguments passed to MOEAD parent class.
25
+ """
12
26
  def __init__(self,
13
27
  perc_detect_change=0.1,
14
28
  eps=0.0,
@@ -22,7 +36,15 @@ class DMOEAD(MOEAD):
22
36
  assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
23
37
  return super().setup(problem, **kwargs)
24
38
 
25
- def _detect_change_sample_part_population(self):
39
+ def _detect_change_sample_part_population(self) -> bool:
40
+ """
41
+ Detect environmental changes by sampling part of the population.
42
+
43
+ Returns
44
+ -------
45
+ change_detected : bool
46
+ True if environmental change is detected, False otherwise.
47
+ """
26
48
  pop = self.pop
27
49
  X, F = pop.get("X", "F")
28
50
 
@@ -40,7 +62,15 @@ class DMOEAD(MOEAD):
40
62
  change_detected = delta > self.eps
41
63
  return change_detected
42
64
 
43
- def _next_static_dynamic(self):
65
+ def _next_static_dynamic(self) -> Population:
66
+ """
67
+ Perform next with dynamic change detection and response.
68
+
69
+ Returns
70
+ -------
71
+ Population
72
+ Current population after potential response to environmental change.
73
+ """
44
74
  # for dynamic environment
45
75
  pop = self.pop
46
76
 
@@ -70,11 +100,24 @@ class DMOEAD(MOEAD):
70
100
  return pop
71
101
 
72
102
  def _response_mechanism(self):
73
- """Response mechanism."""
103
+ """
104
+ Response mechanism for environmental change.
105
+
106
+ Returns
107
+ -------
108
+ Population
109
+ Population after applying response strategy.
110
+
111
+ Raises
112
+ ------
113
+ NotImplementedError
114
+ Must be implemented by subclasses.
115
+ """
74
116
  raise NotImplementedError
75
117
 
76
118
 
77
119
  class DMOEADA(DMOEAD):
120
+ """DMOEADA."""
78
121
 
79
122
  def __init__(self,
80
123
  perc_detect_change=0.1,
@@ -105,6 +148,7 @@ class DMOEADA(DMOEAD):
105
148
 
106
149
 
107
150
  class DMOEADB(DMOEAD):
151
+ """DMOEADB."""
108
152
 
109
153
  def __init__(self,
110
154
  perc_detect_change=0.1,
@@ -8,6 +8,21 @@ from pydmoo.algorithms.base.moo.moeadde import MOEADDE
8
8
 
9
9
 
10
10
  class DMOEADDE(MOEADDE):
11
+ """
12
+ Dynamic MOEA/D-DE (DMOEADDE).
13
+
14
+ Extension of MOEADDE for dynamic optimization problems.
15
+
16
+ Parameters
17
+ ----------
18
+ perc_detect_change : float, default=0.1
19
+ Percentage of population to sample for change detection (0 to 1).
20
+ eps : float, default=0.0
21
+ Threshold for change detection. Change is detected when mean squared
22
+ difference exceeds this value.
23
+ **kwargs
24
+ Additional arguments passed to MOEADDE parent class.
25
+ """
11
26
 
12
27
  def __init__(self,
13
28
  perc_detect_change=0.1,
@@ -22,7 +37,15 @@ class DMOEADDE(MOEADDE):
22
37
  assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
23
38
  return super().setup(problem, **kwargs)
24
39
 
25
- def _detect_change_sample_part_population(self):
40
+ def _detect_change_sample_part_population(self) -> bool:
41
+ """
42
+ Detect environmental changes by sampling part of the population.
43
+
44
+ Returns
45
+ -------
46
+ change_detected : bool
47
+ True if environmental change is detected, False otherwise.
48
+ """
26
49
  pop = self.pop
27
50
  X, F = pop.get("X", "F")
28
51
 
@@ -40,7 +63,15 @@ class DMOEADDE(MOEADDE):
40
63
  change_detected = delta > self.eps
41
64
  return change_detected
42
65
 
43
- def _next_static_dynamic(self):
66
+ def _next_static_dynamic(self) -> Population:
67
+ """
68
+ Perform next with dynamic change detection and response.
69
+
70
+ Returns
71
+ -------
72
+ Population
73
+ Current population after potential response to environmental change.
74
+ """
44
75
  # for dynamic environment
45
76
  pop = self.pop
46
77
 
@@ -69,12 +100,25 @@ class DMOEADDE(MOEADDE):
69
100
 
70
101
  return pop
71
102
 
72
- def _response_mechanism(self):
73
- """Response mechanism."""
103
+ def _response_mechanism(self) -> Population:
104
+ """
105
+ Response mechanism for environmental change.
106
+
107
+ Returns
108
+ -------
109
+ Population
110
+ Population after applying response strategy.
111
+
112
+ Raises
113
+ ------
114
+ NotImplementedError
115
+ Must be implemented by subclasses.
116
+ """
74
117
  raise NotImplementedError
75
118
 
76
119
 
77
120
  class DMOEADDEA(DMOEADDE):
121
+ """DMOEADDEA."""
78
122
 
79
123
  def __init__(self,
80
124
  perc_detect_change=0.1,
@@ -105,6 +149,7 @@ class DMOEADDEA(DMOEADDE):
105
149
 
106
150
 
107
151
  class DMOEADDEB(DMOEADDE):
152
+ """DMOEADDEB."""
108
153
 
109
154
  def __init__(self,
110
155
  perc_detect_change=0.1,
@@ -17,10 +17,25 @@ from pydmoo.algorithms.base.moo.nsga2 import NSGA2
17
17
 
18
18
 
19
19
  class DNSGA2(NSGA2):
20
+ """
21
+ Dynamic Non-dominated Sorting Genetic Algorithm II (DNSGA2).
22
+
23
+ Extension of NSGA2 for dynamic optimization problems.
24
+
25
+ Parameters
26
+ ----------
27
+ perc_detect_change : float, default=0.1
28
+ Percentage of population to sample for change detection (0 to 1).
29
+ eps : float, default=0.0
30
+ Threshold for change detection. Change is detected when mean squared
31
+ difference exceeds this value.
32
+ **kwargs
33
+ Additional arguments passed to NSGA2 parent class.
34
+ """
20
35
 
21
36
  def __init__(self,
22
- perc_detect_change=0.1,
23
- eps=0.0,
37
+ perc_detect_change: float = 0.1,
38
+ eps: float = 0.0,
24
39
  **kwargs):
25
40
 
26
41
  super().__init__(**kwargs)
@@ -31,7 +46,15 @@ class DNSGA2(NSGA2):
31
46
  assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
32
47
  return super().setup(problem, **kwargs)
33
48
 
34
- def _detect_change_sample_part_population(self):
49
+ def _detect_change_sample_part_population(self) -> bool:
50
+ """
51
+ Detect environmental changes by sampling part of the population.
52
+
53
+ Returns
54
+ -------
55
+ change_detected : bool
56
+ True if environmental change is detected, False otherwise.
57
+ """
35
58
  pop = self.pop
36
59
  X, F = pop.get("X", "F")
37
60
 
@@ -49,7 +72,15 @@ class DNSGA2(NSGA2):
49
72
  change_detected = delta > self.eps
50
73
  return change_detected
51
74
 
52
- def _infill_static_dynamic(self):
75
+ def _infill_static_dynamic(self) -> Population:
76
+ """
77
+ Perform infill with dynamic change detection and response.
78
+
79
+ Returns
80
+ -------
81
+ Population
82
+ Current population after potential response to environmental change.
83
+ """
53
84
  # for dynamic environment
54
85
  pop = self.pop
55
86
 
@@ -75,17 +106,39 @@ class DNSGA2(NSGA2):
75
106
 
76
107
  return pop
77
108
 
78
- def _response_mechanism(self):
79
- """Response mechanism."""
109
+ def _response_mechanism(self) -> Population:
110
+ """
111
+ Response mechanism for environmental change.
112
+
113
+ Returns
114
+ -------
115
+ Population
116
+ Population after applying response strategy.
117
+
118
+ Raises
119
+ ------
120
+ NotImplementedError
121
+ Must be implemented by subclasses.
122
+ """
80
123
  raise NotImplementedError
81
124
 
82
125
 
83
126
  class DNSGA2A(DNSGA2):
127
+ """DNSGA2A.
128
+
129
+ References
130
+ ----------
131
+ Deb, K., Rao N., U. B., and Karthik, S. (2007).
132
+ Dynamic multi-objective optimization and decision-making using modified NSGA-II:
133
+ A case study on hydro-thermal power scheduling.
134
+ Evolutionary Multi-Criterion Optimization, 803–817.
135
+ https://doi.org/10.1007/978-3-540-70928-2_60
136
+ """
84
137
 
85
138
  def __init__(self,
86
- perc_detect_change=0.1,
87
- eps=0.0,
88
- perc_diversity=0.3,
139
+ perc_detect_change: float = 0.1,
140
+ eps: float = 0.0,
141
+ perc_diversity: float = 0.3,
89
142
  **kwargs):
90
143
  super().__init__(perc_detect_change=perc_detect_change,
91
144
  eps=eps,
@@ -93,7 +146,7 @@ class DNSGA2A(DNSGA2):
93
146
 
94
147
  self.perc_diversity = perc_diversity
95
148
 
96
- def _response_mechanism(self):
149
+ def _response_mechanism(self) -> Population:
97
150
  """Response mechanism."""
98
151
  pop = self.pop
99
152
  X = pop.get("X")
@@ -111,11 +164,21 @@ class DNSGA2A(DNSGA2):
111
164
 
112
165
 
113
166
  class DNSGA2B(DNSGA2):
167
+ """DNSGA2B.
168
+
169
+ References
170
+ ----------
171
+ Deb, K., Rao N., U. B., and Karthik, S. (2007).
172
+ Dynamic multi-objective optimization and decision-making using modified NSGA-II:
173
+ A case study on hydro-thermal power scheduling.
174
+ Evolutionary Multi-Criterion Optimization, 803–817.
175
+ https://doi.org/10.1007/978-3-540-70928-2_60
176
+ """
114
177
 
115
178
  def __init__(self,
116
- perc_detect_change=0.1,
117
- eps=0.0,
118
- perc_diversity=0.3,
179
+ perc_detect_change: float = 0.1,
180
+ eps: float = 0.0,
181
+ perc_diversity: float = 0.3,
119
182
  **kwargs):
120
183
  super().__init__(perc_detect_change=perc_detect_change,
121
184
  eps=eps,
@@ -123,7 +186,7 @@ class DNSGA2B(DNSGA2):
123
186
 
124
187
  self.perc_diversity = perc_diversity
125
188
 
126
- def _response_mechanism(self):
189
+ def _response_mechanism(self) -> Population:
127
190
  """Response mechanism."""
128
191
  pop = self.pop
129
192
  X = pop.get("X")
@@ -7,13 +7,18 @@ from pydmoo.algorithms.base.moo.moead import MOEAD
7
7
 
8
8
 
9
9
  class MOEADDE(MOEAD):
10
- """MOEA/D-DE (Updated by Cao).
11
-
12
- It is worth noting that there is a distinct modification in line 28 compared with the original framework of MOEA/D-DE.
13
- The newly generated solution competes with each member from the corresponding mating neighborhood (denoted as Pool in Algorithm 2).
14
- But in the original MOEA/D-DE framework, it only competes with two members from the corresponding mating neighborhood.
15
- This modification expands the replacement neighborhood to enhance the exploitation capability that is extremely important in dealing with DMOPs.
16
-
10
+ """MOEA/D-DE.
11
+
12
+ Notes
13
+ -----
14
+ It is worth noting that there is a distinct modification in line 28 compared with the original framework of
15
+ MOEA/D-DE. The newly generated solution competes with each member from the corresponding mating neighborhood
16
+ (denoted as Pool in Algorithm 2). But in the original MOEA/D-DE framework, it only competes with two members from
17
+ the corresponding mating neighborhood. This modification expands the replacement neighborhood to enhance the
18
+ exploitation capability that is extremely important in dealing with DMOPs (Cao et al., 2020).
19
+
20
+ References
21
+ ----------
17
22
  Cao, L., Xu, L., Goodman, E. D., Bao, C., and Zhu, S. (2020).
18
23
  Evolutionary dynamic multiobjective optimization assisted by a support vector regression predictor.
19
24
  IEEE Transactions on Evolutionary Computation, 24(2), 305–319.
@@ -0,0 +1,86 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from sklearn.svm import SVR
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
6
+
7
+
8
+ class MOEADSVR(DMOEAD):
9
+ """Support Vector Regression (SVR).
10
+
11
+ Notes
12
+ -----
13
+ [Official Python Code](https://github.com/LeileiCao/MOEA-D-SVR/blob/master/MOEAD-SVR%20.py)
14
+
15
+ References
16
+ ----------
17
+ Cao, L., Xu, L., Goodman, E. D., Bao, C., and Zhu, S. (2020).
18
+ Evolutionary dynamic multiobjective optimization assisted by a support vector regression predictor.
19
+ IEEE Transactions on Evolutionary Computation, 24(2), 305–319.
20
+ https://doi.org/10.1109/TEVC.2019.2925722
21
+ """
22
+
23
+ def __init__(self, **kwargs):
24
+ super().__init__(**kwargs)
25
+
26
+ # SVR
27
+ self._q = 4 # the number of preceding values that are correlated with the target value (dimension of input samples in the SVR model)
28
+ self._C = 1000 # the regularization constant in SVR model
29
+ self._epsilon = 0.05 # the insensitive tube size in SVR model
30
+ # self._gamma = 1/d # the Gaussian RBF kernel parameter used in SVR model, and d is the number of variables
31
+
32
+ def _response_mechanism(self):
33
+ """Response mechanism."""
34
+ pop = self.pop
35
+ X = pop.get("X")
36
+
37
+ old = self.data.get("stacked_X", None)
38
+ if old is None:
39
+ stacked_X = np.expand_dims(X, axis=0)
40
+ else:
41
+ stacked_X = np.concatenate((old, np.expand_dims(X, axis=0)), axis=0)
42
+ self.data["stacked_X"] = stacked_X
43
+
44
+ N, d = X.shape
45
+ sol = np.zeros((N, d))
46
+ K = len(stacked_X)
47
+
48
+ if K < self._q + 2:
49
+ # recreate the current population without being evaluated
50
+ # Re-evaluate the current population, and update the reference point
51
+ pop = Population.new(X=X)
52
+
53
+ return pop
54
+
55
+ # Precompute sliding window indices to avoid redundant calculations
56
+ window_indices = np.lib.stride_tricks.sliding_window_view(np.arange(K), self._q + 1)
57
+
58
+ for i in range(N):
59
+ for j in range(d):
60
+ # Extract the time series for this (i,j) position
61
+ ts = stacked_X[:K, i, j]
62
+
63
+ # Create training data using vectorized sliding windows
64
+ train = ts[window_indices]
65
+ x_train = train[:, :-1]
66
+ y_train = train[:, -1]
67
+
68
+ # Train SVR model (consider moving this outside loops if possible)
69
+ # gamma if 'auto', uses 1 / n_features (not provided in code but provided in paper)
70
+ # versionchanged:: 0.22
71
+ # The default value of ``gamma`` changed from 'auto' to 'scale'.
72
+ svr = SVR(kernel='rbf', epsilon=self._epsilon, C=self._C, gamma=1/d)
73
+ model = svr.fit(x_train, y_train)
74
+
75
+ # Make prediction
76
+ sol[i, j] = model.predict(ts[-self._q:].reshape(1, -1))
77
+
78
+ # bounds
79
+ if self.problem.has_bounds():
80
+ xl, xu = self.problem.bounds()
81
+ sol = np.clip(sol, xl, xu) # provided in the original reference literature
82
+
83
+ # recreate the current population without being evaluated
84
+ pop = Population.new(X=sol)
85
+
86
+ return pop
@@ -0,0 +1,86 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from sklearn.svm import SVR
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
6
+
7
+
8
+ class MOEADDESVR(DMOEADDE):
9
+ """Support Vector Regression (SVR).
10
+
11
+ Notes
12
+ -----
13
+ [Official Python Code](https://github.com/LeileiCao/MOEA-D-SVR/blob/master/MOEAD-SVR%20.py)
14
+
15
+ References
16
+ ----------
17
+ Cao, L., Xu, L., Goodman, E. D., Bao, C., and Zhu, S. (2020).
18
+ Evolutionary dynamic multiobjective optimization assisted by a support vector regression predictor.
19
+ IEEE Transactions on Evolutionary Computation, 24(2), 305–319.
20
+ https://doi.org/10.1109/TEVC.2019.2925722
21
+ """
22
+
23
+ def __init__(self, **kwargs):
24
+ super().__init__(**kwargs)
25
+
26
+ # SVR
27
+ self._q = 4 # the number of preceding values that are correlated with the target value (dimension of input samples in the SVR model)
28
+ self._C = 1000 # the regularization constant in SVR model
29
+ self._epsilon = 0.05 # the insensitive tube size in SVR model
30
+ # self._gamma = 1/d # the Gaussian RBF kernel parameter used in SVR model, and d is the number of variables
31
+
32
+ def _response_mechanism(self):
33
+ """Response mechanism."""
34
+ pop = self.pop
35
+ X = pop.get("X")
36
+
37
+ old = self.data.get("stacked_X", None)
38
+ if old is None:
39
+ stacked_X = np.expand_dims(X, axis=0)
40
+ else:
41
+ stacked_X = np.concatenate((old, np.expand_dims(X, axis=0)), axis=0)
42
+ self.data["stacked_X"] = stacked_X
43
+
44
+ N, d = X.shape
45
+ sol = np.zeros((N, d))
46
+ K = len(stacked_X)
47
+
48
+ if K < self._q + 2:
49
+ # recreate the current population without being evaluated
50
+ # Re-evaluate the current population, and update the reference point
51
+ pop = Population.new(X=X)
52
+
53
+ return pop
54
+
55
+ # Precompute sliding window indices to avoid redundant calculations
56
+ window_indices = np.lib.stride_tricks.sliding_window_view(np.arange(K), self._q + 1)
57
+
58
+ for i in range(N):
59
+ for j in range(d):
60
+ # Extract the time series for this (i,j) position
61
+ ts = stacked_X[:K, i, j]
62
+
63
+ # Create training data using vectorized sliding windows
64
+ train = ts[window_indices]
65
+ x_train = train[:, :-1]
66
+ y_train = train[:, -1]
67
+
68
+ # Train SVR model (consider moving this outside loops if possible)
69
+ # gamma if 'auto', uses 1 / n_features (not provided in code but provided in paper)
70
+ # versionchanged:: 0.22
71
+ # The default value of ``gamma`` changed from 'auto' to 'scale'.
72
+ svr = SVR(kernel='rbf', epsilon=self._epsilon, C=self._C, gamma=1/d)
73
+ model = svr.fit(x_train, y_train)
74
+
75
+ # Make prediction
76
+ sol[i, j] = model.predict(ts[-self._q:].reshape(1, -1))[0]
77
+
78
+ # bounds
79
+ if self.problem.has_bounds():
80
+ xl, xu = self.problem.bounds()
81
+ sol = np.clip(sol, xl, xu) # provided in the original reference literature
82
+
83
+ # recreate the current population without being evaluated
84
+ pop = Population.new(X=sol)
85
+
86
+ return pop
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
+ class MOEADKTMM(DMOEAD):
10
+ """Knowledge Transfer with Mixture Model.
11
+
12
+ Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
13
+ Knowledge transfer with mixture model in dynamic multi-objective optimization.
14
+ IEEE Transactions on Evolutionary Computation, in press.
15
+ https://doi.org/10.1109/TEVC.2025.3566481
16
+ """
17
+
18
+ def __init__(self, **kwargs):
19
+
20
+ super().__init__(**kwargs)
21
+
22
+ self.size_pool = 14 # the size of knowledge pool
23
+ self.denominator = 0.5
24
+
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
27
+ pop = self.pop
28
+ X = pop.get("X")
29
+
30
+ # recreate the current population without being evaluated
31
+ pop = Population.new(X=X)
32
+
33
+ # sample self.pop_size solutions in decision space
34
+ samples_old = self.sampling_new_pop()
35
+
36
+ # select self.pop_size/2 individuals with better convergence and diversity
37
+ samples = samples_old[:int(len(samples_old)/2)]
38
+
39
+ # knowledge in decision space
40
+ means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
41
+ mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)
42
+
43
+ # sample self.pop_size solutions in decision space
44
+ X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
45
+
46
+ # bounds
47
+ if self.problem.has_bounds():
48
+ xl, xu = self.problem.bounds()
49
+ X = np.clip(X, xl, xu) # not provided in the original reference literature
50
+
51
+ # merge
52
+ pop = Population.merge(samples_old, Population.new(X=X))
53
+
54
+ return pop
55
+
56
+ def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
57
+ # decision space or objective space
58
+ flag = "X" if decision_or_objective == "decision_space" else "F"
59
+
60
+ means_stds = self.data.get("means_stds", [])
61
+
62
+ flag_value = self.opt.get(flag)
63
+ if len(flag_value) <= 1:
64
+ flag_value = self.pop.get(flag)
65
+ flag_value = flag_value[:2]
66
+
67
+ means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1)) # 1-based
68
+ self.data["means_stds"] = means_stds
69
+
70
+ flag_value = samples.get(flag)
71
+ mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
72
+ return means_stds, mean, std
73
+
74
+ def sampling_new_pop(self):
75
+ samples = self.initialization.sampling(self.problem, self.pop_size)
76
+ samples = self.evaluator.eval(self.problem, samples)
77
+
78
+ # do a survival to recreate rank and crowding of all individuals
79
+ samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
80
+ return samples
81
+
82
+ def _select_means_stds(self, means_stds, mean_new, std_new):
83
+ # Unpack means and stds
84
+ means = np.array([m[0] for m in means_stds])
85
+ stds = np.array([m[1] for m in means_stds])
86
+
87
+ # Calculate distances
88
+ mean_diffs = means - mean_new
89
+ std_diffs = stds - std_new
90
+
91
+ distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))
92
+
93
+ # Get top K closest
94
+ top_k_idx = np.argsort(distances)[:self.size_pool]
95
+ top_k_dist = distances[top_k_idx]
96
+ top_k_means = means[top_k_idx]
97
+ top_k_stds = stds[top_k_idx]
98
+
99
+ # Update pool
100
+ self._update_means_stds_pool(means_stds, top_k_idx)
101
+
102
+ # Calculate weights
103
+ weights = 1 / (top_k_dist + 1e-8) # Add small epsilon to avoid division by zero
104
+ weights = weights / (np.sum(weights) + self.denominator)
105
+
106
+ # Weighted combination
107
+ mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
108
+ std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
109
+ return mean_new, std_new
110
+
111
+ def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
112
+ self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
113
+ return None
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
+ class MOEADDEKTMM(DMOEADDE):
10
+ """Knowledge Transfer with Mixture Model.
11
+
12
+ Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
13
+ Knowledge transfer with mixture model in dynamic multi-objective optimization.
14
+ IEEE Transactions on Evolutionary Computation, in press.
15
+ https://doi.org/10.1109/TEVC.2025.3566481
16
+ """
17
+
18
+ def __init__(self, **kwargs):
19
+
20
+ super().__init__(**kwargs)
21
+
22
+ self.size_pool = 14 # the size of knowledge pool
23
+ self.denominator = 0.5
24
+
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
27
+ pop = self.pop
28
+ X = pop.get("X")
29
+
30
+ # recreate the current population without being evaluated
31
+ pop = Population.new(X=X)
32
+
33
+ # sample self.pop_size solutions in decision space
34
+ samples_old = self.sampling_new_pop()
35
+
36
+ # select self.pop_size/2 individuals with better convergence and diversity
37
+ samples = samples_old[:int(len(samples_old)/2)]
38
+
39
+ # knowledge in decision space
40
+ means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
41
+ mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)
42
+
43
+ # sample self.pop_size solutions in decision space
44
+ X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
45
+
46
+ # bounds
47
+ if self.problem.has_bounds():
48
+ xl, xu = self.problem.bounds()
49
+ X = np.clip(X, xl, xu) # not provided in the original reference literature
50
+
51
+ # merge
52
+ pop = Population.merge(samples_old, Population.new(X=X))
53
+
54
+ return pop
55
+
56
+ def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
57
+ # decision space or objective space
58
+ flag = "X" if decision_or_objective == "decision_space" else "F"
59
+
60
+ means_stds = self.data.get("means_stds", [])
61
+
62
+ flag_value = self.opt.get(flag)
63
+ if len(flag_value) <= 1:
64
+ flag_value = self.pop.get(flag)
65
+ flag_value = flag_value[:2]
66
+
67
+ means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1)) # 1-based
68
+ self.data["means_stds"] = means_stds
69
+
70
+ flag_value = samples.get(flag)
71
+ mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
72
+ return means_stds, mean, std
73
+
74
+ def sampling_new_pop(self):
75
+ samples = self.initialization.sampling(self.problem, self.pop_size)
76
+ samples = self.evaluator.eval(self.problem, samples)
77
+
78
+ # do a survival to recreate rank and crowding of all individuals
79
+ samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
80
+ return samples
81
+
82
+ def _select_means_stds(self, means_stds, mean_new, std_new):
83
+ # Unpack means and stds
84
+ means = np.array([m[0] for m in means_stds])
85
+ stds = np.array([m[1] for m in means_stds])
86
+
87
+ # Calculate distances
88
+ mean_diffs = means - mean_new
89
+ std_diffs = stds - std_new
90
+
91
+ distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))
92
+
93
+ # Get top K closest
94
+ top_k_idx = np.argsort(distances)[:self.size_pool]
95
+ top_k_dist = distances[top_k_idx]
96
+ top_k_means = means[top_k_idx]
97
+ top_k_stds = stds[top_k_idx]
98
+
99
+ # Update pool
100
+ self._update_means_stds_pool(means_stds, top_k_idx)
101
+
102
+ # Calculate weights
103
+ weights = 1 / (top_k_dist + 1e-8) # Add small epsilon to avoid division by zero
104
+ weights = weights / (np.sum(weights) + self.denominator)
105
+
106
+ # Weighted combination
107
+ mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
108
+ std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
109
+ return mean_new, std_new
110
+
111
+ def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
112
+ self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
113
+ return None
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
+ class NSGA2KTMM(DNSGA2):
10
+ """Knowledge Transfer with Mixture Model.
11
+
12
+ Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
13
+ Knowledge transfer with mixture model in dynamic multi-objective optimization.
14
+ IEEE Transactions on Evolutionary Computation, in press.
15
+ https://doi.org/10.1109/TEVC.2025.3566481
16
+ """
17
+
18
+ def __init__(self, **kwargs):
19
+
20
+ super().__init__(**kwargs)
21
+
22
+ self.size_pool = 14 # the size of knowledge pool
23
+ self.denominator = 0.5
24
+
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
27
+ pop = self.pop
28
+ X = pop.get("X")
29
+
30
+ # recreate the current population without being evaluated
31
+ pop = Population.new(X=X)
32
+
33
+ # sample self.pop_size solutions in decision space
34
+ samples_old = self.sampling_new_pop()
35
+
36
+ # select self.pop_size/2 individuals with better convergence and diversity
37
+ samples = samples_old[:int(len(samples_old)/2)]
38
+
39
+ # knowledge in decision space
40
+ means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
41
+ mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)
42
+
43
+ # sample self.pop_size solutions in decision space
44
+ X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)
45
+
46
+ # bounds
47
+ if self.problem.has_bounds():
48
+ xl, xu = self.problem.bounds()
49
+ X = np.clip(X, xl, xu) # not provided in the original reference literature
50
+
51
+ # merge
52
+ pop = Population.merge(samples_old, Population.new(X=X))
53
+
54
+ return pop
55
+
56
+ def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
57
+ # decision space or objective space
58
+ flag = "X" if decision_or_objective == "decision_space" else "F"
59
+
60
+ means_stds = self.data.get("means_stds", [])
61
+
62
+ flag_value = self.opt.get(flag)
63
+ if len(flag_value) <= 1:
64
+ flag_value = self.pop.get(flag)
65
+ flag_value = flag_value[:2]
66
+
67
+ means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1)) # 1-based
68
+ self.data["means_stds"] = means_stds
69
+
70
+ flag_value = samples.get(flag)
71
+ mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
72
+ return means_stds, mean, std
73
+
74
+ def sampling_new_pop(self):
75
+ samples = self.initialization.sampling(self.problem, self.pop_size)
76
+ samples = self.evaluator.eval(self.problem, samples)
77
+
78
+ # do a survival to recreate rank and crowding of all individuals
79
+ samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
80
+ return samples
81
+
82
+ def _select_means_stds(self, means_stds, mean_new, std_new):
83
+ # Unpack means and stds
84
+ means = np.array([m[0] for m in means_stds])
85
+ stds = np.array([m[1] for m in means_stds])
86
+
87
+ # Calculate distances
88
+ mean_diffs = means - mean_new
89
+ std_diffs = stds - std_new
90
+
91
+ distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))
92
+
93
+ # Get top K closest
94
+ top_k_idx = np.argsort(distances)[:self.size_pool]
95
+ top_k_dist = distances[top_k_idx]
96
+ top_k_means = means[top_k_idx]
97
+ top_k_stds = stds[top_k_idx]
98
+
99
+ # Update pool
100
+ self._update_means_stds_pool(means_stds, top_k_idx)
101
+
102
+ # Calculate weights
103
+ weights = 1 / (top_k_dist + 1e-8) # Add small epsilon to avoid division by zero
104
+ weights = weights / (np.sum(weights) + self.denominator)
105
+
106
+ # Weighted combination
107
+ mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
108
+ std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
109
+ return mean_new, std_new
110
+
111
+ def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
112
+ self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
113
+ return None
File without changes
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class MOEADTr(DMOEAD):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG)

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501-514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the latent space learned by TCA
        self.mu = 0.5  # TCA trade-off parameter

        # cap on local-search iterations per particle (scipy default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Respond to an environmental change via transfer learning.

        Learns a TCA mapping between objective vectors sampled from the
        previous (source) and current (target) environments, then searches
        decision space for points whose latent-space images are close to the
        images of the previously optimal front (Remark 3 of the reference).

        Returns
        -------
        Population
            Freshly sampled target population merged with the transferred
            (unevaluated) solutions.
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: re-sample the problem as it was at the previous time
        problem_ = copy.deepcopy(self.problem)
        problem_.time = last_time
        pop_s = self.initialization.sampling(problem_, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_, pop_s)

        # target domain: sample the problem at the current time
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: learn the shared latent space via TCA
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: latent-space images of the current optimal objectives
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            # squared latent-space distance between particle p and the image of x
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            return np.sum((model.transform(pop_temp.get("F")) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an interior point algorithm
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # fall back to a random point if the local search fails
                random_point = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(random_point, xl, xu))

        # keep the transferred points within bounds
        # (not specified in the original reference literature)
        X_ = np.clip(np.array(X_), xl, xu)

        # recreate the current population without evaluating it:
        # fresh target samples merged with the transferred solutions
        return Population.merge(pop_t, Population.new(X=X_))
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class MOEADDETr(DMOEADDE):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG)

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501-514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the latent space learned by TCA
        self.mu = 0.5  # TCA trade-off parameter

        # cap on local-search iterations per particle (scipy default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Respond to an environmental change via transfer learning.

        Learns a TCA mapping between objective vectors sampled from the
        previous (source) and current (target) environments, then searches
        decision space for points whose latent-space images are close to the
        images of the previously optimal front (Remark 3 of the reference).

        Returns
        -------
        Population
            Freshly sampled target population merged with the transferred
            (unevaluated) solutions.
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: re-sample the problem as it was at the previous time
        problem_ = copy.deepcopy(self.problem)
        problem_.time = last_time
        pop_s = self.initialization.sampling(problem_, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_, pop_s)

        # target domain: sample the problem at the current time
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: learn the shared latent space via TCA
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: latent-space images of the current optimal objectives
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            # squared latent-space distance between particle p and the image of x
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            return np.sum((model.transform(pop_temp.get("F")) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an interior point algorithm
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # fall back to a random point if the local search fails
                random_point = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(random_point, xl, xu))

        # keep the transferred points within bounds
        # (not specified in the original reference literature)
        X_ = np.clip(np.array(X_), xl, xu)

        # recreate the current population without evaluating it:
        # fresh target samples merged with the transferred solutions
        return Population.merge(pop_t, Population.new(X=X_))
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class NSGA2Tr(DNSGA2):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG)

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501-514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the latent space learned by TCA
        self.mu = 0.5  # TCA trade-off parameter

        # cap on local-search iterations per particle (scipy default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Respond to an environmental change via transfer learning.

        Learns a TCA mapping between objective vectors sampled from the
        previous (source) and current (target) environments, then searches
        decision space for points whose latent-space images are close to the
        images of the previously optimal front (Remark 3 of the reference).

        Returns
        -------
        Population
            Freshly sampled target population merged with the transferred
            (unevaluated) solutions.
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: re-sample the problem as it was at the previous time
        problem_ = copy.deepcopy(self.problem)
        problem_.time = last_time
        pop_s = self.initialization.sampling(problem_, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_, pop_s)

        # target domain: sample the problem at the current time
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: learn the shared latent space via TCA
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: latent-space images of the current optimal objectives
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            # squared latent-space distance between particle p and the image of x
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            return np.sum((model.transform(pop_temp.get("F")) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an interior point algorithm
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # fall back to a random point if the local search fails
                random_point = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(random_point, xl, xu))

        # keep the transferred points within bounds
        # (not specified in the original reference literature)
        X_ = np.clip(np.array(X_), xl, xu)

        # recreate the current population without evaluating it:
        # fresh target samples merged with the transferred solutions
        return Population.merge(pop_t, Population.new(X=X_))
pydmoo/problems/dyn.py CHANGED
@@ -193,6 +193,34 @@ class DynamicTestProblem(DynamicProblem):
193
193
 
194
194
  @property
195
195
  def time(self):
196
+ r"""Time.
197
+
198
+ Notes
199
+ -----
200
+ The discrete time $t$ is defined as follows:
201
+
202
+ \begin{equation}
203
+ t = \frac{1}{n_t} \left\lfloor \frac{\tau}{\tau_t} \right\rfloor + \frac{1}{n_t} \left(0.5 \times \frac{\pi_{\tau}}{9}\right), \ \tau = 0, 1, 2, \dots
204
+ \end{equation}
205
+
206
+ Here, $\pi_{\tau}$ is given by:
207
+
208
+ \begin{equation}
209
+ \pi_{\tau} =
210
+ \begin{cases}
211
+ 0, & \text{if } \left\lfloor \frac{\tau}{\tau_t} \right\rfloor = 0, \\
212
+ \text{the } \left\lfloor \frac{\tau}{\tau_t} \right\rfloor\text{-th decimal digit of } \pi, & \text{otherwise.}
213
+ \end{cases}
214
+ \end{equation}
215
+
216
+ This formulation introduces a dynamic environment with an irregular change pattern. When $\pi_{\tau} = 0$, the time variation reduces to the commonly used form with a regular change pattern:
217
+
218
+ \begin{equation} \label{eq:time_regular}
219
+ t = \frac{1}{n_t} \left\lfloor \frac{\tau}{\tau_t} \right\rfloor, \ \tau = 0, 1, 2, \dots
220
+ \end{equation}
221
+
222
+ In the above expressions, $\tau$ denotes the generation counter, $n_t$ controls the severity of change, and $\tau_t$ represents the number of generations per time step.
223
+ """
196
224
  if self._time is not None:
197
225
  return self._time
198
226
  else:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydmoo
3
- Version: 0.1.2
3
+ Version: 0.1.4
4
4
  Summary: Dynamic Multi-Objective Optimization in Python (pydmoo).
5
5
  Project-URL: Homepage, https://github.com/dynoptimization/pydmoo
6
6
  Project-URL: Repository, https://github.com/dynoptimization/pydmoo
@@ -44,5 +44,6 @@ Description-Content-Type: text/markdown
44
44
  ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pydmoo)
45
45
  [![PyPI](https://img.shields.io/pypi/v/pydmoo)](https://pypi.org/project/pydmoo/)
46
46
  ![Visitors](https://visitor-badge.laobi.icu/badge?page_id=dynoptimization.pydmoo)
47
+ [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)
47
48
 
48
49
  Please refer to the [documentation](https://dynoptimization.github.io/pydmoo/) for more details.
@@ -5,22 +5,31 @@ pydmoo/algorithms/base/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
5
5
  pydmoo/algorithms/base/core/algorithm.py,sha256=nFeyj1PECWk4q2W5_7utwqSjxVIx-DA3_8o-wzHxc50,12967
6
6
  pydmoo/algorithms/base/core/genetic.py,sha256=RH1MMkBbWqSQw_8pXvweTgdpCxFqUJZXaNUUh3oYUbg,4904
7
7
  pydmoo/algorithms/base/dmoo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
- pydmoo/algorithms/base/dmoo/dmoead.py,sha256=MB0RnYHmbd1fztC9EaezquaccK89B_egyIjaUp1xggA,4289
9
- pydmoo/algorithms/base/dmoo/dmoeadde.py,sha256=7jqEezFYl_kSEx1ytVy1OQtlBRZJQKHPJd7wsqoQTmk,4305
8
+ pydmoo/algorithms/base/dmoo/dmoead.py,sha256=BvmggD99CjqHiGN-ujTVi2HiCBSExhsPJVKMfO5WpVo,5530
9
+ pydmoo/algorithms/base/dmoo/dmoeadde.py,sha256=-TbM3lxn06-MKlGUuQxdZQO5RFqbRR0XdLH0EcsImsg,5574
10
10
  pydmoo/algorithms/base/dmoo/dmopso.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
- pydmoo/algorithms/base/dmoo/dnsga2.py,sha256=AOK01k7T31Q-0wFMBA_AtHtTy4v2fnJ0RNW62YbsoFE,4465
11
+ pydmoo/algorithms/base/dmoo/dnsga2.py,sha256=PDsgVF935YK0ULceV0qSQ-8fY7fpJttu0RZRKIIqGf8,6531
12
12
  pydmoo/algorithms/base/moo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  pydmoo/algorithms/base/moo/moead.py,sha256=yAymuaqVritcrUb-erJNHMpsDY_eaD-BXC-2zM7zwoc,8084
14
- pydmoo/algorithms/base/moo/moeadde.py,sha256=WKgCZF3odsZMs40OSefqPvxlMmvkdvqQskV_q4yfOec,5073
14
+ pydmoo/algorithms/base/moo/moeadde.py,sha256=wm0aoiiogkY34ZHfJ1-dQuYWAscS7DPsqyxdp909O4E,5129
15
15
  pydmoo/algorithms/base/moo/mopso.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
16
  pydmoo/algorithms/base/moo/nsga2.py,sha256=iwvP5Av-psbmDF89Kre3bLwJKQ8BPKR5rmG0EDxU-XE,4519
17
17
  pydmoo/algorithms/classic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
18
18
  pydmoo/algorithms/classic/moead_ae.py,sha256=Ju4wrQNlDJb_z9Xoa1yFMLy9zMQJjyCbrT93Ik3ltmI,2701
19
19
  pydmoo/algorithms/classic/moead_pps.py,sha256=WbHmUOvJMMDe2YQwKyl5uP-Yykfz0Kwk5ADBm9CKQ_0,3321
20
+ pydmoo/algorithms/classic/moead_svr.py,sha256=TLDBXOYiF9q0nZHswi7ZJCDVWJrzyz1U9Lg8oTjoPH8,3226
20
21
  pydmoo/algorithms/classic/moeadde_ae.py,sha256=yCSePEywxdo6bYIgpxm98QSyEfyDYMFIgKsV_wUy7Us,2709
21
22
  pydmoo/algorithms/classic/moeadde_pps.py,sha256=9wUgKZd6FbEanpbGZ6bAZiSk3wPE7xWQ2ahaMq9V0as,3329
23
+ pydmoo/algorithms/classic/moeadde_svr.py,sha256=el6FQb3D_yhWTkrBo6yQ5sLWX31OTdGeFyPdEB2gzUk,3237
22
24
  pydmoo/algorithms/classic/nsga2_ae.py,sha256=iTxlT0pQUTnLP1ZUPGzj-L7mxqaQU2lSoMDkHF4Hthc,2631
23
25
  pydmoo/algorithms/classic/nsga2_pps.py,sha256=lyQXcBssY7r8uxRJt0NXTvCJ052qpqp5KE459V8xU7M,3321
26
+ pydmoo/algorithms/knowledge/moead_ktmm.py,sha256=PHajdMbgpfq43ceLol2PAPixML1BgLLpIui22g13ItQ,4298
27
+ pydmoo/algorithms/knowledge/moeadde_ktmm.py,sha256=j8Fembi9v7l0vYcqyuksH2kvqEkqm3f3-aQl4KzkyM4,4306
28
+ pydmoo/algorithms/knowledge/nsga2_ktmm.py,sha256=i_Q8ffLUn1sp-ouAwzLmFqL9VHWz-G2f5aRsuts1TrQ,4298
29
+ pydmoo/algorithms/learning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
30
+ pydmoo/algorithms/learning/moead_tr.py,sha256=iDa3MqyZUaHq9JD_Om26Y5q2cbBm_DnHRprIrKWg-fY,3411
31
+ pydmoo/algorithms/learning/moeadde_tr.py,sha256=qKmMTRQ7Ev2y1hr9e4dUh0slD5XsXTTlx6miJHfxk60,3419
32
+ pydmoo/algorithms/learning/nsga2_tr.py,sha256=oV_l5P_fEPjs_UB8PHL8icOkToZStz8ekdqjyoPA0ls,3411
24
33
  pydmoo/algorithms/modern/__init__.py,sha256=RJDZoZaa1mp7hFQC4TkSpOxfBpqDnbXpLmMu2GFbv2o,3004
25
34
  pydmoo/algorithms/modern/moead_imkt.py,sha256=YqhqdUV4UEn6Y8s81GyNiP1yiZomHd-j67QOVW5KEFM,4597
26
35
  pydmoo/algorithms/modern/moead_imkt_igp.py,sha256=H4HQiT274UYzN_hM1HndLH1vdmG82nAX4hW9NJ5C-vg,1902
@@ -63,7 +72,7 @@ pydmoo/core/lstm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
63
72
  pydmoo/core/lstm/base.py,sha256=h6dR-_qnNu9tjE-JmicUnGUs0Iey4I11ZD4kScRtXxk,11171
64
73
  pydmoo/core/lstm/lstm.py,sha256=S84YWwSn52g-NUfmZnG2i-4RmUjpXVb0kfABELut0bE,17591
65
74
  pydmoo/problems/__init__.py,sha256=Kp7HfoxJFcQBLqcD7sMclwAJVXILlkESHVA6DwCTKUY,1909
66
- pydmoo/problems/dyn.py,sha256=opsbVPn0GV-w_4CsLm9RVdrhYX8oOPohZobxwmZDKJY,9517
75
+ pydmoo/problems/dyn.py,sha256=a7XCE4bKHYUc9XQhmXZl62Ry_yqEt8hICTUh2fiVRJU,10863
67
76
  pydmoo/problems/dynamic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
68
77
  pydmoo/problems/dynamic/cec2015.py,sha256=jJVAuQjUwwsU-frvapuYtmVf4pQHkaXmw70WBGu4vtQ,4450
69
78
  pydmoo/problems/dynamic/df.py,sha256=5vfsCqbeF6APVaaVEMuj75p-k5OspsIqY9bRotAJcN8,15488
@@ -71,7 +80,7 @@ pydmoo/problems/dynamic/gts.py,sha256=QRJXfw2L2M-lRraCIop3jBHUjHJVRr7RRS2muaxGLE
71
80
  pydmoo/problems/real_world/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
72
81
  pydmoo/problems/real_world/dsrp.py,sha256=XuF5fy1HbD6qDyBq8fOQP5ZAaNJgT4InkH2RqxH9TIk,5085
73
82
  pydmoo/problems/real_world/dwbdp.py,sha256=MUjSjH66_V3C8o4NEOSiz2uKmE-YrS9_vAEAdUHnZDs,6249
74
- pydmoo-0.1.2.dist-info/METADATA,sha256=YSJYvtpoIqF_WaRvaCeIUX9Kbn0xKb0CzdKyw-E6IGw,2163
75
- pydmoo-0.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
76
- pydmoo-0.1.2.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
77
- pydmoo-0.1.2.dist-info/RECORD,,
83
+ pydmoo-0.1.4.dist-info/METADATA,sha256=htSTAcXLDykyCyvtFx0GNsFUFu8islCaWsH9yCK_Gf4,2291
84
+ pydmoo-0.1.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
85
+ pydmoo-0.1.4.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
86
+ pydmoo-0.1.4.dist-info/RECORD,,
File without changes