pydmoo 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. pydmoo/algorithms/base/core/genetic.py +2 -2
  2. pydmoo/algorithms/base/dmoo/dmoead.py +55 -8
  3. pydmoo/algorithms/base/dmoo/dmoeadde.py +55 -7
  4. pydmoo/algorithms/base/dmoo/dnsga2.py +81 -15
  5. pydmoo/algorithms/base/moo/moead.py +2 -1
  6. pydmoo/algorithms/base/moo/moeadde.py +12 -7
  7. pydmoo/algorithms/classic/moead_ae.py +2 -2
  8. pydmoo/algorithms/classic/moead_pps.py +2 -2
  9. pydmoo/algorithms/classic/moead_svr.py +86 -0
  10. pydmoo/algorithms/classic/moeadde_ae.py +2 -2
  11. pydmoo/algorithms/classic/moeadde_pps.py +2 -2
  12. pydmoo/algorithms/classic/moeadde_svr.py +86 -0
  13. pydmoo/algorithms/classic/nsga2_ae.py +2 -2
  14. pydmoo/algorithms/classic/nsga2_pps.py +2 -2
  15. pydmoo/algorithms/learning/moead_tr.py +98 -0
  16. pydmoo/algorithms/learning/moeadde_tr.py +98 -0
  17. pydmoo/algorithms/learning/nsga2_tr.py +98 -0
  18. pydmoo/algorithms/modern/moead_imkt.py +2 -1
  19. pydmoo/algorithms/modern/moead_imkt_igp.py +2 -1
  20. pydmoo/algorithms/modern/moead_imkt_lstm.py +2 -1
  21. pydmoo/algorithms/modern/moead_imkt_n.py +2 -1
  22. pydmoo/algorithms/modern/moead_imkt_n_igp.py +2 -1
  23. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +2 -1
  24. pydmoo/algorithms/modern/moead_ktmm.py +2 -1
  25. pydmoo/algorithms/modern/moeadde_imkt.py +2 -1
  26. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +2 -1
  27. pydmoo/algorithms/modern/moeadde_imkt_igp.py +2 -1
  28. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +2 -1
  29. pydmoo/algorithms/modern/moeadde_imkt_n.py +2 -1
  30. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +2 -1
  31. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +2 -1
  32. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +2 -1
  33. pydmoo/algorithms/modern/moeadde_ktmm.py +2 -1
  34. pydmoo/algorithms/modern/nsga2_imkt.py +2 -1
  35. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +2 -1
  36. pydmoo/algorithms/modern/nsga2_imkt_igp.py +2 -1
  37. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +2 -1
  38. pydmoo/algorithms/modern/nsga2_imkt_n.py +2 -1
  39. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +2 -1
  40. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +2 -1
  41. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +2 -1
  42. pydmoo/algorithms/modern/nsga2_ktmm.py +2 -1
  43. pydmoo/problems/dyn.py +28 -0
  44. {pydmoo-0.1.1.dist-info → pydmoo-0.1.3.dist-info}/METADATA +1 -1
  45. pydmoo-0.1.3.dist-info/RECORD +82 -0
  46. pydmoo-0.1.1.dist-info/RECORD +0 -77
  47. {pydmoo-0.1.1.dist-info → pydmoo-0.1.3.dist-info}/WHEEL +0 -0
  48. {pydmoo-0.1.1.dist-info → pydmoo-0.1.3.dist-info}/licenses/LICENSE +0 -0
@@ -20,8 +20,8 @@ class NSGA2AE(DNSGA2):
20
20
 
21
21
  super().__init__(**kwargs)
22
22
 
23
- def _response_change(self):
24
- """Response."""
23
+ def _response_mechanism(self):
24
+ """Response mechanism."""
25
25
  pop = self.pop
26
26
  X = pop.get("X")
27
27
 
@@ -25,8 +25,8 @@ class NSGA2PPS(DNSGA2):
25
25
  self.p = 3 # the order of the AR model
26
26
  self.M = 23 # the length of history mean point series
27
27
 
28
- def _response_change(self):
29
- """Response."""
28
+ def _response_mechanism(self):
29
+ """Response mechanism."""
30
30
  pop = self.pop
31
31
  X = pop.get("X")
32
32
 
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class MOEADTr(DMOEAD):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG).

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501–514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the TCA latent space
        self.mu = 0.5  # TCA trade-off parameter

        # cap on inner-solver iterations (scipy's default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Response mechanism.

        After an environment change, learn a shared latent space between
        the previous (source) and current (target) objective spaces via
        TCA (Algorithm 1 of the reference), project the current optimum
        into it, and recover decision vectors by solving a per-particle
        pre-image problem (Remark 3).

        Returns
        -------
        Population
            Freshly sampled and evaluated target population merged with
            the transferred individuals (returned unevaluated).
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: the problem frozen at the previous time step
        problem_prev = copy.deepcopy(self.problem)
        problem_prev.time = last_time
        pop_s = self.initialization.sampling(problem_prev, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_prev, pop_s)

        # target domain: the problem at the current time step
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: TCA learns the latent mapping from both F-samples
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: project the current non-dominated set into latent space
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            """Squared latent-space distance between particle ``p`` and the image of ``x``."""
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            F = pop_temp.get("F")
            return np.sum((model.transform(F) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an
                    # interior-point solver — trust-constr is scipy's closest
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # solver failure: fall back to a random feasible point
                fallback = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(fallback, xl, xu))

        # final bound repair (not provided in the original reference literature)
        X_ = np.array(X_)
        if self.problem.has_bounds():
            X_ = np.clip(X_, xl, xu)

        # merge; transferred individuals are left unevaluated on purpose
        return Population.merge(pop_t, Population.new(X=X_))
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class MOEADDETr(DMOEADDE):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG).

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501–514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the TCA latent space
        self.mu = 0.5  # TCA trade-off parameter

        # cap on inner-solver iterations (scipy's default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Response mechanism.

        After an environment change, learn a shared latent space between
        the previous (source) and current (target) objective spaces via
        TCA (Algorithm 1 of the reference), project the current optimum
        into it, and recover decision vectors by solving a per-particle
        pre-image problem (Remark 3).

        Returns
        -------
        Population
            Freshly sampled and evaluated target population merged with
            the transferred individuals (returned unevaluated).
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: the problem frozen at the previous time step
        problem_prev = copy.deepcopy(self.problem)
        problem_prev.time = last_time
        pop_s = self.initialization.sampling(problem_prev, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_prev, pop_s)

        # target domain: the problem at the current time step
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: TCA learns the latent mapping from both F-samples
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: project the current non-dominated set into latent space
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            """Squared latent-space distance between particle ``p`` and the image of ``x``."""
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            F = pop_temp.get("F")
            return np.sum((model.transform(F) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an
                    # interior-point solver — trust-constr is scipy's closest
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # solver failure: fall back to a random feasible point
                fallback = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(fallback, xl, xu))

        # final bound repair (not provided in the original reference literature)
        X_ = np.array(X_)
        if self.problem.has_bounds():
            X_ = np.clip(X_, xl, xu)

        # merge; transferred individuals are left unevaluated on purpose
        return Population.merge(pop_t, Population.new(X=X_))
@@ -0,0 +1,98 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+ from pymoo.core.population import Population
5
+ from scipy.optimize import Bounds, minimize
6
+
7
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
8
+ from pydmoo.core.transfer import TCA
9
+
10
+
11
class NSGA2Tr(DNSGA2):
    """Transfer learning (Tr).

    Transfer Learning-based Initial Population Generator (Tr-IPG).

    References
    ----------
    Jiang, M., Huang, Z., Qiu, L., Huang, W., and Yen, G. G. (2018).
    Transfer learning-based dynamic multiobjective optimization algorithms.
    IEEE Transactions on Evolutionary Computation, 22(4), 501–514.
    https://doi.org/10.1109/TEVC.2017.2771451
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.ndim_ls = 20  # dimension of the TCA latent space
        self.mu = 0.5  # TCA trade-off parameter

        # cap on inner-solver iterations (scipy's default is 1000)
        self._maxiter = max(self.pop_size, 100)

    def _response_mechanism(self):
        """Response mechanism.

        After an environment change, learn a shared latent space between
        the previous (source) and current (target) objective spaces via
        TCA (Algorithm 1 of the reference), project the current optimum
        into it, and recover decision vectors by solving a per-particle
        pre-image problem (Remark 3).

        Returns
        -------
        Population
            Freshly sampled and evaluated target population merged with
            the transferred individuals (returned unevaluated).
        """
        last_time = self.data.get("last_time", 0)
        self.data["last_time"] = self.problem.time

        # source domain: the problem frozen at the previous time step
        problem_prev = copy.deepcopy(self.problem)
        problem_prev.time = last_time
        pop_s = self.initialization.sampling(problem_prev, self.pop_size, random_state=self.random_state)
        pop_s = self.evaluator.eval(problem_prev, pop_s)

        # target domain: the problem at the current time step
        pop_t = self.initialization.sampling(self.problem, self.pop_size, random_state=self.random_state)
        pop_t = self.evaluator.eval(self.problem, pop_t)

        # Algorithm 1: TCA learns the latent mapping from both F-samples
        model = TCA(dim=self.ndim_ls, mu=self.mu)
        model.fit(pop_s.get("F"), pop_t.get("F"))

        # Remark 3: project the current non-dominated set into latent space
        particles_latent_space = model.transform(self.opt.get("F"))

        xl, xu = self.problem.bounds()

        def dist_px(p, x):
            """Squared latent-space distance between particle ``p`` and the image of ``x``."""
            x = np.clip(x, xl, xu)
            pop_temp = Population.new(X=[x])
            pop_temp = self.evaluator.eval(self.problem, pop_temp)
            F = pop_temp.get("F")
            return np.sum((model.transform(F) - p) ** 2)

        X_ = []
        for particle in particles_latent_space:
            start = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
            start = np.clip(start, xl, xu)

            try:
                res = minimize(
                    lambda x: dist_px(particle, x),
                    start,
                    bounds=Bounds(xl, xu),
                    # SLSQP, trust-constr, L-BFGS-B; the paper uses an
                    # interior-point solver — trust-constr is scipy's closest
                    method="trust-constr",
                    options={"maxiter": self._maxiter},
                )
                X_.append(np.clip(res.x, xl, xu))
            except Exception:
                # solver failure: fall back to a random feasible point
                fallback = self.initialization.sampling(self.problem, 1, random_state=self.random_state).get("X")[0]
                X_.append(np.clip(fallback, xl, xu))

        # final bound repair (not provided in the original reference literature)
        X_ = np.array(X_)
        if self.problem.has_bounds():
            X_ = np.clip(X_, xl, xu)

        # merge; transferred individuals are left unevaluated on purpose
        return Population.merge(pop_t, Population.new(X=X_))
@@ -19,7 +19,8 @@ class MOEADIMKT(MOEADKTMM):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  pop = self.pop
24
25
  X = pop.get("X")
25
26
 
@@ -16,7 +16,8 @@ class MOEADIMKTIGP(MOEADIMKT):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -42,7 +42,8 @@ class MOEADIMLSTM(MOEADIMKT):
42
42
  incremental_learning=self._incremental_learning,
43
43
  )
44
44
 
45
- def _response_change(self):
45
+ def _response_mechanism(self):
46
+ """Response mechanism."""
46
47
  pop = self.pop
47
48
  X = pop.get("X")
48
49
 
@@ -19,7 +19,8 @@ class MOEADIMKTN(MOEADIMKT):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  pop = self.pop
24
25
  X = pop.get("X")
25
26
 
@@ -16,7 +16,8 @@ class MOEADIMKTNIGP(MOEADIMKTN):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -43,7 +43,8 @@ class MOEADIMNLSTM(MOEADIMKTN):
43
43
  incremental_learning=self._incremental_learning,
44
44
  )
45
45
 
46
- def _response_change(self):
46
+ def _response_mechanism(self):
47
+ """Response mechanism."""
47
48
  pop = self.pop
48
49
  X = pop.get("X")
49
50
 
@@ -22,7 +22,8 @@ class MOEADKTMM(DMOEAD):
22
22
  self.size_pool = 14 # the size of knowledge pool
23
23
  self.denominator = 0.5
24
24
 
25
- def _response_change(self):
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
26
27
  pop = self.pop
27
28
  X = pop.get("X")
28
29
 
@@ -19,7 +19,8 @@ class MOEADDEIMKT(MOEADDEKTMM):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  pop = self.pop
24
25
  X = pop.get("X")
25
26
 
@@ -36,7 +36,8 @@ class MOEADDEIMcLSTM(MOEADDEIMKT):
36
36
  incremental_learning=self._incremental_learning,
37
37
  )
38
38
 
39
- def _response_change(self):
39
+ def _response_mechanism(self):
40
+ """Response mechanism."""
40
41
  pop = self.pop
41
42
  X = pop.get("X")
42
43
 
@@ -16,7 +16,8 @@ class MOEADDEIMKTIGP(MOEADDEIMKT):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -42,7 +42,8 @@ class MOEADDEIMLSTM(MOEADDEIMKT):
42
42
  incremental_learning=self._incremental_learning,
43
43
  )
44
44
 
45
- def _response_change(self):
45
+ def _response_mechanism(self):
46
+ """Response mechanism."""
46
47
  pop = self.pop
47
48
  X = pop.get("X")
48
49
 
@@ -19,7 +19,8 @@ class MOEADDEIMKTN(MOEADDEIMKT):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  pop = self.pop
24
25
  X = pop.get("X")
25
26
 
@@ -37,7 +37,8 @@ class MOEADDEIMNcLSTM(MOEADDEIMKTN):
37
37
  incremental_learning=self._incremental_learning,
38
38
  )
39
39
 
40
- def _response_change(self):
40
+ def _response_mechanism(self):
41
+ """Response mechanism."""
41
42
  pop = self.pop
42
43
  X = pop.get("X")
43
44
 
@@ -16,7 +16,8 @@ class MOEADDEIMKTNIGP(MOEADDEIMKTN):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -43,7 +43,8 @@ class MOEADDEIMNLSTM(MOEADDEIMKTN):
43
43
  incremental_learning=self._incremental_learning,
44
44
  )
45
45
 
46
- def _response_change(self):
46
+ def _response_mechanism(self):
47
+ """Response mechanism."""
47
48
  pop = self.pop
48
49
  X = pop.get("X")
49
50
 
@@ -22,7 +22,8 @@ class MOEADDEKTMM(DMOEADDE):
22
22
  self.size_pool = 14 # the size of knowledge pool
23
23
  self.denominator = 0.5
24
24
 
25
- def _response_change(self):
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
26
27
  pop = self.pop
27
28
  X = pop.get("X")
28
29
 
@@ -19,7 +19,8 @@ class NSGA2IMKT(NSGA2KTMM):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  """Inverse Modeling with Knowledge Transfer."""
24
25
  pop = self.pop
25
26
  X = pop.get("X")
@@ -36,7 +36,8 @@ class NSGA2IMcLSTM(NSGA2IMKT):
36
36
  incremental_learning=self._incremental_learning,
37
37
  )
38
38
 
39
- def _response_change(self):
39
+ def _response_mechanism(self):
40
+ """Response mechanism."""
40
41
  pop = self.pop
41
42
  X = pop.get("X")
42
43
 
@@ -16,7 +16,8 @@ class NSGA2IMKTIGP(NSGA2IMKT):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -41,7 +41,8 @@ class NSGA2IMLSTM(NSGA2IMKT):
41
41
  incremental_learning=self._incremental_learning,
42
42
  )
43
43
 
44
- def _response_change(self):
44
+ def _response_mechanism(self):
45
+ """Response mechanism."""
45
46
  pop = self.pop
46
47
  X = pop.get("X")
47
48
 
@@ -19,7 +19,8 @@ class NSGA2IMKTN(NSGA2IMKT):
19
19
  self.size_pool = 10
20
20
  self.denominator = 0.5
21
21
 
22
- def _response_change(self):
22
+ def _response_mechanism(self):
23
+ """Response mechanism."""
23
24
  pop = self.pop
24
25
  X = pop.get("X")
25
26
 
@@ -37,7 +37,8 @@ class NSGA2IMNcLSTM(NSGA2IMKTN):
37
37
  incremental_learning=self._incremental_learning,
38
38
  )
39
39
 
40
- def _response_change(self):
40
+ def _response_mechanism(self):
41
+ """Response mechanism."""
41
42
  pop = self.pop
42
43
  X = pop.get("X")
43
44
 
@@ -16,7 +16,8 @@ class NSGA2IMKTNIGP(NSGA2IMKTN):
16
16
  self.sigma_n = 0.01
17
17
  self.sigma_n_2 = self.sigma_n ** 2
18
18
 
19
- def _response_change(self):
19
+ def _response_mechanism(self):
20
+ """Response mechanism."""
20
21
  pop = self.pop
21
22
  X = pop.get("X")
22
23
 
@@ -42,7 +42,8 @@ class NSGA2IMNLSTM(NSGA2IMKTN):
42
42
  incremental_learning=self._incremental_learning,
43
43
  )
44
44
 
45
- def _response_change(self):
45
+ def _response_mechanism(self):
46
+ """Response mechanism."""
46
47
  pop = self.pop
47
48
  X = pop.get("X")
48
49
 
@@ -22,7 +22,8 @@ class NSGA2KTMM(DNSGA2):
22
22
  self.size_pool = 14 # the size of knowledge pool
23
23
  self.denominator = 0.5
24
24
 
25
- def _response_change(self):
25
+ def _response_mechanism(self):
26
+ """Response mechanism."""
26
27
  pop = self.pop
27
28
  X = pop.get("X")
28
29
 
pydmoo/problems/dyn.py CHANGED
@@ -193,6 +193,34 @@ class DynamicTestProblem(DynamicProblem):
193
193
 
194
194
  @property
195
195
  def time(self):
196
+ r"""Time.
197
+
198
+ Notes
199
+ -----
200
+ The discrete time $t$ is defined as follows:
201
+
202
+ \begin{equation}
203
+ t = \frac{1}{n_t} \left\lfloor \frac{\tau}{\tau_t} \right\rfloor + \frac{1}{n_t} \left(0.5 \times \frac{\pi_{\tau}}{9}\right), \ \tau = 0, 1, 2, \dots
204
+ \end{equation}
205
+
206
+ Here, $\pi_{\tau}$ is given by:
207
+
208
+ \begin{equation}
209
+ \pi_{\tau} =
210
+ \begin{cases}
211
+ 0, & \text{if } \left\lfloor \frac{\tau}{\tau_t} \right\rfloor = 0, \\
212
+ \text{the } \left\lfloor \frac{\tau}{\tau_t} \right\rfloor\text{-th decimal digit of } \pi, & \text{otherwise.}
213
+ \end{cases}
214
+ \end{equation}
215
+
216
+ This formulation introduces a dynamic environment with an irregular change pattern. When $\pi_{\tau} = 0$, the time variation reduces to the commonly used form with a regular change pattern:
217
+
218
+ \begin{equation} \label{eq:time_regular}
219
+ t = \frac{1}{n_t} \left\lfloor \frac{\tau}{\tau_t} \right\rfloor, \ \tau = 0, 1, 2, \dots
220
+ \end{equation}
221
+
222
+ In the above expressions, $\tau$ denotes the generation counter, $n_t$ controls the severity of change, and $\tau_t$ represents the number of generations per time step.
223
+ """
196
224
  if self._time is not None:
197
225
  return self._time
198
226
  else:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydmoo
3
- Version: 0.1.1
3
+ Version: 0.1.3
4
4
  Summary: Dynamic Multi-Objective Optimization in Python (pydmoo).
5
5
  Project-URL: Homepage, https://github.com/dynoptimization/pydmoo
6
6
  Project-URL: Repository, https://github.com/dynoptimization/pydmoo