pydmoo 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
class MOEADKTMM(DMOEAD):
    """Knowledge Transfer with Mixture Model (KT-MM) on top of MOEA/D.

    On an environment change, historical per-variable (mean, std) statistics
    of good solutions are pooled and the entries closest to the current
    statistics are blended into a Gaussian model in decision space, from
    which part of the new population is sampled.

    Reference:
        Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
        Knowledge transfer with mixture model in dynamic multi-objective optimization.
        IEEE Transactions on Evolutionary Computation, in press.
        https://doi.org/10.1109/TEVC.2025.3566481
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Maximum number of historical (mean, std) entries kept in the pool.
        self.size_pool = 14
        # Added to the weight normalizer so the weights sum to < 1 and the
        # current statistics always retain some influence.
        self.denominator = 0.5

    def _response_mechanism(self):
        """React to an environment change.

        Returns
        -------
        Population
            Freshly sampled, evaluated solutions merged with unevaluated
            solutions drawn from the knowledge-guided Gaussian model.
        """
        # sample self.pop_size solutions in decision space (evaluated and
        # sorted by rank and crowding)
        samples_old = self.sampling_new_pop()

        # keep the half with better convergence and diversity
        samples = samples_old[:len(samples_old) // 2]

        # knowledge in decision space: pool of historical statistics plus
        # the statistics of the selected half
        means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
        mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)

        # sample self.pop_size solutions from the blended Gaussian model
        X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # repair out-of-bounds variables
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = np.clip(X, xl, xu)  # not provided in the original reference literature

        # merge evaluated samples with the (unevaluated) transferred ones
        return Population.merge(samples_old, Population.new(X=X))

    def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
        """Update the statistics pool and compute statistics of *samples*.

        Appends the per-variable mean/std of the previous (near-)optimal set
        to the persistent pool ``self.data["means_stds"]`` and returns the
        pool together with the mean/std of *samples*.
        """
        # operate in decision space ("X") or objective space ("F")
        flag = "X" if decision_or_objective == "decision_space" else "F"

        means_stds = self.data.get("means_stds", [])

        # statistics of the previous optimum; fall back to the first two
        # population members when the optimum holds at most one solution
        flag_value = self.opt.get(flag)
        if len(flag_value) <= 1:
            flag_value = self.pop.get(flag)[:2]

        means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1))  # 1-based
        self.data["means_stds"] = means_stds

        flag_value = samples.get(flag)
        mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
        return means_stds, mean, std

    def sampling_new_pop(self):
        """Sample, evaluate and rank a fresh population of ``pop_size``."""
        samples = self.initialization.sampling(self.problem, self.pop_size)
        samples = self.evaluator.eval(self.problem, samples)

        # survival (with n_survive = all) assigns rank and crowding distance
        # to every individual
        return RankAndCrowding().do(self.problem, samples, n_survive=len(samples))

    def _select_means_stds(self, means_stds, mean_new, std_new):
        """Blend the current statistics with the closest historical ones.

        Selects the ``size_pool`` entries of *means_stds* nearest (Euclidean
        distance over concatenated mean/std) to (*mean_new*, *std_new*),
        prunes the persistent pool to them, and returns a distance-weighted
        combination of history and present.
        """
        # unpack means and stds (third tuple element, the iteration, is unused)
        means = np.array([m[0] for m in means_stds])
        stds = np.array([m[1] for m in means_stds])

        # distances over the concatenated (mean, std) vectors
        mean_diffs = means - mean_new
        std_diffs = stds - std_new
        distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))

        # top K closest historical entries
        top_k_idx = np.argsort(distances)[:self.size_pool]
        top_k_dist = distances[top_k_idx]
        top_k_means = means[top_k_idx]
        top_k_stds = stds[top_k_idx]

        # keep only the selected entries in the persistent pool
        self._update_means_stds_pool(means_stds, top_k_idx)

        # inverse-distance weights, normalized so they sum to less than 1
        weights = 1 / (top_k_dist + 1e-8)  # small epsilon avoids division by zero
        weights = weights / (np.sum(weights) + self.denominator)

        # weighted combination of current and historical statistics
        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
        std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
        return mean_new, std_new

    def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
        """Shrink the stored pool to the entries at *top_k_idx*."""
        self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
class MOEADDEKTMM(DMOEADDE):
    """Knowledge Transfer with Mixture Model (KT-MM) on top of MOEA/D-DE.

    On an environment change, historical per-variable (mean, std) statistics
    of good solutions are pooled and the entries closest to the current
    statistics are blended into a Gaussian model in decision space, from
    which part of the new population is sampled.

    Reference:
        Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
        Knowledge transfer with mixture model in dynamic multi-objective optimization.
        IEEE Transactions on Evolutionary Computation, in press.
        https://doi.org/10.1109/TEVC.2025.3566481
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Maximum number of historical (mean, std) entries kept in the pool.
        self.size_pool = 14
        # Added to the weight normalizer so the weights sum to < 1 and the
        # current statistics always retain some influence.
        self.denominator = 0.5

    def _response_mechanism(self):
        """React to an environment change.

        Returns
        -------
        Population
            Freshly sampled, evaluated solutions merged with unevaluated
            solutions drawn from the knowledge-guided Gaussian model.
        """
        # sample self.pop_size solutions in decision space (evaluated and
        # sorted by rank and crowding)
        samples_old = self.sampling_new_pop()

        # keep the half with better convergence and diversity
        samples = samples_old[:len(samples_old) // 2]

        # knowledge in decision space: pool of historical statistics plus
        # the statistics of the selected half
        means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
        mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)

        # sample self.pop_size solutions from the blended Gaussian model
        X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # repair out-of-bounds variables
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = np.clip(X, xl, xu)  # not provided in the original reference literature

        # merge evaluated samples with the (unevaluated) transferred ones
        return Population.merge(samples_old, Population.new(X=X))

    def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
        """Update the statistics pool and compute statistics of *samples*.

        Appends the per-variable mean/std of the previous (near-)optimal set
        to the persistent pool ``self.data["means_stds"]`` and returns the
        pool together with the mean/std of *samples*.
        """
        # operate in decision space ("X") or objective space ("F")
        flag = "X" if decision_or_objective == "decision_space" else "F"

        means_stds = self.data.get("means_stds", [])

        # statistics of the previous optimum; fall back to the first two
        # population members when the optimum holds at most one solution
        flag_value = self.opt.get(flag)
        if len(flag_value) <= 1:
            flag_value = self.pop.get(flag)[:2]

        means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1))  # 1-based
        self.data["means_stds"] = means_stds

        flag_value = samples.get(flag)
        mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
        return means_stds, mean, std

    def sampling_new_pop(self):
        """Sample, evaluate and rank a fresh population of ``pop_size``."""
        samples = self.initialization.sampling(self.problem, self.pop_size)
        samples = self.evaluator.eval(self.problem, samples)

        # survival (with n_survive = all) assigns rank and crowding distance
        # to every individual
        return RankAndCrowding().do(self.problem, samples, n_survive=len(samples))

    def _select_means_stds(self, means_stds, mean_new, std_new):
        """Blend the current statistics with the closest historical ones.

        Selects the ``size_pool`` entries of *means_stds* nearest (Euclidean
        distance over concatenated mean/std) to (*mean_new*, *std_new*),
        prunes the persistent pool to them, and returns a distance-weighted
        combination of history and present.
        """
        # unpack means and stds (third tuple element, the iteration, is unused)
        means = np.array([m[0] for m in means_stds])
        stds = np.array([m[1] for m in means_stds])

        # distances over the concatenated (mean, std) vectors
        mean_diffs = means - mean_new
        std_diffs = stds - std_new
        distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))

        # top K closest historical entries
        top_k_idx = np.argsort(distances)[:self.size_pool]
        top_k_dist = distances[top_k_idx]
        top_k_means = means[top_k_idx]
        top_k_stds = stds[top_k_idx]

        # keep only the selected entries in the persistent pool
        self._update_means_stds_pool(means_stds, top_k_idx)

        # inverse-distance weights, normalized so they sum to less than 1
        weights = 1 / (top_k_dist + 1e-8)  # small epsilon avoids division by zero
        weights = weights / (np.sum(weights) + self.denominator)

        # weighted combination of current and historical statistics
        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
        std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
        return mean_new, std_new

    def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
        """Shrink the stored pool to the entries at *top_k_idx*."""
        self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
@@ -0,0 +1,113 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
class NSGA2KTMM(DNSGA2):
    """Knowledge Transfer with Mixture Model (KT-MM) on top of NSGA-II.

    On an environment change, historical per-variable (mean, std) statistics
    of good solutions are pooled and the entries closest to the current
    statistics are blended into a Gaussian model in decision space, from
    which part of the new population is sampled.

    Reference:
        Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
        Knowledge transfer with mixture model in dynamic multi-objective optimization.
        IEEE Transactions on Evolutionary Computation, in press.
        https://doi.org/10.1109/TEVC.2025.3566481
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Maximum number of historical (mean, std) entries kept in the pool.
        self.size_pool = 14
        # Added to the weight normalizer so the weights sum to < 1 and the
        # current statistics always retain some influence.
        self.denominator = 0.5

    def _response_mechanism(self):
        """React to an environment change.

        Returns
        -------
        Population
            Freshly sampled, evaluated solutions merged with unevaluated
            solutions drawn from the knowledge-guided Gaussian model.
        """
        # sample self.pop_size solutions in decision space (evaluated and
        # sorted by rank and crowding)
        samples_old = self.sampling_new_pop()

        # keep the half with better convergence and diversity
        samples = samples_old[:len(samples_old) // 2]

        # knowledge in decision space: pool of historical statistics plus
        # the statistics of the selected half
        means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
        mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)

        # sample self.pop_size solutions from the blended Gaussian model
        X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # repair out-of-bounds variables
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = np.clip(X, xl, xu)  # not provided in the original reference literature

        # merge evaluated samples with the (unevaluated) transferred ones
        return Population.merge(samples_old, Population.new(X=X))

    def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
        """Update the statistics pool and compute statistics of *samples*.

        Appends the per-variable mean/std of the previous (near-)optimal set
        to the persistent pool ``self.data["means_stds"]`` and returns the
        pool together with the mean/std of *samples*.
        """
        # operate in decision space ("X") or objective space ("F")
        flag = "X" if decision_or_objective == "decision_space" else "F"

        means_stds = self.data.get("means_stds", [])

        # statistics of the previous optimum; fall back to the first two
        # population members when the optimum holds at most one solution
        flag_value = self.opt.get(flag)
        if len(flag_value) <= 1:
            flag_value = self.pop.get(flag)[:2]

        means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1))  # 1-based
        self.data["means_stds"] = means_stds

        flag_value = samples.get(flag)
        mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
        return means_stds, mean, std

    def sampling_new_pop(self):
        """Sample, evaluate and rank a fresh population of ``pop_size``."""
        samples = self.initialization.sampling(self.problem, self.pop_size)
        samples = self.evaluator.eval(self.problem, samples)

        # survival (with n_survive = all) assigns rank and crowding distance
        # to every individual
        return RankAndCrowding().do(self.problem, samples, n_survive=len(samples))

    def _select_means_stds(self, means_stds, mean_new, std_new):
        """Blend the current statistics with the closest historical ones.

        Selects the ``size_pool`` entries of *means_stds* nearest (Euclidean
        distance over concatenated mean/std) to (*mean_new*, *std_new*),
        prunes the persistent pool to them, and returns a distance-weighted
        combination of history and present.
        """
        # unpack means and stds (third tuple element, the iteration, is unused)
        means = np.array([m[0] for m in means_stds])
        stds = np.array([m[1] for m in means_stds])

        # distances over the concatenated (mean, std) vectors
        mean_diffs = means - mean_new
        std_diffs = stds - std_new
        distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))

        # top K closest historical entries
        top_k_idx = np.argsort(distances)[:self.size_pool]
        top_k_dist = distances[top_k_idx]
        top_k_means = means[top_k_idx]
        top_k_stds = stds[top_k_idx]

        # keep only the selected entries in the persistent pool
        self._update_means_stds_pool(means_stds, top_k_idx)

        # inverse-distance weights, normalized so they sum to less than 1
        weights = 1 / (top_k_dist + 1e-8)  # small epsilon avoids division by zero
        weights = weights / (np.sum(weights) + self.denominator)

        # weighted combination of current and historical statistics
        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
        std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
        return mean_new, std_new

    def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
        """Shrink the stored pool to the entries at *top_k_idx*."""
        self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
File without changes
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydmoo
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: Dynamic Multi-Objective Optimization in Python (pydmoo).
5
5
  Project-URL: Homepage, https://github.com/dynoptimization/pydmoo
6
6
  Project-URL: Repository, https://github.com/dynoptimization/pydmoo
@@ -44,5 +44,6 @@ Description-Content-Type: text/markdown
44
44
  ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pydmoo)
45
45
  [![PyPI](https://img.shields.io/pypi/v/pydmoo)](https://pypi.org/project/pydmoo/)
46
46
  ![Visitors](https://visitor-badge.laobi.icu/badge?page_id=dynoptimization.pydmoo)
47
+ [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)
47
48
 
48
49
  Please refer to the [documentation](https://dynoptimization.github.io/pydmoo/) for more details.
@@ -23,6 +23,10 @@ pydmoo/algorithms/classic/moeadde_pps.py,sha256=9wUgKZd6FbEanpbGZ6bAZiSk3wPE7xWQ
23
23
  pydmoo/algorithms/classic/moeadde_svr.py,sha256=el6FQb3D_yhWTkrBo6yQ5sLWX31OTdGeFyPdEB2gzUk,3237
24
24
  pydmoo/algorithms/classic/nsga2_ae.py,sha256=iTxlT0pQUTnLP1ZUPGzj-L7mxqaQU2lSoMDkHF4Hthc,2631
25
25
  pydmoo/algorithms/classic/nsga2_pps.py,sha256=lyQXcBssY7r8uxRJt0NXTvCJ052qpqp5KE459V8xU7M,3321
26
+ pydmoo/algorithms/knowledge/moead_ktmm.py,sha256=PHajdMbgpfq43ceLol2PAPixML1BgLLpIui22g13ItQ,4298
27
+ pydmoo/algorithms/knowledge/moeadde_ktmm.py,sha256=j8Fembi9v7l0vYcqyuksH2kvqEkqm3f3-aQl4KzkyM4,4306
28
+ pydmoo/algorithms/knowledge/nsga2_ktmm.py,sha256=i_Q8ffLUn1sp-ouAwzLmFqL9VHWz-G2f5aRsuts1TrQ,4298
29
+ pydmoo/algorithms/learning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
30
  pydmoo/algorithms/learning/moead_tr.py,sha256=iDa3MqyZUaHq9JD_Om26Y5q2cbBm_DnHRprIrKWg-fY,3411
27
31
  pydmoo/algorithms/learning/moeadde_tr.py,sha256=qKmMTRQ7Ev2y1hr9e4dUh0slD5XsXTTlx6miJHfxk60,3419
28
32
  pydmoo/algorithms/learning/nsga2_tr.py,sha256=oV_l5P_fEPjs_UB8PHL8icOkToZStz8ekdqjyoPA0ls,3411
@@ -76,7 +80,7 @@ pydmoo/problems/dynamic/gts.py,sha256=QRJXfw2L2M-lRraCIop3jBHUjHJVRr7RRS2muaxGLE
76
80
  pydmoo/problems/real_world/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
77
81
  pydmoo/problems/real_world/dsrp.py,sha256=XuF5fy1HbD6qDyBq8fOQP5ZAaNJgT4InkH2RqxH9TIk,5085
78
82
  pydmoo/problems/real_world/dwbdp.py,sha256=MUjSjH66_V3C8o4NEOSiz2uKmE-YrS9_vAEAdUHnZDs,6249
79
- pydmoo-0.1.3.dist-info/METADATA,sha256=aGLptHdMtNjlBsIqAbQ2qCfDyOV-0JlkAsYZWGNpQss,2163
80
- pydmoo-0.1.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
81
- pydmoo-0.1.3.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
82
- pydmoo-0.1.3.dist-info/RECORD,,
83
+ pydmoo-0.1.4.dist-info/METADATA,sha256=htSTAcXLDykyCyvtFx0GNsFUFu8islCaWsH9yCK_Gf4,2291
84
+ pydmoo-0.1.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
85
+ pydmoo-0.1.4.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
86
+ pydmoo-0.1.4.dist-info/RECORD,,
File without changes