pydmoo 0.1.0.tar.gz → 0.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. {pydmoo-0.1.0 → pydmoo-0.1.1}/PKG-INFO +1 -1
  2. pydmoo-0.1.1/docs/index.md +7 -0
  3. pydmoo-0.1.1/docs/user-guide/algorithms/algorithms.classic.nsga2_ae.md +9 -0
  4. pydmoo-0.1.1/docs/user-guide/algorithms/algorithms.classic.nsga2_pps.md +9 -0
  5. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/algorithms.md +10 -0
  6. pydmoo-0.1.1/docs/user-guide/index.md +34 -0
  7. pydmoo-0.1.1/docs/user-guide/problems/problems.dyn.md +9 -0
  8. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/problems.md +1 -0
  9. {pydmoo-0.1.0 → pydmoo-0.1.1}/mkdocs.yml +1 -1
  10. pydmoo-0.1.1/pydmoo/algorithms/classic/moead_ae.py +77 -0
  11. pydmoo-0.1.1/pydmoo/algorithms/classic/moead_pps.py +94 -0
  12. pydmoo-0.1.1/pydmoo/algorithms/classic/moeadde_ae.py +77 -0
  13. pydmoo-0.1.1/pydmoo/algorithms/classic/moeadde_pps.py +94 -0
  14. pydmoo-0.1.1/pydmoo/algorithms/classic/nsga2_ae.py +76 -0
  15. pydmoo-0.1.1/pydmoo/algorithms/classic/nsga2_pps.py +94 -0
  16. {pydmoo-0.1.0/pydmoo/core → pydmoo-0.1.1/pydmoo/algorithms/utils}/__init__.py +0 -0
  17. {pydmoo-0.1.0/pydmoo/core/lstm → pydmoo-0.1.1/pydmoo/core}/__init__.py +0 -0
  18. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/dyn.py +110 -15
  19. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/dynamic/cec2015.py +2 -1
  20. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/dynamic/df.py +2 -1
  21. pydmoo-0.1.1/pydmoo/problems/dynamic/gts.py +1300 -0
  22. pydmoo-0.1.1/pydmoo/problems/real_world/__init__.py +0 -0
  23. {pydmoo-0.1.0 → pydmoo-0.1.1}/pyproject.toml +1 -1
  24. {pydmoo-0.1.0 → pydmoo-0.1.1}/uv.lock +154 -154
  25. pydmoo-0.1.0/docs/index.md +0 -37
  26. pydmoo-0.1.0/docs/user-guide/index.md +0 -1
  27. pydmoo-0.1.0/pydmoo/problems/dynamic/gts.py +0 -741
  28. {pydmoo-0.1.0 → pydmoo-0.1.1}/.gitattributes +0 -0
  29. {pydmoo-0.1.0 → pydmoo-0.1.1}/.github/workflows/docs.yml +0 -0
  30. {pydmoo-0.1.0 → pydmoo-0.1.1}/.github/workflows/publish.yml +0 -0
  31. {pydmoo-0.1.0 → pydmoo-0.1.1}/.github/workflows/release.yml +0 -0
  32. {pydmoo-0.1.0 → pydmoo-0.1.1}/.gitignore +0 -0
  33. {pydmoo-0.1.0 → pydmoo-0.1.1}/.python-version +0 -0
  34. {pydmoo-0.1.0 → pydmoo-0.1.1}/CODE_OF_CONDUCT.md +0 -0
  35. {pydmoo-0.1.0 → pydmoo-0.1.1}/LICENSE +0 -0
  36. {pydmoo-0.1.0 → pydmoo-0.1.1}/README.md +0 -0
  37. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/dev-guide/bug-report.md +0 -0
  38. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/dev-guide/index.md +0 -0
  39. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/dev-guide/pull-request.md +0 -0
  40. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS1.png +0 -0
  41. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS10.png +0 -0
  42. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS11.png +0 -0
  43. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS2.png +0 -0
  44. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS3.png +0 -0
  45. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS4.png +0 -0
  46. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS5.png +0 -0
  47. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS6.png +0 -0
  48. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS7.png +0 -0
  49. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS8.png +0 -0
  50. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PF/GTS9.png +0 -0
  51. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS1.png +0 -0
  52. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS10.png +0 -0
  53. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS11.png +0 -0
  54. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS2.png +0 -0
  55. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS3.png +0 -0
  56. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS4.png +0 -0
  57. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS5.png +0 -0
  58. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS6.png +0 -0
  59. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS7.png +0 -0
  60. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS8.png +0 -0
  61. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/figs/PS/GTS9.png +0 -0
  62. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/getting-started.md +0 -0
  63. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/diversity-based.md +0 -0
  64. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/index.md +0 -0
  65. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/knowledge-based.md +0 -0
  66. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/memory-based.md +0 -0
  67. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/multi-population-based.md +0 -0
  68. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/algorithms/prediction-based.md +0 -0
  69. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/applications/index.md +0 -0
  70. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/index.md +0 -0
  71. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/metrics/index.md +0 -0
  72. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/problems/benchmarks.md +0 -0
  73. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/reference/problems/index.md +0 -0
  74. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/requirements.txt +0 -0
  75. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/algorithms/algorithms.modern.nsga2_imkt.md +0 -0
  76. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/algorithms/algorithms.modern.nsga2_imkt_clstm.md +0 -0
  77. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/algorithms/algorithms.modern.nsga2_imkt_lstm.md +0 -0
  78. {pydmoo-0.1.0 → pydmoo-0.1.1}/docs/user-guide/problems/problems.dynamic.gts.md +0 -0
  79. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/__init__.py +0 -0
  80. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/__init__.py +0 -0
  81. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/__init__.py +0 -0
  82. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/core/__init__.py +0 -0
  83. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/core/algorithm.py +0 -0
  84. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/core/genetic.py +0 -0
  85. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/dmoo/__init__.py +0 -0
  86. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/dmoo/dmoead.py +0 -0
  87. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/dmoo/dmoeadde.py +0 -0
  88. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
  89. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/dmoo/dnsga2.py +0 -0
  90. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/moo/__init__.py +0 -0
  91. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/moo/moead.py +0 -0
  92. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/moo/moeadde.py +0 -0
  93. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/moo/mopso.py +0 -0
  94. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/base/moo/nsga2.py +0 -0
  95. {pydmoo-0.1.0/pydmoo/algorithms/utils → pydmoo-0.1.1/pydmoo/algorithms/classic}/__init__.py +0 -0
  96. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/__init__.py +0 -0
  97. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt.py +0 -0
  98. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt_igp.py +0 -0
  99. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt_lstm.py +0 -0
  100. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt_n.py +0 -0
  101. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt_n_igp.py +0 -0
  102. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_imkt_n_lstm.py +0 -0
  103. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moead_ktmm.py +0 -0
  104. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt.py +0 -0
  105. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_clstm.py +0 -0
  106. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_igp.py +0 -0
  107. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_lstm.py +0 -0
  108. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_n.py +0 -0
  109. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +0 -0
  110. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +0 -0
  111. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +0 -0
  112. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/moeadde_ktmm.py +0 -0
  113. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt.py +0 -0
  114. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_clstm.py +0 -0
  115. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_igp.py +0 -0
  116. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_lstm.py +0 -0
  117. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_n.py +0 -0
  118. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +0 -0
  119. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +0 -0
  120. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +0 -0
  121. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/modern/nsga2_ktmm.py +0 -0
  122. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/algorithms/utils/utils.py +0 -0
  123. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/ar_model.py +0 -0
  124. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/bounds.py +0 -0
  125. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/distance.py +0 -0
  126. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/inverse.py +0 -0
  127. {pydmoo-0.1.0/pydmoo/problems/dynamic → pydmoo-0.1.1/pydmoo/core/lstm}/__init__.py +0 -0
  128. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/lstm/base.py +0 -0
  129. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/lstm/lstm.py +0 -0
  130. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/manifold.py +0 -0
  131. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/predictions.py +0 -0
  132. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/sample_gaussian.py +0 -0
  133. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/sample_uniform.py +0 -0
  134. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/core/transfer.py +0 -0
  135. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/__init__.py +0 -0
  136. {pydmoo-0.1.0/pydmoo/problems/real_world → pydmoo-0.1.1/pydmoo/problems/dynamic}/__init__.py +0 -0
  137. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/real_world/dsrp.py +0 -0
  138. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo/problems/real_world/dwbdp.py +0 -0
  139. {pydmoo-0.1.0 → pydmoo-0.1.1}/pydmoo.sublime-project +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydmoo
- Version: 0.1.0
+ Version: 0.1.1
  Summary: Dynamic Multi-Objective Optimization in Python (pydmoo).
  Project-URL: Homepage, https://github.com/dynoptimization/pydmoo
  Project-URL: Repository, https://github.com/dynoptimization/pydmoo
@@ -0,0 +1,7 @@
+ # pydmoo
+
+ Dynamic Multi-Objective Optimization in Python
+
+ This library is built upon the widely used multi-objective optimization framework [pymoo](https://github.com/anyoptimization/pymoo), with a primary focus on **dynamic multi-objective optimization**. Accordingly, the main emphasis here is on **response mechanisms**, while the static multi-objective optimization algorithms are adopted from well-established and widely recognized sources.
+
+ ---
@@ -0,0 +1,9 @@
+ # algorithms.classic.nsga2_ae
+
+ ::: pydmoo.algorithms.classic.nsga2_ae
+     options:
+       show_root_heading: false
+       show_submodules: true
+       heading_level: 2
+       show_source: true
+       show_category_heading: true
@@ -0,0 +1,9 @@
+ # algorithms.classic.nsga2_pps
+
+ ::: pydmoo.algorithms.classic.nsga2_pps
+     options:
+       show_root_heading: false
+       show_submodules: true
+       heading_level: 2
+       show_source: true
+       show_category_heading: true
@@ -1,5 +1,15 @@
  # Algorithms

+ ## Classic Response Mechanisms
+
+ - [algorithms.classic.nsga2_ae](algorithms/algorithms.classic.nsga2_ae.md)
+ - [algorithms.classic.nsga2_pps](algorithms/algorithms.classic.nsga2_pps.md)
+
+ ## Knowledge-Based Response Mechanisms
+
  - [algorithms.modern.nsga2_imkt](algorithms/algorithms.modern.nsga2_imkt.md)
+
+ ## Learning-Based Response Mechanisms
+
  - [algorithms.modern.nsga2_imkt_lstm](algorithms/algorithms.modern.nsga2_imkt_lstm.md)
  - [algorithms.modern.nsga2_imkt_clstm](algorithms/algorithms.modern.nsga2_imkt_clstm.md)
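Each of the response mechanisms listed above shares the same constructor interface (keyword arguments forwarded to the base dynamic algorithm), so they can be swapped into the usage example shown in the next hunk simply by changing the import. A hedged illustration, using a module path and class name taken from the files added in this release:

```python
# Swap the response mechanism in the example below by importing a different class,
# e.g. the classic population prediction strategy instead of autoencoding.
from pydmoo.algorithms.classic.nsga2_pps import NSGA2PPS as DMOEA
```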
@@ -0,0 +1,34 @@
+ # API
+
+ ## Example
+
+ ```python
+ from pymoo.optimize import minimize
+
+ from pydmoo.algorithms.classic.nsga2_ae import NSGA2AE as DMOEA
+ from pydmoo.problems import DF1, GTS1
+ from pydmoo.problems.dyn import TimeSimulation
+
+
+ n_var = 10  # number of decision variables
+ t0 = 100  # generations before the first environmental change
+ nc = 50  # total number of environmental changes
+ nt = 10  # severity of change
+ taut = 10  # frequency of change (generations between changes)
+ pop_size = 100  # population size
+
+ problem = GTS1(n_var=n_var, nt=nt, taut=taut, t0=t0)
+ algorithm = DMOEA(pop_size=pop_size)
+
+ seed = 2026
+ verbose = True
+
+ res = minimize(
+     problem,
+     algorithm,
+     termination=("n_gen", taut * nc + t0),
+     callback=TimeSimulation(),
+     seed=seed,
+     verbose=verbose,
+ )
+ ```
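A brief follow-up to the example above, assuming only the standard fields of the pymoo `Result` object returned by `minimize` (nothing pydmoo-specific): the final non-dominated set can be inspected directly.

```python
# `res` is the pymoo Result returned by minimize() in the example above.
# res.X: decision variables of the final non-dominated solutions
# res.F: corresponding objective values in the last environment
print(res.X.shape, res.F.shape)
```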
@@ -0,0 +1,9 @@
+ # problems.dyn
+
+ ::: pydmoo.problems.dyn
+     options:
+       show_root_heading: false
+       show_submodules: true
+       heading_level: 2
+       show_source: true
+       show_category_heading: true
@@ -1,3 +1,4 @@
  # Problems

+ - [problems.dyn](problems/problems.dyn.md)
  - [problems.dynamic.gts](problems/problems.dynamic.gts.md)
@@ -12,7 +12,7 @@ repo_url: https://github.com/dynoptimization/pydmoo

  # Copyright
  copyright: >-
- Copyright © 2025 - 2026 DynOpt Team.
+ Copyright © 2026 DynOpt Team.
  All rights reserved.

  # Configuration
@@ -0,0 +1,77 @@
+ import numpy as np
+ from pymoo.core.population import Population
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
+ from pydmoo.core.inverse import closed_form_solution
+
+
+ class MOEADAE(DMOEAD):
+     """Autoencoding.
+
+     References
+     ----------
+     Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
+     Solving dynamic multiobjective problem via autoencoding evolutionary search.
+     IEEE Transactions on Cybernetics, 52(5), 2649–2662.
+     https://doi.org/10.1109/TCYB.2020.3017017
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without evaluating it
+         pop = Population.new(X=X)
+
+         # predict via denoising autoencoding
+         PSs = self.data.get("PSs", [])
+         PSs.append(self.opt.get("X"))  # Pareto set
+         PSs = PSs[-2:]
+         self.data["PSs"] = PSs
+
+         a = 0
+         if len(PSs) == 2:
+             # previous and current Pareto sets
+             P, Q = PSs
+
+             # Q = PM
+             min_len = min(len(P), len(Q))
+             M = closed_form_solution(Q[:min_len], P[:min_len])
+
+             # X = QM
+             X = np.dot(Q, M)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = np.clip(X, xl, xu)  # not provided in the original reference literature
+
+             # evaluate the new population
+             samples = self.evaluator.eval(self.problem, Population.new(X=X))
+             a = min(int(self.pop_size / 2), len(samples))
+
+             # do a survival to recreate rank and crowding of all individuals
+             samples = RankAndCrowding().do(self.problem, samples, n_survive=a, random_state=self.random_state)
+
+             pop[:a] = samples[:a]
+
+         # randomly select solutions from the previous Pareto set:
+         # first, to preserve the high-quality solutions found along the evolutionary search process;
+         # second, to maintain the diversity of the population for further exploration.
+         Q = self.opt.get("X")  # non-dominated solutions
+         b = min(int(self.pop_size / 2), len(Q))
+         idx = self.random_state.choice(np.arange(len(Q)), size=b)
+         pop[a:(a + b)] = Population.new(X=Q[idx])
+
+         # randomly generated solutions fill the rest of the population
+         c = self.pop_size - a - b
+         if c > 0:
+             pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)
+
+         return pop
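The response above relies on `closed_form_solution`, which, judging from the `# Q = PM` / `# X = QM` comments and the Feng et al. (2022) reference, learns a linear mapping between the two most recent Pareto set approximations and applies it forward to extrapolate solutions for the new environment. The sketch below illustrates that idea with plain numpy; `least_squares_mapping` is a hypothetical stand-in, not pydmoo's actual `closed_form_solution`, whose argument order and internals may differ.

```python
import numpy as np


def least_squares_mapping(P, Q):
    """Closed-form least-squares M with Q ≈ P @ M (illustrative only)."""
    M, *_ = np.linalg.lstsq(P, Q, rcond=None)
    return M


rng = np.random.default_rng(0)
P = rng.random((50, 10))         # Pareto set approximation before the last change
Q = rng.random((50, 10))         # Pareto set approximation after the last change
M = least_squares_mapping(P, Q)  # learned linear "autoencoding" mapping
X_pred = Q @ M                   # extrapolated decision vectors for the next environment
```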
@@ -0,0 +1,94 @@
+ import numpy as np
+ from pymoo.core.population import Population
+
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
+ from pydmoo.core.ar_model import ARModel
+ from pydmoo.core.bounds import matrix_conditional_update
+ from pydmoo.core.manifold import manifold_prediction
+
+
+ class MOEADPPS(DMOEAD):
+     """Population Prediction Strategy (center point prediction and manifold prediction).
+
+     References
+     ----------
+     Zhou, A., Jin, Y., and Zhang, Q. (2014).
+     A population prediction strategy for evolutionary dynamic multiobjective optimization.
+     IEEE Transactions on Cybernetics, 44(1), 40–53.
+     https://doi.org/10.1109/TCYB.2013.2245892
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+         self.p = 3  # order of the AR model
+         self.M = 23  # maximum length of the history of center points
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # archive center points
+         center_points = self.data.get("center_points", [])
+         center_points.append(np.mean(self.opt.get("X"), axis=0))
+
+         # keep at most the last M center points
+         center_points = center_points[(-self.M):]
+         self.data["center_points"] = center_points
+
+         # archive populations
+         Xs = self.data.get("Xs", [])
+         Xs.append(self.pop.get("X"))  # current population
+         Xs = Xs[-2:]
+         self.data["Xs"] = Xs
+
+         if len(center_points) >= (self.p + 1):
+
+             C1, distance = manifold_prediction(Xs[0], Xs[1])
+             n = C1.shape[1]  # dimensionality of the manifold
+             variance = (distance ** 2) / n
+
+             center, variances = self.center_points_prediction(center_points)
+
+             X = center + C1 + self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+         else:
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+             # randomly sample half of the population and reuse half from the previous search
+             # when the history is not long enough to build an AR(p) model.
+
+             # randomly sample half of the population
+             a = int(self.pop_size / 2)
+             pop[:a] = self.initialization.sampling(self.problem, a, random_state=self.random_state)
+
+             # randomly reuse the other half from the previous population
+             Q = self.pop.get("X")
+             b = self.pop_size - a
+             idx = self.random_state.choice(np.arange(len(Q)), size=b)
+             pop[a:] = Population.new(X=Q[idx])
+
+         return pop
+
+     def center_points_prediction(self, center_points):
+         n = len(center_points[0])
+         center = np.zeros(n)
+         variances = np.zeros(n)
+         for i in range(len(center)):
+             data = [c[i] for c in center_points]
+             model = ARModel(self.p).fit(data)
+             predictions = model.predict(data, 1)
+             center[i], variances[i] = predictions[0], np.mean(model.resid_ ** 2)
+         return center, variances
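In the PPS variants, `center_points_prediction` fits an AR(p) model per decision variable on the history of Pareto set center points, forecasts the next center one step ahead, and uses the mean squared residual as the per-dimension noise variance. Below is a hedged, numpy-only sketch of that per-coordinate step; `ar_one_step` is illustrative and does not reproduce pydmoo's `ARModel` API.

```python
import numpy as np


def ar_one_step(series, p=3):
    """Fit an AR(p) model by least squares and forecast one step ahead (illustrative only)."""
    series = np.asarray(series, dtype=float)
    # Design matrix: each row holds the p values preceding the target value.
    X = np.column_stack([series[i:len(series) - p + i] for i in range(p)])
    y = series[p:]
    X = np.column_stack([np.ones(len(y)), X])    # intercept term
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)
    resid = y - X @ coef
    next_val = coef[0] + coef[1:] @ series[-p:]  # one-step forecast
    return next_val, np.mean(resid ** 2)         # predicted coordinate, residual variance


history = [0.10, 0.15, 0.22, 0.27, 0.35, 0.41, 0.48]  # one coordinate of the center over time
center_i, var_i = ar_one_step(history, p=3)
```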
@@ -0,0 +1,77 @@
+ import numpy as np
+ from pymoo.core.population import Population
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
+ from pydmoo.core.inverse import closed_form_solution
+
+
+ class MOEADDEAE(DMOEADDE):
+     """Autoencoding.
+
+     References
+     ----------
+     Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
+     Solving dynamic multiobjective problem via autoencoding evolutionary search.
+     IEEE Transactions on Cybernetics, 52(5), 2649–2662.
+     https://doi.org/10.1109/TCYB.2020.3017017
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without evaluating it
+         pop = Population.new(X=X)
+
+         # predict via denoising autoencoding
+         PSs = self.data.get("PSs", [])
+         PSs.append(self.opt.get("X"))  # Pareto set
+         PSs = PSs[-2:]
+         self.data["PSs"] = PSs
+
+         a = 0
+         if len(PSs) == 2:
+             # previous and current Pareto sets
+             P, Q = PSs
+
+             # Q = PM
+             min_len = min(len(P), len(Q))
+             M = closed_form_solution(Q[:min_len], P[:min_len])
+
+             # X = QM
+             X = np.dot(Q, M)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = np.clip(X, xl, xu)  # not provided in the original reference literature
+
+             # evaluate the new population
+             samples = self.evaluator.eval(self.problem, Population.new(X=X))
+             a = min(int(self.pop_size / 2), len(samples))
+
+             # do a survival to recreate rank and crowding of all individuals
+             samples = RankAndCrowding().do(self.problem, samples, n_survive=a, random_state=self.random_state)
+
+             pop[:a] = samples[:a]
+
+         # randomly select solutions from the previous Pareto set:
+         # first, to preserve the high-quality solutions found along the evolutionary search process;
+         # second, to maintain the diversity of the population for further exploration.
+         Q = self.opt.get("X")  # non-dominated solutions
+         b = min(int(self.pop_size / 2), len(Q))
+         idx = self.random_state.choice(np.arange(len(Q)), size=b)
+         pop[a:(a + b)] = Population.new(X=Q[idx])
+
+         # randomly generated solutions fill the rest of the population
+         c = self.pop_size - a - b
+         if c > 0:
+             pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)
+
+         return pop
@@ -0,0 +1,94 @@
+ import numpy as np
+ from pymoo.core.population import Population
+
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
+ from pydmoo.core.ar_model import ARModel
+ from pydmoo.core.bounds import matrix_conditional_update
+ from pydmoo.core.manifold import manifold_prediction
+
+
+ class MOEADDEPPS(DMOEADDE):
+     """Population Prediction Strategy (center point prediction and manifold prediction).
+
+     References
+     ----------
+     Zhou, A., Jin, Y., and Zhang, Q. (2014).
+     A population prediction strategy for evolutionary dynamic multiobjective optimization.
+     IEEE Transactions on Cybernetics, 44(1), 40–53.
+     https://doi.org/10.1109/TCYB.2013.2245892
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+         self.p = 3  # order of the AR model
+         self.M = 23  # maximum length of the history of center points
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # archive center points
+         center_points = self.data.get("center_points", [])
+         center_points.append(np.mean(self.opt.get("X"), axis=0))
+
+         # keep at most the last M center points
+         center_points = center_points[(-self.M):]
+         self.data["center_points"] = center_points
+
+         # archive populations
+         Xs = self.data.get("Xs", [])
+         Xs.append(self.pop.get("X"))  # current population
+         Xs = Xs[-2:]
+         self.data["Xs"] = Xs
+
+         if len(center_points) >= (self.p + 1):
+
+             C1, distance = manifold_prediction(Xs[0], Xs[1])
+             n = C1.shape[1]  # dimensionality of the manifold
+             variance = (distance ** 2) / n
+
+             center, variances = self.center_points_prediction(center_points)
+
+             X = center + C1 + self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+         else:
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+             # randomly sample half of the population and reuse half from the previous search
+             # when the history is not long enough to build an AR(p) model.
+
+             # randomly sample half of the population
+             a = int(self.pop_size / 2)
+             pop[:a] = self.initialization.sampling(self.problem, a, random_state=self.random_state)
+
+             # randomly reuse the other half from the previous population
+             Q = self.pop.get("X")
+             b = self.pop_size - a
+             idx = self.random_state.choice(np.arange(len(Q)), size=b)
+             pop[a:] = Population.new(X=Q[idx])
+
+         return pop
+
+     def center_points_prediction(self, center_points):
+         n = len(center_points[0])
+         center = np.zeros(n)
+         variances = np.zeros(n)
+         for i in range(len(center)):
+             data = [c[i] for c in center_points]
+             model = ARModel(self.p).fit(data)
+             predictions = model.predict(data, 1)
+             center[i], variances[i] = predictions[0], np.mean(model.resid_ ** 2)
+         return center, variances
@@ -0,0 +1,76 @@
+ import numpy as np
+ from pymoo.core.population import Population
+
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
+ from pydmoo.core.inverse import closed_form_solution
+
+
+ class NSGA2AE(DNSGA2):
+     """Autoencoding (AE).
+
+     References
+     ----------
+     Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
+     Solving dynamic multiobjective problem via autoencoding evolutionary search.
+     IEEE Transactions on Cybernetics, 52(5), 2649–2662.
+     https://doi.org/10.1109/TCYB.2020.3017017
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without evaluating it
+         pop = Population.new(X=X)
+
+         # predict via denoising autoencoding
+         PSs = self.data.get("PSs", [])
+         PSs.append(self.opt.get("X"))  # Pareto set
+         PSs = PSs[-2:]
+         self.data["PSs"] = PSs
+
+         a = 0
+         if len(PSs) == 2:
+             # previous and current Pareto sets
+             P, Q = PSs
+
+             # Q = PM
+             min_len = min(len(P), len(Q))
+             M = closed_form_solution(Q[:min_len], P[:min_len])
+
+             # X = QM
+             X = np.dot(Q, M)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = np.clip(X, xl, xu)  # not provided in the original reference literature
+
+             # evaluate the new population
+             samples = self.evaluator.eval(self.problem, Population.new(X=X))
+             a = min(int(self.pop_size / 2), len(samples))
+
+             # do a survival to recreate rank and crowding of all individuals
+             samples = self.survival.do(self.problem, samples, n_survive=a, random_state=self.random_state)
+
+             pop[:a] = samples[:a]
+
+         # randomly select solutions from the previous Pareto set:
+         # first, to preserve the high-quality solutions found along the evolutionary search process;
+         # second, to maintain the diversity of the population for further exploration.
+         Q = self.opt.get("X")  # non-dominated solutions
+         b = min(int(self.pop_size / 2), len(Q))
+         idx = self.random_state.choice(np.arange(len(Q)), size=b)
+         pop[a:(a + b)] = Population.new(X=Q[idx])
+
+         # randomly generated solutions fill the rest of the population
+         c = self.pop_size - a - b
+         if c > 0:
+             pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)
+
+         return pop
@@ -0,0 +1,94 @@
+ import numpy as np
+ from pymoo.core.population import Population
+
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
+ from pydmoo.core.ar_model import ARModel
+ from pydmoo.core.bounds import matrix_conditional_update
+ from pydmoo.core.manifold import manifold_prediction
+
+
+ class NSGA2PPS(DNSGA2):
+     """Population Prediction Strategy (center point prediction and manifold prediction).
+
+     References
+     ----------
+     Zhou, A., Jin, Y., and Zhang, Q. (2014).
+     A population prediction strategy for evolutionary dynamic multiobjective optimization.
+     IEEE Transactions on Cybernetics, 44(1), 40–53.
+     https://doi.org/10.1109/TCYB.2013.2245892
+     """
+
+     def __init__(self, **kwargs):
+
+         super().__init__(**kwargs)
+
+         self.p = 3  # order of the AR model
+         self.M = 23  # maximum length of the history of center points
+
+     def _response_change(self):
+         """Response."""
+         pop = self.pop
+         X = pop.get("X")
+
+         # archive center points
+         center_points = self.data.get("center_points", [])
+         center_points.append(np.mean(self.opt.get("X"), axis=0))
+
+         # keep at most the last M center points
+         center_points = center_points[(-self.M):]
+         self.data["center_points"] = center_points
+
+         # archive populations
+         Xs = self.data.get("Xs", [])
+         Xs.append(self.pop.get("X"))  # current population
+         Xs = Xs[-2:]
+         self.data["Xs"] = Xs
+
+         if len(center_points) >= (self.p + 1):
+
+             C1, distance = manifold_prediction(Xs[0], Xs[1])
+             n = C1.shape[1]  # dimensionality of the manifold
+             variance = (distance ** 2) / n
+
+             center, variances = self.center_points_prediction(center_points)
+
+             X = center + C1 + self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
+
+             # bounds
+             if self.problem.has_bounds():
+                 xl, xu = self.problem.bounds()
+                 X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+         else:
+
+             # recreate the current population without evaluating it
+             pop = Population.new(X=X)
+
+             # randomly sample half of the population and reuse half from the previous search
+             # when the history is not long enough to build an AR(p) model.
+
+             # randomly sample half of the population
+             a = int(self.pop_size / 2)
+             pop[:a] = self.initialization.sampling(self.problem, a, random_state=self.random_state)
+
+             # randomly reuse the other half from the previous population
+             Q = self.pop.get("X")
+             b = self.pop_size - a
+             idx = self.random_state.choice(np.arange(len(Q)), size=b)
+             pop[a:] = Population.new(X=Q[idx])
+
+         return pop
+
+     def center_points_prediction(self, center_points):
+         n = len(center_points[0])
+         center = np.zeros(n)
+         variances = np.zeros(n)
+         for i in range(len(center)):
+             data = [c[i] for c in center_points]
+             model = ARModel(self.p).fit(data)
+             predictions = model.predict(data, 1)
+             center[i], variances[i] = predictions[0], np.mean(model.resid_ ** 2)
+         return center, variances