pydmoo 0.0.18-py3-none-any.whl → 0.1.0-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (69)
  1. pydmoo/algorithms/base/__init__.py +20 -0
  2. pydmoo/algorithms/base/core/__init__.py +0 -0
  3. pydmoo/algorithms/base/core/algorithm.py +416 -0
  4. pydmoo/algorithms/base/core/genetic.py +129 -0
  5. pydmoo/algorithms/base/dmoo/__init__.py +0 -0
  6. pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
  7. pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
  8. pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
  9. pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
  10. pydmoo/algorithms/base/moo/__init__.py +0 -0
  11. pydmoo/algorithms/base/moo/moead.py +199 -0
  12. pydmoo/algorithms/base/moo/moeadde.py +105 -0
  13. pydmoo/algorithms/base/moo/mopso.py +0 -0
  14. pydmoo/algorithms/base/moo/nsga2.py +122 -0
  15. pydmoo/algorithms/modern/__init__.py +94 -0
  16. pydmoo/algorithms/modern/moead_imkt.py +161 -0
  17. pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
  18. pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
  19. pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
  20. pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
  21. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
  22. pydmoo/algorithms/modern/moead_ktmm.py +112 -0
  23. pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
  24. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
  25. pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
  26. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
  27. pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
  28. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
  29. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
  30. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
  31. pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
  32. pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
  33. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
  34. pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
  35. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
  36. pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
  37. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
  38. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
  39. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
  40. pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
  41. pydmoo/algorithms/utils/__init__.py +0 -0
  42. pydmoo/algorithms/utils/utils.py +166 -0
  43. pydmoo/core/__init__.py +0 -0
  44. pydmoo/{response → core}/ar_model.py +4 -4
  45. pydmoo/{response → core}/bounds.py +35 -2
  46. pydmoo/core/distance.py +45 -0
  47. pydmoo/core/inverse.py +55 -0
  48. pydmoo/core/lstm/__init__.py +0 -0
  49. pydmoo/core/lstm/base.py +291 -0
  50. pydmoo/core/lstm/lstm.py +491 -0
  51. pydmoo/core/manifold.py +93 -0
  52. pydmoo/core/predictions.py +50 -0
  53. pydmoo/core/sample_gaussian.py +56 -0
  54. pydmoo/core/sample_uniform.py +63 -0
  55. pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
  56. pydmoo/problems/__init__.py +53 -49
  57. pydmoo/problems/dyn.py +94 -13
  58. pydmoo/problems/dynamic/cec2015.py +10 -5
  59. pydmoo/problems/dynamic/df.py +6 -3
  60. pydmoo/problems/dynamic/gts.py +69 -34
  61. pydmoo/problems/real_world/__init__.py +0 -0
  62. pydmoo/problems/real_world/dsrp.py +168 -0
  63. pydmoo/problems/real_world/dwbdp.py +189 -0
  64. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
  65. pydmoo-0.1.0.dist-info/RECORD +70 -0
  66. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
  67. pydmoo-0.0.18.dist-info/RECORD +0 -15
  68. /pydmoo/{response → algorithms}/__init__.py +0 -0
  69. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
pydmoo/algorithms/base/__init__.py
@@ -0,0 +1,20 @@
+ __all__ = [
+     "DMOEADA",
+     "DMOEADB",
+     "DMOEADDEA",
+     "DMOEADDEB",
+     "DNSGA2A",
+     "DNSGA2B",
+     "MOEAD",
+     "MOEADDE",
+     "NSGA2",
+     "Algorithm"
+ ]
+
+ from .core.algorithm import Algorithm
+ from .dmoo.dmoead import DMOEADA, DMOEADB
+ from .dmoo.dmoeadde import DMOEADDEA, DMOEADDEB
+ from .dmoo.dnsga2 import DNSGA2A, DNSGA2B
+ from .moo.moead import MOEAD
+ from .moo.moeadde import MOEADDE
+ from .moo.nsga2 import NSGA2
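
The new pydmoo.algorithms.base package re-exports the static and dynamic baseline algorithms under a single namespace. A minimal sketch of what that enables; the pop_size argument is an assumption about the NSGA2/DNSGA2A constructors, not taken from this diff:

    from pydmoo.algorithms.base import DNSGA2A, NSGA2

    static = NSGA2(pop_size=100)     # assumed constructor argument
    dynamic = DNSGA2A(pop_size=100)  # assumed constructor argument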
File without changes
pydmoo/algorithms/base/core/algorithm.py
@@ -0,0 +1,416 @@
+ """
+ Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
+
+ Sources:
+ - [algorithm.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/core/algorithm.py).
+
+ Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
+ """
+
+ import copy
+ import time
+
+ import numpy as np
+ from pymoo.core.callback import Callback
+ from pymoo.core.evaluator import Evaluator
+ from pymoo.core.meta import Meta
+ from pymoo.core.population import Population
+ from pymoo.core.result import Result
+ from pymoo.functions import FunctionLoader
+ from pymoo.termination.default import DefaultMultiObjectiveTermination, DefaultSingleObjectiveTermination
+ from pymoo.util.display.display import Display
+ from pymoo.util.misc import termination_from_tuple
+ from pymoo.util.optimum import filter_optimum
+
+
+ class Algorithm:
+
+     def __init__(self,
+                  termination=None,
+                  output=None,
+                  display=None,
+                  callback=None,
+                  archive=None,
+                  return_least_infeasible=False,
+                  save_history=False,
+                  verbose=False,
+                  seed=None,
+                  evaluator=None,
+                  **kwargs):
+
+         super().__init__()
+
+         # prints the compile warning if enabled
+         FunctionLoader.get_instance()
+
+         # the problem to be solved (will be set later on)
+         self.problem = None
+
+         # the termination criterion to be used by the algorithm - might be specific for an algorithm
+         self.termination = termination
+
+         # the text that should be printed during the algorithm run
+         self.output = output
+
+         # an archive kept during algorithm execution (not always the same as optimum)
+         self.archive = archive
+
+         # the form of display shown during algorithm execution
+         self.display = display
+
+         # callback to be executed each generation
+         if callback is None:
+             callback = Callback()
+         self.callback = callback
+
+         # whether the algorithm should finally return the least infeasible solution if no feasible solution was found
+         self.return_least_infeasible = return_least_infeasible
+
+         # whether the history should be saved or not
+         self.save_history = save_history
+
+         # whether the algorithm should print output in this run or not
+         self.verbose = verbose
+
+         # the random seed that was used
+         self.seed = seed
+         self.random_state = None
+
+         # the function evaluator object (can be used to inject code)
+         if evaluator is None:
+             evaluator = Evaluator()
+         self.evaluator = evaluator
+
+         # the history object which contains the list
+         self.history = list()
+
+         # the current solutions stored - here considered as population
+         self.pop = None
+
+         # a placeholder object for implementation to store solutions in each iteration
+         self.off = None
+
+         # the optimum found by the algorithm
+         self.opt = None
+
+         # the current number of generation or iteration
+         self.n_iter = None
+
+         # can be used to store additional data in submodules
+         self.data = {}
+
+         # whether the initialization method has been called before or not
+         self.is_initialized = False
+
+         # the time when the algorithm has been setup for the first time
+         self.start_time = None
+
+     def setup(self, problem, verbose=False, progress=False, **kwargs):
+
+         # the problem to be solved by the algorithm
+         self.problem = problem
+
+         # clone the output object if it exists to avoid state pollution between runs
+         if self.output is not None:
+             self.output = copy.deepcopy(self.output)
+
+         # set all the provided options to this method
+         for key, value in kwargs.items():
+             self.__dict__[key] = value
+
+         # set random state
+         self.random_state = np.random.default_rng(self.seed)
+
+         # make sure that some type of termination criterion is set
+         if self.termination is None:
+             self.termination = default_termination(problem)
+         else:
+             self.termination = termination_from_tuple(self.termination)
+
+         # set up the display during the algorithm execution
+         if self.display is None:
+             self.display = Display(self.output, verbose=verbose, progress=progress)
+
+         # finally call the function that can be overwritten by the actual algorithm
+         self._setup(problem, **kwargs)
+
+         return self
+
+     def run(self):
+         while self.has_next():
+             self.next()
+         return self.result()
+
+     def has_next(self):
+         return not self.termination.has_terminated()
+
+     def finalize(self):
+
+         # finalize the display output at the end of the run
+         self.display.finalize()
+
+         return self._finalize()
+
+     def next(self):
+
+         # get the infill solutions
+         infills = self.infill()
+
+         # call the advance with them after evaluation
+         if infills is not None:
+             self.evaluator.eval(self.problem, infills, algorithm=self)
+             self.advance(infills=infills)
+
+         # if the algorithm does not follow the infill-advance scheme just call advance
+         else:
+             self.advance()
+
+     def _initialize(self):
+
+         # the time starts whenever this method is called
+         self.start_time = time.time()
+
+         # set the attribute for the optimization method to start
+         self.n_iter = 1
+         self.pop = Population.empty()
+         self.opt = None
+
+     def infill(self):
+         if self.problem is None:
+             raise Exception("Please call `setup(problem)` before calling next().")
+
+         # the first time next is called simply initialize the algorithm - makes the interface cleaner
+         if not self.is_initialized:
+
+             # hook mostly used by the class to run code before the initialization
+             self._initialize()
+
+             # execute the initialization infill of the algorithm
+             infills = self._initialize_infill()
+
+         else:
+             # request the infill solutions if the algorithm has implemented it
+             infills = self._infill()
+
+         # set the current generation to the offsprings
+         if infills is not None:
+             infills.set("n_gen", self.n_iter)
+             infills.set("n_iter", self.n_iter)
+
+         return infills
+
+     def advance(self, infills=None, **kwargs):
+
+         # if infills have been provided set them as offsprings and feed them into advance
+         self.off = infills
+
+         # if the algorithm has not already been initialized
+         if not self.is_initialized:
+
+             # set the generation counter to 1
+             self.n_iter = 1
+
+             # assign the population to the algorithm
+             self.pop = infills
+
+             # do what is necessary after the initialization
+             self._initialize_advance(infills=infills, **kwargs)
+
+             # set this algorithm to be initialized
+             self.is_initialized = True
+
+             # always advance to the next iteration after initialization
+             self._post_advance()
+
+         else:
+
+             # call the implementation of the advance method - if the infill is not None
+             val = self._advance(infills=infills, **kwargs)
+
+             # always advance to the next iteration - except if the algorithm returns False
+             if val is None or val:
+                 self._post_advance()
+
+         # if the algorithm has terminated, then do the finalization steps and return the result
+         if self.termination.has_terminated():
+             self.finalize()
+             ret = self.result()
+
+         # otherwise just increase the iteration counter for the next step and return the current optimum
+         else:
+             ret = self.opt
+
+         # add the infill solutions to an archive
+         if self.archive is not None and infills is not None:
+             self.archive = self.archive.add(infills)
+
+         return ret
+
+     def result(self):
+         res = Result()
+
+         # store the time when the algorithm has finished
+         res.start_time = self.start_time
+         res.end_time = time.time()
+         res.exec_time = res.end_time - res.start_time
+
+         res.pop = self.pop
+         res.archive = self.archive
+         res.data = self.data
+
+         # get the optimal solution found
+         opt = self.opt
+         if opt is None or len(opt) == 0:
+             opt = None
+
+         # if no feasible solution has been found
+         elif not np.any(opt.get("FEAS")):
+             if self.return_least_infeasible:
+                 opt = filter_optimum(opt, least_infeasible=True)
+             else:
+                 opt = None
+         res.opt = opt
+
+         # if the optimum is None, do not report anything
+         if res.opt is None:
+             X, F, CV, G, H = None, None, None, None, None
+
+         # otherwise get the values from the population
+         else:
+             X, F, CV, G, H = self.opt.get("X", "F", "CV", "G", "H")
+
+             # if single-objective problem and only one solution was found - create a 1d array
+             if self.problem.n_obj == 1 and len(X) == 1:
+                 X, F, CV, G, H = X[0], F[0], CV[0], G[0], H[0]
+
+         # set all the individual values
+         res.X, res.F, res.CV, res.G, res.H = X, F, CV, G, H
+
+         # create the result object
+         res.problem = self.problem
+         res.history = self.history
+
+         return res
+
+     def ask(self):
+         return self.infill()
+
+     def tell(self, *args, **kwargs):
+         return self.advance(*args, **kwargs)
+
+     def _set_optimum(self):
+         self.opt = filter_optimum(self.pop, least_infeasible=True)
+
+     def _post_advance(self):
+
+         # update the current optimum of the algorithm
+         self._set_optimum()
+
+         # update the current termination condition of the algorithm
+         self.termination.update(self)
+
+         # display the output if defined by the algorithm
+         self.display(self)
+
+         if self.save_history:
+             _hist, _callback, _display = self.history, self.callback, self.display
+
+             self.history, self.callback, self.display = None, None, None
+             obj = copy.deepcopy(self)
+
+             self.history, self.callback, self.display = _hist, _callback, _display
+             self.history.append(obj)
+
+         # if a callback function is provided it is called after each iteration
+         self.callback(self)
+
+         self.n_iter += 1
+
+     # =========================================================================================================
+     # TO BE OVERWRITTEN
+     # =========================================================================================================
+
+     def _setup(self, problem, **kwargs):
+         pass
+
+     def _initialize_infill(self):
+         pass
+
+     def _initialize_advance(self, infills=None, **kwargs):
+         pass
+
+     def _infill(self):
+         pass
+
+     def _advance(self, infills=None, **kwargs):
+         pass
+
+     def _finalize(self):
+         pass
+
+     # =========================================================================================================
+     # CONVENIENCE
+     # =========================================================================================================
+
+     @property
+     def n_gen(self):
+         return self.n_iter
+
+     @n_gen.setter
+     def n_gen(self, value):
+         self.n_iter = value
+
+
+ class LoopwiseAlgorithm(Algorithm):
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.generator = None
+         self.state = None
+
+     def _next(self):
+         pass
+
+     def _infill(self):
+         if self.state is None:
+             self._advance()
+         return self.state
+
+     def _advance(self, infills=None, **kwargs):
+         if self.generator is None:
+             self.generator = self._next()
+         try:
+             self.state = self.generator.send(infills)
+         except StopIteration:
+             self.generator = None
+             self.state = None
+             return True
+
+         return False
+
+
+ def default_termination(problem):
+     if problem.n_obj > 1:
+         termination = DefaultMultiObjectiveTermination()
+     else:
+         termination = DefaultSingleObjectiveTermination()
+     return termination
+
+
+ class MetaAlgorithm(Meta):
+     """
+     An algorithm wrapper that combines Algorithm's functionality with Meta's delegation behavior.
+     Uses Meta to provide transparent proxying with the ability to override specific methods.
+     """
+
+     def __init__(self, algorithm, copy=True, **kwargs):
+         # If the algorithm is already a Meta object, don't copy to avoid deepcopy issues with nested proxies
+         if isinstance(algorithm, Meta):
+             copy = False
+
+         # Initialize Meta
+         super().__init__(algorithm, copy=copy)
+
+         # Pass any additional kwargs to the wrapped algorithm if needed
+         for key, value in kwargs.items():
+             setattr(self, key, value)
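
The Algorithm class above exposes both a run() convenience method and an ask/tell interface (ask/infill, tell/advance, result). A minimal usage sketch of the ask/tell loop; the ZDT1 test problem and the NSGA2 constructor argument are assumptions for illustration, not taken from this diff:

    from pymoo.problems import get_problem

    from pydmoo.algorithms.base import NSGA2

    problem = get_problem("zdt1")
    algorithm = NSGA2(pop_size=100)  # assumed constructor argument
    algorithm.setup(problem, termination=("n_gen", 200), seed=1)

    while algorithm.has_next():
        # ask for infill solutions, evaluate them, and feed them back
        infills = algorithm.ask()
        algorithm.evaluator.eval(problem, infills, algorithm=algorithm)
        algorithm.tell(infills=infills)

    res = algorithm.result()
    print(res.exec_time, None if res.F is None else res.F.shape)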
pydmoo/algorithms/base/core/genetic.py
@@ -0,0 +1,129 @@
+ """
+ Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
+
+ Sources:
+ - [genetic.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/algorithms/base/genetic.py).
+
+ Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
+
+ Adds a method `_infill_static_dynamic` for dynamic multi-objective optimization.
+ """
+ from pymoo.core.duplicate import DefaultDuplicateElimination, NoDuplicateElimination
+ from pymoo.core.initialization import Initialization
+ from pymoo.core.mating import Mating
+ from pymoo.core.population import Population
+ from pymoo.core.repair import NoRepair
+
+ from pydmoo.algorithms.base.core.algorithm import Algorithm
+
+
+ class GeneticAlgorithm(Algorithm):
+
+     def __init__(self,
+                  pop_size=None,
+                  sampling=None,
+                  selection=None,
+                  crossover=None,
+                  mutation=None,
+                  survival=None,
+                  n_offsprings=None,
+                  eliminate_duplicates=DefaultDuplicateElimination(),
+                  repair=None,
+                  mating=None,
+                  advance_after_initial_infill=False,
+                  **kwargs
+                  ):
+
+         super().__init__(**kwargs)
+
+         # the population size used
+         self.pop_size = pop_size
+
+         # whether the algorithm should be advanced after initialization or not
+         self.advance_after_initial_infill = advance_after_initial_infill
+
+         # the survival for the genetic algorithm
+         self.survival = survival
+
+         # number of offsprings to generate through recombination
+         self.n_offsprings = n_offsprings
+
+         # if the number of offspring is not set - equal to population size
+         if self.n_offsprings is None:
+             self.n_offsprings = pop_size
+
+         # set the duplicate detection class - a boolean value chooses the default duplicate detection
+         if isinstance(eliminate_duplicates, bool):
+             if eliminate_duplicates:
+                 self.eliminate_duplicates = DefaultDuplicateElimination()
+             else:
+                 self.eliminate_duplicates = NoDuplicateElimination()
+         else:
+             self.eliminate_duplicates = eliminate_duplicates
+
+         # simply set the no repair object if it is None
+         self.repair = repair if repair is not None else NoRepair()
+
+         self.initialization = Initialization(sampling,
+                                              repair=self.repair,
+                                              eliminate_duplicates=self.eliminate_duplicates)
+
+         if mating is None:
+             mating = Mating(selection,
+                             crossover,
+                             mutation,
+                             repair=self.repair,
+                             eliminate_duplicates=self.eliminate_duplicates,
+                             n_max_iterations=100)
+         self.mating = mating
+
+         # other run specific data updated whenever solve is called - to share them in all algorithms
+         self.n_gen = None
+         self.pop = None
+         self.off = None
+
+     def _initialize_infill(self):
+         pop = self.initialization.do(self.problem, self.pop_size, algorithm=self, random_state=self.random_state)
+         return pop
+
+     def _initialize_advance(self, infills=None, **kwargs):
+         if self.advance_after_initial_infill:
+             self.pop = self.survival.do(self.problem, infills, n_survive=len(infills),
+                                         random_state=self.random_state, algorithm=self, **kwargs)
+
+     def _infill(self):
+         # Added by DynOpt on Dec 21, 2025
+         pop = self._infill_static_dynamic()
+
+         # do the mating using the current population
+         off = self.mating.do(self.problem, pop, self.n_offsprings, algorithm=self, random_state=self.random_state)
+
+         # if the mating could not generate any new offspring (duplicate elimination might make that happen)
+         if len(off) == 0:
+             self.termination.force_termination = True
+             return
+
+         # if not the desired number of offspring could be created
+         elif len(off) < self.n_offsprings:
+             if self.verbose:
+                 print("WARNING: Mating could not produce the required number of (unique) offsprings!")
+
+         return off
+
+     # Added by DynOpt on Dec 21, 2025
+     def _infill_static_dynamic(self):
+         pop = self.pop
+
+         return pop
+
+     def _advance(self, infills=None, **kwargs):
+
+         # the current population
+         pop = self.pop
+
+         # merge the offsprings with the current population
+         if infills is not None:
+             pop = Population.merge(self.pop, infills)
+
+         # execute the survival to find the fittest solutions
+         self.pop = self.survival.do(self.problem, pop, n_survive=self.pop_size, algorithm=self, random_state=self.random_state, **kwargs)
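
The _infill_static_dynamic hook added above gives dynamic variants a place to adjust the parent population before mating. A sketch of how a subclass might use it; the class name and the change detector are hypothetical, not part of this diff:

    from pymoo.core.population import Population

    from pydmoo.algorithms.base.core.genetic import GeneticAlgorithm


    class MyDynamicGA(GeneticAlgorithm):

        def _infill_static_dynamic(self):
            pop = self.pop

            # hypothetical detector: re-evaluate the population after a change
            if self._change_detected():
                pop = Population.new(X=pop.get("X"))
                self.evaluator.eval(self.problem, pop)
                self.pop = pop

            return pop

        def _change_detected(self):
            # placeholder; a real implementation would re-evaluate a few
            # individuals and compare objective values, as DMOEAD does below
            return False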
File without changes
pydmoo/algorithms/base/dmoo/dmoead.py
@@ -0,0 +1,131 @@
+ import time
+
+ import numpy as np
+ from pymoo.core.population import Population
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
+
+ from pydmoo.algorithms.base.moo.moead import MOEAD
+
+
+ class DMOEAD(MOEAD):
+
+     def __init__(self,
+                  perc_detect_change=0.1,
+                  eps=0.0,
+                  **kwargs):
+
+         super().__init__(**kwargs)
+         self.perc_detect_change = perc_detect_change
+         self.eps = eps
+
+     def setup(self, problem, **kwargs):
+         assert not problem.has_constraints(), f"{self.__class__.__name__} only works for unconstrained problems."
+         return super().setup(problem, **kwargs)
+
+     def _detect_change_sample_part_population(self):
+         pop = self.pop
+         X, F = pop.get("X", "F")
+
+         # the number of solutions to sample from the population to detect the change
+         n_samples = int(np.ceil(len(pop) * self.perc_detect_change))
+
+         # choose randomly some individuals of the current population to test if there was a change
+         I = self.random_state.choice(np.arange(len(pop)), size=n_samples)
+         samples = self.evaluator.eval(self.problem, Population.new(X=X[I]))
+
+         # calculate the differences between the old and newly evaluated pop
+         delta = ((samples.get("F") - F[I]) ** 2).mean()
+
+         # if there is an average deviation bigger than eps -> we have a change detected
+         change_detected = delta > self.eps
+         return change_detected
+
+     def _next_static_dynamic(self):
+         # for dynamic environment
+         pop = self.pop
+
+         if self.state is None:
+
+             change_detected = self._detect_change_sample_part_population()
+
+             if change_detected:
+
+                 start_time = time.time()
+
+                 pop = self._response_change()
+
+                 # reevaluate because we know there was a change
+                 self.evaluator.eval(self.problem, pop)
+
+                 if len(pop) > self.pop_size:
+                     # do a survival to recreate rank and crowding of all individuals
+                     # Modified by DynOpt on Dec 21, 2025
+                     # n_survive=len(pop) -> n_survive=self.pop_size
+                     pop = RankAndCrowding().do(self.problem, pop, n_survive=self.pop_size, random_state=self.random_state)
+
+                 self.pop = pop
+
+                 self.data["response_duration"] = time.time() - start_time
+
+         return pop
+
+     def _response_change(self):
+         pass
+
+
+ class DMOEADA(DMOEAD):
+
+     def __init__(self,
+                  perc_detect_change=0.1,
+                  eps=0.0,
+                  perc_diversity=0.3,
+                  **kwargs):
+         super().__init__(perc_detect_change=perc_detect_change,
+                          eps=eps,
+                          **kwargs)
+
+         self.perc_diversity = perc_diversity
+
+     def _response_change(self):
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without being evaluated
+         pop = Population.new(X=X)
+
+         # find indices to be replaced (introduce diversity)
+         I = np.where(self.random_state.random(len(pop)) < self.perc_diversity)[0]
+
+         # replace with randomly sampled individuals
+         pop[I] = self.initialization.sampling(self.problem, len(I), random_state=self.random_state)
+
+         return pop
+
+
+ class DMOEADB(DMOEAD):
+
+     def __init__(self,
+                  perc_detect_change=0.1,
+                  eps=0.0,
+                  perc_diversity=0.3,
+                  **kwargs):
+         super().__init__(perc_detect_change=perc_detect_change,
+                          eps=eps,
+                          **kwargs)
+
+         self.perc_diversity = perc_diversity
+
+     def _response_change(self):
+         pop = self.pop
+         X = pop.get("X")
+
+         # recreate the current population without being evaluated
+         pop = Population.new(X=X)
+
+         # find indices to be replaced (introduce diversity)
+         I = np.where(self.random_state.random(len(pop)) < self.perc_diversity)[0]
+
+         # replace by mutations of existing solutions (this occurs inplace)
+         self.mating.mutation(self.problem, pop[I])
+
+         return pop
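
DMOEADA and DMOEADB differ only in how _response_change reintroduces diversity after a detected change (random resampling vs. mutation). A sketch of a third, hypothetical response that resamples half of the population uniformly within the variable bounds; the class name and the bounds handling are assumptions, not part of this diff:

    from pymoo.core.population import Population

    from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD


    class DMOEADRandomHalf(DMOEAD):

        def _response_change(self):
            X = self.pop.get("X")

            # recreate the current population without evaluations
            pop = Population.new(X=X)

            # pick a random half of the individuals to replace
            I = self.random_state.choice(len(pop), size=len(pop) // 2, replace=False)

            # resample them uniformly within the decision-space bounds
            xl, xu = self.problem.bounds()
            X_new = self.random_state.uniform(xl, xu, size=(len(I), self.problem.n_var))
            pop[I] = Population.new(X=X_new)

            return pop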