pydmoo 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
File without changes
@@ -0,0 +1,77 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
6
+ from pydmoo.core.inverse import closed_form_solution
7
+
8
+
9
class MOEADAE(DMOEAD):
    """Autoencoding (AE) change response for MOEA/D.

    After a change is detected, a closed-form linear mapping (denoising
    autoencoder) is learned between the two most recently archived Pareto
    sets and used to predict part of the new population; the rest is filled
    with reused non-dominated solutions and random samples.

    References
    ----------
    Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
    Solving dynamic multiobjective problem via autoencoding evolutionary search.
    IEEE Transactions on Cybernetics, 52(5), 2649–2662.
    https://doi.org/10.1109/TCYB.2020.3017017
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _response_change(self):
        """Rebuild the population after an environment change.

        Returns
        -------
        Population
            A population of size ``self.pop_size`` composed of (at most)
            half predicted-and-evaluated solutions, (at most) half reused
            non-dominated solutions, and random samples for the remainder.
        """
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        pop = Population.new(X=X)

        # archive the two most recent Pareto sets for the AE prediction
        PSs = self.data.get("PSs", [])
        PSs.append(self.opt.get("X"))  # Pareto set
        PSs = PSs[-2:]
        self.data["PSs"] = PSs

        a = 0
        if len(PSs) == 2:
            # previous (P) and current (Q) Pareto sets
            P, Q = PSs

            # learn the mapping M from the closed-form solution of Q = P M
            min_len = min(len(P), len(Q))
            M = closed_form_solution(Q[:min_len], P[:min_len])

            # predict the next Pareto set: X = Q M
            X = np.dot(Q, M)

            # repair out-of-bounds predictions
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = np.clip(X, xl, xu)  # not provided in the original reference literature

            # evaluate the predicted population
            samples = self.evaluator.eval(self.problem, Population.new(X=X))
            a = min(self.pop_size // 2, len(samples))

            # do a survival to recreate rank and crowding of all individuals
            samples = RankAndCrowding().do(self.problem, samples, n_survive=a, random_state=self.random_state)

            pop[:a] = samples[:a]

        # randomly select solutions from the previous Pareto set:
        # first, to preserve the high-quality solutions found along the evolutionary search process;
        # second, to maintain the diversity of the population for further exploration.
        Q = self.opt.get("X")  # non-dominated solutions
        b = min(self.pop_size // 2, len(Q))
        # b <= len(Q), so sample without replacement to avoid duplicates
        idx = self.random_state.choice(np.arange(len(Q)), size=b, replace=False)
        pop[a:(a + b)] = Population.new(X=Q[idx])

        # randomly generated solutions fill the rest of the population
        c = self.pop_size - a - b
        if c > 0:
            pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)

        return pop
@@ -0,0 +1,94 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.base.dmoo.dmoead import DMOEAD
5
+ from pydmoo.core.ar_model import ARModel
6
+ from pydmoo.core.bounds import matrix_conditional_update
7
+ from pydmoo.core.manifold import manifold_prediction
8
+
9
+
10
class MOEADPPS(DMOEAD):
    """Population Prediction Strategy (PPS) for MOEA/D.

    Combines an AR(p) forecast of the population center point with a
    manifold-based prediction of the population shape. Falls back to a
    half-random / half-reused population while the archived history is
    still too short to fit the AR model.

    References
    ----------
    Zhou, A., Jin, Y., and Zhang, Q. (2014).
    A population prediction strategy for evolutionary dynamic multiobjective optimization.
    IEEE Transactions on Cybernetics, 44(1), 40–53.
    https://doi.org/10.1109/TCYB.2013.2245892
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.p = 3  # order of the AR model
        self.M = 23  # maximum length of the history of center points

    def _response_change(self):
        """Predict the next population from archived history, or re-seed it."""
        pop = self.pop
        X = pop.get("X")

        # archive the mean of the current non-dominated set,
        # keeping only the most recent self.M center points
        center_points = self.data.get("center_points", [])
        center_points.append(np.mean(self.opt.get("X"), axis=0))
        center_points = center_points[(-self.M):]
        self.data["center_points"] = center_points

        # archive the two most recent decision-variable matrices
        Xs = self.data.get("Xs", [])
        Xs.append(self.pop.get("X"))
        Xs = Xs[-2:]
        self.data["Xs"] = Xs

        enough_history = len(center_points) >= self.p + 1
        if enough_history:
            # manifold term plus a variance estimate derived from the
            # distance between the last two populations
            C1, distance = manifold_prediction(Xs[0], Xs[1])
            n = C1.shape[1]  # dimensionality of the manifold
            variance = distance ** 2 / n

            # AR(p) forecast of the next center point (per dimension)
            center, variances = self.center_points_prediction(center_points)

            noise = self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
            X = center + C1 + noise

            # keep predictions within bounds, falling back to current values
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))

            # recreate the population without evaluating it
            pop = Population.new(X=X)
        else:
            # history is too short for an AR(p) model: recreate the
            # population, then re-sample one half and reuse the other
            # half from the current population
            pop = Population.new(X=X)

            half = int(self.pop_size / 2)
            pop[:half] = self.initialization.sampling(self.problem, half, random_state=self.random_state)

            Q = self.pop.get("X")
            rest = self.pop_size - half
            idx = self.random_state.choice(np.arange(len(Q)), size=rest)
            pop[half:] = Population.new(X=Q[idx])

        return pop

    def center_points_prediction(self, center_points):
        """Forecast the next center point dimension-wise with an AR(p) model.

        Returns
        -------
        tuple of numpy.ndarray
            The predicted center point and the mean squared residual of the
            fitted model for each dimension.
        """
        dim = len(center_points[0])
        center = np.zeros(dim)
        variances = np.zeros(dim)
        for i in range(dim):
            series = [c[i] for c in center_points]
            model = ARModel(self.p).fit(series)
            center[i] = model.predict(series, 1)[0]
            variances[i] = np.mean(model.resid_ ** 2)
        return center, variances
@@ -0,0 +1,77 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
6
+ from pydmoo.core.inverse import closed_form_solution
7
+
8
+
9
class MOEADDEAE(DMOEADDE):
    """Autoencoding (AE) change response for MOEA/D-DE.

    After a change is detected, a closed-form linear mapping (denoising
    autoencoder) is learned between the two most recently archived Pareto
    sets and used to predict part of the new population; the rest is filled
    with reused non-dominated solutions and random samples.

    References
    ----------
    Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
    Solving dynamic multiobjective problem via autoencoding evolutionary search.
    IEEE Transactions on Cybernetics, 52(5), 2649–2662.
    https://doi.org/10.1109/TCYB.2020.3017017
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _response_change(self):
        """Rebuild the population after an environment change.

        Returns
        -------
        Population
            A population of size ``self.pop_size`` composed of (at most)
            half predicted-and-evaluated solutions, (at most) half reused
            non-dominated solutions, and random samples for the remainder.
        """
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        pop = Population.new(X=X)

        # archive the two most recent Pareto sets for the AE prediction
        PSs = self.data.get("PSs", [])
        PSs.append(self.opt.get("X"))  # Pareto set
        PSs = PSs[-2:]
        self.data["PSs"] = PSs

        a = 0
        if len(PSs) == 2:
            # previous (P) and current (Q) Pareto sets
            P, Q = PSs

            # learn the mapping M from the closed-form solution of Q = P M
            min_len = min(len(P), len(Q))
            M = closed_form_solution(Q[:min_len], P[:min_len])

            # predict the next Pareto set: X = Q M
            X = np.dot(Q, M)

            # repair out-of-bounds predictions
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = np.clip(X, xl, xu)  # not provided in the original reference literature

            # evaluate the predicted population
            samples = self.evaluator.eval(self.problem, Population.new(X=X))
            a = min(self.pop_size // 2, len(samples))

            # do a survival to recreate rank and crowding of all individuals
            samples = RankAndCrowding().do(self.problem, samples, n_survive=a, random_state=self.random_state)

            pop[:a] = samples[:a]

        # randomly select solutions from the previous Pareto set:
        # first, to preserve the high-quality solutions found along the evolutionary search process;
        # second, to maintain the diversity of the population for further exploration.
        Q = self.opt.get("X")  # non-dominated solutions
        b = min(self.pop_size // 2, len(Q))
        # b <= len(Q), so sample without replacement to avoid duplicates
        idx = self.random_state.choice(np.arange(len(Q)), size=b, replace=False)
        pop[a:(a + b)] = Population.new(X=Q[idx])

        # randomly generated solutions fill the rest of the population
        c = self.pop_size - a - b
        if c > 0:
            pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)

        return pop
@@ -0,0 +1,94 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.base.dmoo.dmoeadde import DMOEADDE
5
+ from pydmoo.core.ar_model import ARModel
6
+ from pydmoo.core.bounds import matrix_conditional_update
7
+ from pydmoo.core.manifold import manifold_prediction
8
+
9
+
10
class MOEADDEPPS(DMOEADDE):
    """Population Prediction Strategy (PPS) for MOEA/D-DE.

    Combines an AR(p) forecast of the population center point with a
    manifold-based prediction of the population shape. Falls back to a
    half-random / half-reused population while the archived history is
    still too short to fit the AR model.

    References
    ----------
    Zhou, A., Jin, Y., and Zhang, Q. (2014).
    A population prediction strategy for evolutionary dynamic multiobjective optimization.
    IEEE Transactions on Cybernetics, 44(1), 40–53.
    https://doi.org/10.1109/TCYB.2013.2245892
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.p = 3  # order of the AR model
        self.M = 23  # maximum length of the history of center points

    def _response_change(self):
        """Predict the next population from archived history, or re-seed it."""
        pop = self.pop
        X = pop.get("X")

        # archive the mean of the current non-dominated set,
        # keeping only the most recent self.M center points
        center_points = self.data.get("center_points", [])
        center_points.append(np.mean(self.opt.get("X"), axis=0))
        center_points = center_points[(-self.M):]
        self.data["center_points"] = center_points

        # archive the two most recent decision-variable matrices
        Xs = self.data.get("Xs", [])
        Xs.append(self.pop.get("X"))
        Xs = Xs[-2:]
        self.data["Xs"] = Xs

        enough_history = len(center_points) >= self.p + 1
        if enough_history:
            # manifold term plus a variance estimate derived from the
            # distance between the last two populations
            C1, distance = manifold_prediction(Xs[0], Xs[1])
            n = C1.shape[1]  # dimensionality of the manifold
            variance = distance ** 2 / n

            # AR(p) forecast of the next center point (per dimension)
            center, variances = self.center_points_prediction(center_points)

            noise = self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
            X = center + C1 + noise

            # keep predictions within bounds, falling back to current values
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))

            # recreate the population without evaluating it
            pop = Population.new(X=X)
        else:
            # history is too short for an AR(p) model: recreate the
            # population, then re-sample one half and reuse the other
            # half from the current population
            pop = Population.new(X=X)

            half = int(self.pop_size / 2)
            pop[:half] = self.initialization.sampling(self.problem, half, random_state=self.random_state)

            Q = self.pop.get("X")
            rest = self.pop_size - half
            idx = self.random_state.choice(np.arange(len(Q)), size=rest)
            pop[half:] = Population.new(X=Q[idx])

        return pop

    def center_points_prediction(self, center_points):
        """Forecast the next center point dimension-wise with an AR(p) model.

        Returns
        -------
        tuple of numpy.ndarray
            The predicted center point and the mean squared residual of the
            fitted model for each dimension.
        """
        dim = len(center_points[0])
        center = np.zeros(dim)
        variances = np.zeros(dim)
        for i in range(dim):
            series = [c[i] for c in center_points]
            model = ARModel(self.p).fit(series)
            center[i] = model.predict(series, 1)[0]
            variances[i] = np.mean(model.resid_ ** 2)
        return center, variances
@@ -0,0 +1,76 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
5
+ from pydmoo.core.inverse import closed_form_solution
6
+
7
+
8
class NSGA2AE(DNSGA2):
    """Autoencoding (AE) change response for NSGA-II.

    After a change is detected, a closed-form linear mapping (denoising
    autoencoder) is learned between the two most recently archived Pareto
    sets and used to predict part of the new population; the rest is filled
    with reused non-dominated solutions and random samples.

    References
    ----------
    Feng, L., Zhou, W., Liu, W., Ong, Y.-S., and Tan, K. C. (2022).
    Solving dynamic multiobjective problem via autoencoding evolutionary search.
    IEEE Transactions on Cybernetics, 52(5), 2649–2662.
    https://doi.org/10.1109/TCYB.2020.3017017
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _response_change(self):
        """Rebuild the population after an environment change.

        Returns
        -------
        Population
            A population of size ``self.pop_size`` composed of (at most)
            half predicted-and-evaluated solutions, (at most) half reused
            non-dominated solutions, and random samples for the remainder.
        """
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        pop = Population.new(X=X)

        # archive the two most recent Pareto sets for the AE prediction
        PSs = self.data.get("PSs", [])
        PSs.append(self.opt.get("X"))  # Pareto set
        PSs = PSs[-2:]
        self.data["PSs"] = PSs

        a = 0
        if len(PSs) == 2:
            # previous (P) and current (Q) Pareto sets
            P, Q = PSs

            # learn the mapping M from the closed-form solution of Q = P M
            min_len = min(len(P), len(Q))
            M = closed_form_solution(Q[:min_len], P[:min_len])

            # predict the next Pareto set: X = Q M
            X = np.dot(Q, M)

            # repair out-of-bounds predictions
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = np.clip(X, xl, xu)  # not provided in the original reference literature

            # evaluate the predicted population
            samples = self.evaluator.eval(self.problem, Population.new(X=X))
            a = min(self.pop_size // 2, len(samples))

            # do a survival to recreate rank and crowding of all individuals
            samples = self.survival.do(self.problem, samples, n_survive=a, random_state=self.random_state)

            pop[:a] = samples[:a]

        # randomly select solutions from the previous Pareto set:
        # first, to preserve the high-quality solutions found along the evolutionary search process;
        # second, to maintain the diversity of the population for further exploration.
        Q = self.opt.get("X")  # non-dominated solutions
        b = min(self.pop_size // 2, len(Q))
        # b <= len(Q), so sample without replacement to avoid duplicates
        idx = self.random_state.choice(np.arange(len(Q)), size=b, replace=False)
        pop[a:(a + b)] = Population.new(X=Q[idx])

        # randomly generated solutions fill the rest of the population
        c = self.pop_size - a - b
        if c > 0:
            pop[(a + b):(a + b + c)] = self.initialization.sampling(self.problem, c, random_state=self.random_state)

        return pop
@@ -0,0 +1,94 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
5
+ from pydmoo.core.ar_model import ARModel
6
+ from pydmoo.core.bounds import matrix_conditional_update
7
+ from pydmoo.core.manifold import manifold_prediction
8
+
9
+
10
class NSGA2PPS(DNSGA2):
    """Population Prediction Strategy (PPS) for NSGA-II.

    Combines an AR(p) forecast of the population center point with a
    manifold-based prediction of the population shape. Falls back to a
    half-random / half-reused population while the archived history is
    still too short to fit the AR model.

    References
    ----------
    Zhou, A., Jin, Y., and Zhang, Q. (2014).
    A population prediction strategy for evolutionary dynamic multiobjective optimization.
    IEEE Transactions on Cybernetics, 44(1), 40–53.
    https://doi.org/10.1109/TCYB.2013.2245892
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.p = 3  # order of the AR model
        self.M = 23  # maximum length of the history of center points

    def _response_change(self):
        """Predict the next population from archived history, or re-seed it."""
        pop = self.pop
        X = pop.get("X")

        # archive the mean of the current non-dominated set,
        # keeping only the most recent self.M center points
        center_points = self.data.get("center_points", [])
        center_points.append(np.mean(self.opt.get("X"), axis=0))
        center_points = center_points[(-self.M):]
        self.data["center_points"] = center_points

        # archive the two most recent decision-variable matrices
        Xs = self.data.get("Xs", [])
        Xs.append(self.pop.get("X"))
        Xs = Xs[-2:]
        self.data["Xs"] = Xs

        enough_history = len(center_points) >= self.p + 1
        if enough_history:
            # manifold term plus a variance estimate derived from the
            # distance between the last two populations
            C1, distance = manifold_prediction(Xs[0], Xs[1])
            n = C1.shape[1]  # dimensionality of the manifold
            variance = distance ** 2 / n

            # AR(p) forecast of the next center point (per dimension)
            center, variances = self.center_points_prediction(center_points)

            noise = self.random_state.normal(loc=0, scale=np.sqrt(variances + variance), size=X.shape)
            X = center + C1 + noise

            # keep predictions within bounds, falling back to current values
            if self.problem.has_bounds():
                xl, xu = self.problem.bounds()
                X = matrix_conditional_update(X, xl, xu, self.pop.get("X"))

            # recreate the population without evaluating it
            pop = Population.new(X=X)
        else:
            # history is too short for an AR(p) model: recreate the
            # population, then re-sample one half and reuse the other
            # half from the current population
            pop = Population.new(X=X)

            half = int(self.pop_size / 2)
            pop[:half] = self.initialization.sampling(self.problem, half, random_state=self.random_state)

            Q = self.pop.get("X")
            rest = self.pop_size - half
            idx = self.random_state.choice(np.arange(len(Q)), size=rest)
            pop[half:] = Population.new(X=Q[idx])

        return pop

    def center_points_prediction(self, center_points):
        """Forecast the next center point dimension-wise with an AR(p) model.

        Returns
        -------
        tuple of numpy.ndarray
            The predicted center point and the mean squared residual of the
            fitted model for each dimension.
        """
        dim = len(center_points[0])
        center = np.zeros(dim)
        variances = np.zeros(dim)
        for i in range(dim):
            series = [c[i] for c in center_points]
            model = ARModel(self.p).fit(series)
            center[i] = model.predict(series, 1)[0]
            variances[i] = np.mean(model.resid_ ** 2)
        return center, variances
pydmoo/problems/dyn.py CHANGED
@@ -1,10 +1,11 @@
1
1
  """
2
2
  Includes modified code from [pymoo](https://github.com/anyoptimization/pymoo).
3
3
 
4
- Sources:
5
- - [dyn.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/problems/dyn.py)
6
-
7
- Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
4
+ > Sources:
5
+ >
6
+ > - [dyn.py](https://github.com/anyoptimization/pymoo/blob/main/pymoo/problems/dyn.py)
7
+ >
8
+ > Licensed under the Apache License, Version 2.0. Original copyright and license terms are preserved.
8
9
  """
9
10
 
10
11
  from abc import ABC
@@ -16,20 +17,66 @@ from pymoo.core.problem import Problem
16
17
 
17
18
 
18
19
  class DynamicProblem(Problem, ABC):
20
+ """Abstract base class for dynamic optimization problems."""
19
21
  pass
20
22
 
21
23
 
22
24
  class DynamicApplProblem(DynamicProblem):
25
+ """Dynamic optimization problem for real-world applications.
26
+
27
+ This class defines dynamic optimization problems that model practical, real-world scenarios where the problem
28
+ characteristics change systematically over time.
29
+
30
+ Parameters
31
+ ----------
32
+ nt : int
33
+ Severity of change. Controls how significantly the problem changes
34
+ at each change point. Higher values indicate more substantial changes
35
+ in problem characteristics.
36
+ taut : int
37
+ Frequency of change. Specifies how often (in generations) the problem
38
+ undergoes changes. Lower values mean more frequent changes.
39
+ t0 : int, optional
40
+ The first change occurs after t0 generations, by default 50.
41
+ That is, the generation at which a change occurs is (t0+1), (t0+taut+1), etc.
42
+ This allows for an initial stabilization period before the first change.
43
+ tau : int, optional
44
+ Current simulation time counter (in generations), by default 1.
45
+ time : float, optional
46
+ Explicit simulation time value (overrides calculated time), by default None.
47
+ Used for manual time control in specific scenarios.
48
+ **kwargs : dict
49
+ Additional keyword arguments passed to the parent Problem class.
50
+
51
+ Attributes
52
+ ----------
53
+ tau : int
54
+ Current simulation time counter in generations.
55
+ nt : int
56
+ Severity of change at each change point.
57
+ taut : int
58
+ Frequency of change between consecutive changes.
59
+ t0 : int
60
+ Initial stabilization period before first change occurs.
61
+
62
+ Notes
63
+ -----
64
+ This class models real-world dynamic scenarios where:
65
+
66
+ - Changes occur at predictable intervals (every `taut` generations)
67
+ - Change severity is controlled by `nt` parameter
68
+ - Initial period `t0` allows for system stabilization
69
+ """
23
70
 
24
- def __init__(self, nt, taut, t0=50, tau=1, time=None, **kwargs):
71
+ def __init__(self, nt: int, taut: int, t0: int = 50, tau: int = 1, time: float | None = None, **kwargs):
25
72
  super().__init__(**kwargs)
26
- self.tau = tau
27
- self.nt = nt
28
- self.taut = taut
29
- self.t0 = t0
73
+ self.tau = tau # time counter
74
+ self.nt = nt # severity of change
75
+ self.taut = taut # frequency of change
76
+ self.t0 = t0 # Initial time offset
30
77
  self._time = time
31
78
 
32
- def tic(self, elapsed=1):
79
+ def tic(self, elapsed: int = 1) -> None:
33
80
 
34
81
  # increase the time counter by one
35
82
  self.tau += elapsed
@@ -38,7 +85,7 @@ class DynamicApplProblem(DynamicProblem):
38
85
  self.__dict__["cache"] = {}
39
86
 
40
87
  @property
41
- def time(self):
88
+ def time(self) -> float:
42
89
  if self._time is not None:
43
90
  return self._time
44
91
  else:
@@ -54,7 +101,7 @@ class DynamicApplProblem(DynamicProblem):
54
101
  return delta_time * count
55
102
 
56
103
  @time.setter
57
- def time(self, value):
104
+ def time(self, value: float) -> None:
58
105
  self._time = value
59
106
 
60
107
  def update_to_next_time(self):
@@ -77,12 +124,60 @@ class DynamicApplProblem(DynamicProblem):
77
124
 
78
125
 
79
126
  class DynamicTestProblem(DynamicProblem):
127
+ """Dynamic optimization problem for testing and benchmarking.
128
+
129
+ Parameters
130
+ ----------
131
+ nt : int
132
+ Severity of change. Controls how significantly the problem changes
133
+ at each change point. Higher values indicate more substantial changes
134
+ in problem characteristics.
135
+ taut : int
136
+ Frequency of change. Specifies how often (in generations) the problem
137
+ undergoes changes. Lower values mean more frequent changes.
138
+ t0 : int, optional
139
+ The first change occurs after t0 generations, by default 50.
140
+ That is, the generation at which a change occurs is (t0+1), (t0+taut+1), etc.
141
+ This allows for an initial stabilization period before the first change.
142
+ tau : int, optional
143
+ Current simulation time counter (in generations), by default 1.
144
+ time : float, optional
145
+ Explicit simulation time value (overrides calculated time), by default None.
146
+ Used for manual time control in specific scenarios.
147
+ add_time_perturbation : bool, optional
148
+ If True, adds perturbations to the time calculation, by default False.
149
+ **kwargs : dict
150
+ Additional keyword arguments passed to the parent Problem class.
151
+
152
+ Attributes
153
+ ----------
154
+ tau : int
155
+ Current simulation time counter in generations.
156
+ nt : int
157
+ Severity of change at each change point.
158
+ taut : int
159
+ Frequency of change between consecutive changes.
160
+ t0 : int
161
+ Initial stabilization period before first change occurs.
162
+ add_time_perturbation : bool
163
+ Flag indicating whether to add stochastic perturbations.
164
+
165
+ Notes
166
+ -----
167
+ This class is designed for testing scenarios where:
168
+
169
+ - Changes occur at predictable intervals (every `taut` generations)
170
+ - Change severity is controlled by `nt` parameter
171
+ - Initial period `t0` allows for system stabilization
172
+ - Stochastic perturbations can be added for more complex testing
173
+ - Reproducibility is important for benchmarking
174
+ """
80
175
 
81
176
  def __init__(self, nt, taut, t0=50, tau=1, time=None, add_time_perturbation=False, **kwargs):
82
177
  super().__init__(**kwargs)
83
- self.tau = tau
84
- self.nt = nt
85
- self.taut = taut
178
+ self.tau = tau # time counter
179
+ self.nt = nt # severity of change
180
+ self.taut = taut # frequency of change
86
181
  self.t0 = t0 # Initial time offset - added by DynOpt Team
87
182
  self._time = time
88
183