pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. pydmoo/algorithms/base/__init__.py +20 -0
  2. pydmoo/algorithms/base/core/__init__.py +0 -0
  3. pydmoo/algorithms/base/core/algorithm.py +416 -0
  4. pydmoo/algorithms/base/core/genetic.py +129 -0
  5. pydmoo/algorithms/base/dmoo/__init__.py +0 -0
  6. pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
  7. pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
  8. pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
  9. pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
  10. pydmoo/algorithms/base/moo/__init__.py +0 -0
  11. pydmoo/algorithms/base/moo/moead.py +199 -0
  12. pydmoo/algorithms/base/moo/moeadde.py +105 -0
  13. pydmoo/algorithms/base/moo/mopso.py +0 -0
  14. pydmoo/algorithms/base/moo/nsga2.py +122 -0
  15. pydmoo/algorithms/modern/__init__.py +94 -0
  16. pydmoo/algorithms/modern/moead_imkt.py +161 -0
  17. pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
  18. pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
  19. pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
  20. pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
  21. pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
  22. pydmoo/algorithms/modern/moead_ktmm.py +112 -0
  23. pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
  24. pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
  25. pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
  26. pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
  27. pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
  28. pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
  29. pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
  30. pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
  31. pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
  32. pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
  33. pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
  34. pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
  35. pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
  36. pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
  37. pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
  38. pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
  39. pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
  40. pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
  41. pydmoo/algorithms/utils/__init__.py +0 -0
  42. pydmoo/algorithms/utils/utils.py +166 -0
  43. pydmoo/core/__init__.py +0 -0
  44. pydmoo/{response → core}/ar_model.py +4 -4
  45. pydmoo/{response → core}/bounds.py +35 -2
  46. pydmoo/core/distance.py +45 -0
  47. pydmoo/core/inverse.py +55 -0
  48. pydmoo/core/lstm/__init__.py +0 -0
  49. pydmoo/core/lstm/base.py +291 -0
  50. pydmoo/core/lstm/lstm.py +491 -0
  51. pydmoo/core/manifold.py +93 -0
  52. pydmoo/core/predictions.py +50 -0
  53. pydmoo/core/sample_gaussian.py +56 -0
  54. pydmoo/core/sample_uniform.py +63 -0
  55. pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
  56. pydmoo/problems/__init__.py +53 -49
  57. pydmoo/problems/dyn.py +94 -13
  58. pydmoo/problems/dynamic/cec2015.py +10 -5
  59. pydmoo/problems/dynamic/df.py +6 -3
  60. pydmoo/problems/dynamic/gts.py +69 -34
  61. pydmoo/problems/real_world/__init__.py +0 -0
  62. pydmoo/problems/real_world/dsrp.py +168 -0
  63. pydmoo/problems/real_world/dwbdp.py +189 -0
  64. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
  65. pydmoo-0.1.0.dist-info/RECORD +70 -0
  66. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
  67. pydmoo-0.0.18.dist-info/RECORD +0 -15
  68. /pydmoo/{response → algorithms}/__init__.py +0 -0
  69. {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,154 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+
4
+ from pydmoo.algorithms.modern.nsga2_imkt_n import NSGA2IMKTN
5
+ from pydmoo.algorithms.utils.utils import make_semidefinite, reconstruct_covariance_from_triu
6
+ from pydmoo.core.bounds import clip_and_randomize
7
+ from pydmoo.core.inverse import closed_form_solution
8
+ from pydmoo.core.lstm.lstm import LSTMpredictor
9
+ from pydmoo.core.sample_gaussian import multivariate_gaussian_sample
10
+
11
+
12
class NSGA2IMNLSTM(NSGA2IMKTN):
    """Inverse Modeling with LSTM (IMNLSTM).

    Inverse Modeling for Dynamic Multiobjective Optimization with Knowledge
    Transfer in objective space.  On an environment change, the history of
    (mean, covariance) statistics of the population in objective space is fed
    to an LSTM that predicts the next distribution; samples are drawn from the
    predicted Gaussian and mapped back to the decision space through a linear
    inverse model (X = F B).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # size of the knowledge pool (used by the parent's selection logic)
        self.size_pool = 10
        # offset added to the weight normalizer when blending pooled knowledge
        self.denominator = 0.5

        # minimum number of historical (mean, cov) entries before the LSTM is used
        self._n_timesteps = 10
        self._sequence_length = 5  # Use 5 historical time steps to predict next step
        self._incremental_learning = False

    def _setup(self, problem, **kwargs):
        super()._setup(problem, **kwargs)

        # Must be here: runs after the parent's _setup; presumably self.seed is
        # only available at this point -- TODO confirm against the parent class.
        self._lstm = LSTMpredictor(
            self._sequence_length,
            hidden_dim=64,
            num_layers=1,
            epochs=50,
            batch_size=32,
            lr=0.001,
            device="cpu",  # for fair comparison
            patience=5,
            seed=self.seed,
            model_type="lstm",
            incremental_learning=self._incremental_learning,
        )

    def _response_change(self):
        """React to a detected environment change and return the new population."""
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        # NOTE(review): this value of `pop` is never read -- it is overwritten
        # by the merge at the end of the method (dead store; confirm intent).
        pop = Population.new(X=X)

        # sample self.pop_size individuals in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and diversity
        samples = samples_old[:int(len(samples_old)/2)]

        # knowledge in objective space: history of (mean, cov, n_iter) entries
        # plus the statistics of the freshly drawn samples
        means_covs, mean, cov = self._in_decision_or_objective_space_nd(samples, "objective_space")

        # Check if sufficient historical data is available for LSTM prediction
        if len(means_covs) > self._n_timesteps:
            # Update pool: drop entries older than the prediction window
            self.data["means_covs"] = means_covs[self._n_timesteps:]

            # Prepare time series data from historical means and covariance matrices
            time_series_data = prepare_data_mean_cov(self._n_timesteps, means_covs)

            # Train the predictor and generate a prediction for the next time step
            next_prediction = self._lstm.convert_train_predict(time_series_data)

            # Convert prediction tensor to numpy array for further processing
            next_prediction = next_prediction.numpy()

            # Split prediction into mean and covariance components:
            # first n_obj elements are the mean vector, the remaining elements
            # are the flattened upper-triangular covariance values.
            mean_new, cov_new_ = next_prediction[:self.problem.n_obj], next_prediction[self.problem.n_obj:]
            cov_new = reconstruct_covariance_from_triu(cov_new_, len(mean_new))
            cov_new = make_semidefinite(cov_new)

        else:
            # not enough history yet: fall back to pooled-knowledge selection
            mean_new, cov_new = self._select_means_covs(means_covs, mean, cov)

        # sample self.pop_size individuals in objective space
        F = multivariate_gaussian_sample(mean_new, cov_new, self.pop_size, random_state=self.random_state)

        # TODO
        # inverse mapping: fit B so that X ≈ F B on the selected samples
        # X = FB
        B = closed_form_solution(samples.get("X"), samples.get("F"))

        # X = FB
        X = np.dot(F, B)

        # bounds: repair out-of-bounds decision variables
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = clip_and_randomize(X, xl, xu, random_state=self.random_state)

        # merge the resampled old population with the newly generated one
        pop = Population.merge(samples_old, Population.new(X=X))

        return pop
104
+
105
+
106
def prepare_data_mean_cov(n_timesteps, means_covs):
    """Prepare time series data from means and covariance matrices.

    Converts a sequence of mean vectors and covariance matrices into a time
    series format suitable for LSTM training: for each of the most recent
    ``n_timesteps`` entries, the feature vector is the mean values followed by
    the upper triangular elements (including diagonal) of the covariance
    matrix.

    Parameters
    ----------
    n_timesteps : int
        Number of most recent time steps to include in the series.
    means_covs : list of tuples
        List containing (mean, cov, n_iter) tuples for each time step, where:
        - mean: 1D numpy array of mean values
        - cov: 2D numpy array representing covariance matrix
        - n_iter: number of iterations (not used in feature extraction)

    Returns
    -------
    time_series_data : list of list
        One feature vector per time step, i.e. shape (n_timesteps, n_features),
        laid out as [mean_1, ..., mean_n, cov_11, cov_12, ..., cov_nn].
    """
    time_series_data = []

    # Process only the most recent n_timesteps entries
    for m, c, _ in means_covs[-n_timesteps:]:
        # Row/column indices of the upper triangle (including diagonal)
        rows, cols = np.triu_indices_from(c)

        # Mean values followed by the upper-triangular covariance elements
        time_series_data.append([*m, *c[rows, cols]])

    return time_series_data
145
+
146
+
147
class NSGA2IMNiLSTM(NSGA2IMNLSTM):
    """Variant of NSGA2IMNLSTM with incremental LSTM learning enabled."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Same pool/window configuration as the parent, restated explicitly.
        self.size_pool = 10
        self.denominator = 0.5
        self._n_timesteps = 10
        # Use 5 historical time steps to predict next step
        self._sequence_length = 5
        # Only difference from the parent: train the LSTM incrementally.
        self._incremental_learning = True
@@ -0,0 +1,112 @@
1
+ import numpy as np
2
+ from pymoo.core.population import Population
3
+ from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
4
+
5
+ from pydmoo.algorithms.base.dmoo.dnsga2 import DNSGA2
6
+ from pydmoo.core.sample_gaussian import univariate_gaussian_sample
7
+
8
+
9
class NSGA2KTMM(DNSGA2):
    """Knowledge Transfer with Mixture Model.

    Zou, J., Hou, Z., Jiang, S., Yang, S., Ruan, G., Xia, Y., and Liu, Y. (2025).
    Knowledge transfer with mixture model in dynamic multi-objective optimization.
    IEEE Transactions on Evolutionary Computation, in press.
    https://doi.org/10.1109/TEVC.2025.3566481
    """

    def __init__(self, **kwargs):

        super().__init__(**kwargs)

        self.size_pool = 14  # the size of knowledge pool
        # offset added to the weight normalizer so pooled knowledge never
        # fully replaces the freshly sampled statistics (weights sum < 1)
        self.denominator = 0.5

    def _response_change(self):
        """React to a detected environment change and return the new population."""
        pop = self.pop
        X = pop.get("X")

        # recreate the current population without being evaluated
        # NOTE(review): this value of `pop` is never read -- it is overwritten
        # by the merge at the end of the method (dead store; confirm intent).
        pop = Population.new(X=X)

        # sample self.pop_size solutions in decision space
        samples_old = self.sampling_new_pop()

        # select self.pop_size/2 individuals with better convergence and diversity
        samples = samples_old[:int(len(samples_old)/2)]

        # knowledge in decision space: historical (mean, std) entries plus the
        # statistics of the freshly drawn samples
        means_stds_ps, mean, std = self._in_decision_or_objective_space_1d(samples, "decision_space")
        mean_new, std_new = self._select_means_stds(means_stds_ps, mean, std)

        # sample self.pop_size solutions in decision space
        X = univariate_gaussian_sample(mean_new, std_new, self.pop_size, random_state=self.random_state)

        # bounds
        if self.problem.has_bounds():
            xl, xu = self.problem.bounds()
            X = np.clip(X, xl, xu)  # not provided in the original reference literature

        # merge the resampled old population with the newly generated one
        pop = Population.merge(samples_old, Population.new(X=X))

        return pop

    def _in_decision_or_objective_space_1d(self, samples, decision_or_objective="decision_space"):
        """Record current (mean, std) knowledge and return it with sample stats.

        Returns the full history ``means_stds`` (list of ``(mean, std, iter)``
        tuples) together with the per-variable mean and std of ``samples``.
        """
        # decision space or objective space
        flag = "X" if decision_or_objective == "decision_space" else "F"

        means_stds = self.data.get("means_stds", [])

        flag_value = self.opt.get(flag)
        # fall back to the first two population members when the optimum set
        # holds at most one point (std over a single point is degenerate)
        if len(flag_value) <= 1:
            flag_value = self.pop.get(flag)
            flag_value = flag_value[:2]

        means_stds.append((np.mean(flag_value, axis=0), np.std(flag_value, axis=0), self.n_iter - 1))  # 1-based
        self.data["means_stds"] = means_stds

        flag_value = samples.get(flag)
        mean, std = np.mean(flag_value, axis=0), np.std(flag_value, axis=0)
        return means_stds, mean, std

    def sampling_new_pop(self):
        """Draw and evaluate a fresh population, ordered by rank and crowding."""
        samples = self.initialization.sampling(self.problem, self.pop_size)
        samples = self.evaluator.eval(self.problem, samples)

        # do a survival to recreate rank and crowding of all individuals
        samples = RankAndCrowding().do(self.problem, samples, n_survive=len(samples))
        return samples

    def _select_means_stds(self, means_stds, mean_new, std_new):
        """Blend the fresh (mean, std) with the closest historical knowledge."""
        # Unpack means and stds
        means = np.array([m[0] for m in means_stds])
        stds = np.array([m[1] for m in means_stds])

        # Euclidean distance in the joint (mean, std) space
        mean_diffs = means - mean_new
        std_diffs = stds - std_new

        distances = np.sqrt(np.sum(mean_diffs**2, axis=1) + np.sum(std_diffs**2, axis=1))

        # Get top K closest
        top_k_idx = np.argsort(distances)[:self.size_pool]
        top_k_dist = distances[top_k_idx]
        top_k_means = means[top_k_idx]
        top_k_stds = stds[top_k_idx]

        # Update pool: keep only the selected entries for the next change
        self._update_means_stds_pool(means_stds, top_k_idx)

        # Calculate weights: inverse-distance, normalized with the extra
        # self.denominator term so that np.sum(weights) < 1
        weights = 1 / (top_k_dist + 1e-8)  # Add small epsilon to avoid division by zero
        weights = weights / (np.sum(weights) + self.denominator)

        # Weighted combination of the fresh stats and the pooled knowledge
        mean_new = (1 - np.sum(weights)) * mean_new + np.sum(weights[:, None] * top_k_means, axis=0)
        std_new = (1 - np.sum(weights)) * std_new + np.sum(weights[:, None] * top_k_stds, axis=0)
        return mean_new, std_new

    def _update_means_stds_pool(self, means_stds, top_k_idx) -> None:
        """Truncate the stored knowledge pool to the selected entries."""
        self.data["means_stds"] = [means_stds[i] for i in top_k_idx]
        return None
File without changes
@@ -0,0 +1,166 @@
1
+ import numpy as np
2
+
3
+
4
def reconstruct_covariance_from_triu(triu_elements, n_features):
    """
    Rebuild a symmetric covariance matrix from its upper triangular elements.

    Given the flattened upper triangle (including the diagonal, row-major
    order) of a covariance matrix, reconstruct the full symmetric matrix.

    Parameters
    ----------
    triu_elements : array_like
        Flattened upper triangular elements (diagonal included), row-major.
        Length must equal n_features * (n_features + 1) // 2.
    n_features : int
        Dimensionality of the feature space; the output is
        (n_features, n_features).

    Returns
    -------
    cov_matrix : ndarray
        Symmetric matrix of shape (n_features, n_features) with
        cov_matrix[i, j] == cov_matrix[j, i].

    Raises
    ------
    ValueError
        If len(triu_elements) does not match the expected count for n_features.

    Examples
    --------
    >>> triu_elements = np.array([1.0, 0.5, 0.3, 2.0, 0.4, 3.0])
    >>> reconstruct_covariance_from_triu(triu_elements, 3)
    array([[1. , 0.5, 0.3],
           [0.5, 2. , 0.4],
           [0.3, 0.4, 3. ]])
    """
    # An n x n matrix has n * (n + 1) // 2 upper-triangular entries
    n_expected = n_features * (n_features + 1) // 2
    if len(triu_elements) != n_expected:
        raise ValueError(
            f"Invalid number of triu_elements: expected {n_expected}, "
            f"got {len(triu_elements)}"
        )

    # Scatter the flat elements into the upper triangle of a zero matrix
    cov_matrix = np.zeros((n_features, n_features), dtype=np.float64)
    upper = np.triu_indices(n_features)
    cov_matrix[upper] = triu_elements

    # Symmetrize: adding the transpose mirrors the strict upper triangle into
    # the lower one; the diagonal is double-counted, so subtract it once.
    cov_matrix = cov_matrix + cov_matrix.T - np.diag(np.diag(cov_matrix))

    return cov_matrix
75
+
76
+
77
def make_semidefinite(matrix, tol=1e-8):
    """
    Project a (nearly) symmetric matrix onto the positive semi-definite cone.

    The input is symmetrized, eigen-decomposed, and any eigenvalue below
    ``tol`` is replaced by zero before the matrix is reassembled. The
    eigenvectors are preserved, so only the offending eigenvalues change.

    Parameters
    ----------
    matrix : array_like
        Square, approximately symmetric matrix of shape (n, n).
    tol : float, optional
        Eigenvalues strictly below this threshold are set to zero.
        Default: 1e-8.

    Returns
    -------
    psd_matrix : ndarray
        Positive semi-definite matrix of shape (n, n) with all
        eigenvalues >= 0.

    Raises
    ------
    ValueError
        If the input matrix is not square.
    LinAlgError
        If the eigen-decomposition fails to converge.

    Examples
    --------
    >>> make_semidefinite(np.array([[1.0, 0.5], [0.5, 1.0]]))
    array([[1. , 0.5],
           [0.5, 1. ]])
    >>> np.linalg.eigvalsh(make_semidefinite(np.array([[1.0, 2.0], [2.0, 1.0]])))
    array([0., 3.])
    """
    if matrix.shape[0] != matrix.shape[1]:
        raise ValueError(f"Input matrix must be square, got shape {matrix.shape}")

    # Remove numerical asymmetry by averaging with the transpose
    sym = (matrix + matrix.T) / 2

    # eigh is the right decomposition for symmetric matrices: faster and
    # more stable than eig, with real eigenvalues in ascending order
    eigvals, eigvecs = np.linalg.eigh(sym)

    # Zero out eigenvalues below the tolerance to enforce PSD-ness
    clipped = np.where(eigvals < tol, 0.0, eigvals)

    # Reassemble V * diag(λ) * Vᵀ; scaling the eigenvector columns by the
    # eigenvalues avoids materializing the diagonal matrix
    return (eigvecs * clipped) @ eigvecs.T
File without changes
@@ -38,9 +38,9 @@ class ARModel:
38
38
  # Construct design matrix X and target vector Y
39
39
  X = np.zeros((M - self.p, self.p))
40
40
  for i in range(self.p):
41
- X[:, i] = y[(self.p - i - 1) : (M - i - 1)]
41
+ X[:, i] = y[(self.p - i - 1): (M - i - 1)]
42
42
 
43
- Y = y[self.p :]
43
+ Y = y[self.p:]
44
44
 
45
45
  # Add intercept if specified
46
46
  if trend == "c":
@@ -76,11 +76,11 @@ class ARModel:
76
76
  raise ValueError("Model must be fitted before prediction")
77
77
 
78
78
  predictions = np.zeros(steps)
79
- history = y[-self.p :].copy() # Last p observations
79
+ history = y[-self.p:].copy() # Last p observations
80
80
 
81
81
  for i in range(steps):
82
82
  # Prepare input vector
83
- x = history[-self.p :][::-1] # Latest p observations in reverse order
83
+ x = history[-self.p:][::-1] # Latest p observations in reverse order
84
84
 
85
85
  # Add intercept if present (coef[0] is intercept)
86
86
  if len(self.coef_) == self.p + 1:
@@ -1,6 +1,39 @@
1
1
  import numpy as np
2
2
 
3
3
 
4
def do_degeneration(PF, F, eps=1e-10):
    """Drop objectives whose range over PF is (near-)zero.

    A column is kept only when its spread (max - min over PF) is at least
    ``eps``; the same columns are removed from both PF and F.
    """
    spread = np.max(PF, axis=0) - np.min(PF, axis=0)
    keep = spread >= eps
    return PF[:, keep], F[:, keep]
12
+
13
+
14
def failure_count(PF, F):
    """Count rows of F that fall outside the per-column bounds of PF.

    The bounds are the column-wise min/max of PF. A row of F is invalid when
    any of its entries lies outside [min, max] for that column.

    Returns
    -------
    num_invalid : int
        Number of rows with at least one out-of-bounds entry.
    valid_indices : ndarray
        Indices of rows fully inside the bounds.
    invalid_indices : ndarray
        Indices of rows with at least one violation.
    """
    lower = np.min(PF, axis=0)
    upper = np.max(PF, axis=0)

    # A row is "inside" only when every entry respects both bounds
    inside = np.all((F >= lower) & (F <= upper), axis=1)

    valid_indices = np.flatnonzero(inside)
    invalid_indices = np.flatnonzero(~inside)

    return np.sum(~inside), valid_indices, invalid_indices
35
+
36
+
4
37
  def matrix_conditional_update(x_curr, lb, ub, x_prev):
5
38
  """
6
39
  Vectorized conditional matrix update with bounded interpolation.
@@ -44,7 +77,7 @@ def matrix_conditional_update(x_curr, lb, ub, x_prev):
44
77
  return x_new
45
78
 
46
79
 
47
- def clip_and_randomize(x, lb, ub):
80
+ def clip_and_randomize(x, lb, ub, random_state=None):
48
81
  """
49
82
  Clip values to bounds with random replacement for out-of-bounds values.
50
83
 
@@ -68,7 +101,7 @@ def clip_and_randomize(x, lb, ub):
68
101
  numpy.random.uniform : Used for random value generation.
69
102
  """
70
103
  out_of_bounds = (x < lb) | (x > ub)
71
- random_samples = np.random.uniform(low=lb, high=ub, size=x.shape)
104
+ random_samples = random_state.uniform(low=lb, high=ub, size=x.shape)
72
105
  return np.where(out_of_bounds, random_samples, x)
73
106
 
74
107
 
@@ -0,0 +1,45 @@
1
+ import numpy as np
2
+ from scipy.linalg import logm, sqrtm
3
+
4
+
5
def wasserstein_distance(mean1, cov1, mean2, cov2):
    """2-Wasserstein distance between Gaussians N(mean1, cov1) and N(mean2, cov2)."""
    delta = mean1 - mean2
    root1 = sqrtm(cov1)
    mixed = sqrtm(root1 @ cov2 @ root1)
    return np.sqrt(delta @ delta + np.trace(cov1 + cov2 - 2 * mixed))
10
+
11
+
12
def kl_div_distance(mean1, cov1, mean2, cov2):
    """KL divergence KL(N(mean1, cov1) || N(mean2, cov2)) between Gaussians."""
    prec2 = np.linalg.inv(cov2)
    delta = mean2 - mean1
    k = len(mean1)
    log_det_ratio = np.log(np.linalg.det(cov2) / np.linalg.det(cov1))
    return 0.5 * (np.trace(prec2 @ cov1) + delta.T @ prec2 @ delta - k + log_det_ratio)
17
+
18
+
19
def bhattacharyya_distance(mean1, cov1, mean2, cov2):
    """Bhattacharyya distance between two Gaussian distributions."""
    cov_avg = (cov1 + cov2) / 2
    delta = mean1 - mean2
    mean_term = 0.125 * delta.T @ np.linalg.inv(cov_avg) @ delta
    det_ratio = np.linalg.det(cov_avg) / np.sqrt(np.linalg.det(cov1) * np.linalg.det(cov2))
    return mean_term + 0.5 * np.log(det_ratio)
26
+
27
+
28
def mahalanobis_distance(mean1, cov1, mean2, cov2):
    """Mahalanobis distance between the means, using the pooled covariance."""
    delta = mean1 - mean2
    pooled_inv = np.linalg.inv((cov1 + cov2) / 2)
    return np.sqrt(delta.T @ pooled_inv @ delta)
31
+
32
+
33
def riemannian_distance(cov1, cov2):
    """Affine-invariant Riemannian distance between SPD matrices.

    Computes ``|| log(A^{-1/2} B A^{-1/2}) ||_F`` for A = cov1, B = cov2.
    The inverse square root of cov1 is computed once and reused (the original
    evaluated ``np.linalg.inv(sqrtm(cov1))`` twice per call).
    """
    inv_sqrt1 = np.linalg.inv(sqrtm(cov1))  # hoisted: sqrtm + inv are expensive
    return np.linalg.norm(logm(inv_sqrt1 @ cov2 @ inv_sqrt1), "fro")
35
+
36
+
37
def frobenius_distance(cov1, cov2):
    """Frobenius norm of the difference between two covariance matrices."""
    diff = cov1 - cov2
    return np.linalg.norm(diff, "fro")
39
+
40
+
41
# Used in NSGA2-IMKT-N
# def norm_mean_frobenius_distance(mean, cov, target_mean, target_cov):
#     return np.linalg.norm(mean - target_mean) + frobenius_distance(cov, target_cov)
def norm_mean_frobenius_distance(mean, cov, target_mean, target_cov):
    """Distance between two (mean, covariance) pairs.

    NOTE(review): despite the name, the current implementation deliberately
    ignores the mean arguments and compares only the covariances; the
    mean-aware variant is kept commented out above -- confirm this is intended.
    """
    return frobenius_distance(cov, target_cov)
pydmoo/core/inverse.py ADDED
@@ -0,0 +1,55 @@
1
+ import numpy as np
2
+
3
+
4
def closed_form_solution(X, Y):
    """
    Compute the least-squares solution B of the linear system X ≈ YB.

    Parameters
    ----------
    X : ndarray, shape (m, n)
        Target matrix containing dependent variables.
    Y : ndarray, shape (m, p)
        Design matrix containing independent variables.

    Returns
    -------
    B : ndarray, shape (p, n)
        Coefficient matrix that minimizes the Frobenius norm of (X - YB).

    Notes
    -----
    Uses ``np.linalg.lstsq`` rather than forming the normal equations
    B = (YᵀY)⁻¹(YᵀX) explicitly: it is numerically more stable and handles
    rank-deficient Y via the pseudo-inverse internally, so no manual
    singular-matrix fallback is needed.
    """
    # rcond=None selects the modern (machine-precision based) cutoff for
    # small singular values; residuals/rank/singular values are not needed.
    B, _residuals, _rank, _singular_values = np.linalg.lstsq(Y, X, rcond=None)
    return B
File without changes