Moral88 0.10.0__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Moral88/classification.py +123 -0
- Moral88/clustering.py +129 -0
- Moral88/regression.py +259 -302
- Moral88/utils.py +67 -97
- {Moral88-0.10.0.dist-info → Moral88-0.13.0.dist-info}/METADATA +1 -1
- Moral88-0.13.0.dist-info/RECORD +14 -0
- {Moral88-0.10.0.dist-info → Moral88-0.13.0.dist-info}/top_level.txt +1 -1
- Test/test_classification.py +88 -0
- Test/test_clustering.py +63 -0
- Test/test_regression.py +141 -0
- Moral88-0.10.0.dist-info/RECORD +0 -10
- tests/test_regression.py +0 -99
- {Moral88-0.10.0.dist-info → Moral88-0.13.0.dist-info}/LICENSE +0 -0
- {Moral88-0.10.0.dist-info → Moral88-0.13.0.dist-info}/WHEEL +0 -0
- {tests → Test}/__init__.py +0 -0
Moral88/regression.py
CHANGED
@@ -3,312 +3,269 @@ from Moral88.utils import DataValidator
 
 validator = DataValidator()
 
-def mean_bias_deviation( y_true, y_pred, library=None, flatten=True):
-    """
-    Computes Mean Bias Deviation (MBD).
-    """
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-    if flatten and y_true.ndim > 1:
-        y_true = y_true.flatten()
-        y_pred = y_pred.flatten()
-
-    if library == 'sklearn':
-        # Sklearn does not have a direct implementation for MBD
-        raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
-
-    if library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        bias = torch.mean(y_pred_tensor - y_true_tensor).item()
-        return bias
-
-    if library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
-        return bias
-
-    # Default implementation
-    return np.mean(y_pred - y_true)
-
-def r2_score( y_true, y_pred, sample_weight=None, library=None, flatten=True):
-    """
-    Computes R2 score.
-    """
-    y_true, y_pred, sample_weight = validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
-
-    if flatten and y_true.ndim > 1:
-        y_true = y_true.flatten()
-        y_pred = y_pred.flatten()
-
-    if library == 'sklearn':
-        from sklearn.metrics import r2_score as sklearn_r2
-        return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
-
-    if library == 'statsmodels':
-        import statsmodels.api as sm
-        model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
-        return model.rsquared
-
-    numerator = np.sum((y_true - y_pred) ** 2)
-    denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-
-    if denominator == 0:
-        return 0.0
-    return 1 - (numerator / denominator)
-
-def mean_absolute_error( y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-    """
-    Computes Mean Absolute Error (MAE).
-    """
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-    if flatten:
-        y_true = y_true.ravel()
-        y_pred = y_pred.ravel()
-
-    if library == 'Moral88':
-        if threshold is not None:
-            y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-        if y_true.ndim > 1 and flatten:
-            y_true = y_true.flatten()
-            y_pred = y_pred.flatten()
-        absolute_errors = np.abs(y_true - y_pred)
-
-        if method == 'mean':
-            result = np.mean(absolute_errors)
-        elif method == 'sum':
-            result = np.sum(absolute_errors)
-        elif method == 'none':
-            result = absolute_errors
-        else:
-            raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-        # if normalize and method != 'none':
-        #     range_y = np.ptp(y_true)
-        #     result = result / max(abs(range_y), 1)
-
-        return result
-
-    elif library == 'sklearn':
-        from sklearn.metrics import mean_absolute_error as sklearn_mae
-        return sklearn_mae(y_true, y_pred)
-
-    elif library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
-
-    elif library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
-
-def mean_squared_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-    """
-    Computes Mean Squared Error (MSE).
-    """
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-    if flatten:
-        y_true = y_true.ravel()
-        y_pred = y_pred.ravel()
-
-    if library == 'Moral88':
-        if threshold is not None:
-            y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-        if y_true.ndim > 1 and flatten:
-            y_true = y_true.flatten()
-            y_pred = y_pred.flatten()
-        squared_errors = (y_true - y_pred) ** 2
-
-        if method == 'mean':
-            result = np.mean(squared_errors)
-        elif method == 'sum':
-            result = np.sum(squared_errors)
-        elif method == 'none':
-            result = squared_errors
-        else:
-            raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-        return result
-
-    elif library == 'sklearn':
-        from sklearn.metrics import mean_squared_error as sklearn_mse
-        return sklearn_mse(y_true, y_pred)
-
-    elif library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
-
-    elif library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
-
-def root_mean_squared_error(y_true, y_pred, library=None):
-    """
-    Computes Root Mean Squared Error (RMSE).
-    """
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-    if library == 'sklearn':
-        from sklearn.metrics import mean_squared_error as sklearn_mse
-        return np.sqrt(sklearn_mse(y_true, y_pred))
-
-    if library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
-
-    if library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
-
-    mse = mean_squared_error(y_true, y_pred)
-    return np.sqrt(mse)
-
-def mean_absolute_percentage_error(y_true, y_pred, library=None):
-    """
-    Computes Mean Absolute Percentage Error (MAPE).
-    """
-    y_true, y_pred = validator.validate_regression_targets(y_true, y_pred)
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-    y_true = np.clip(y_true, 1e-8, None)
-
-    if library == 'sklearn':
-        from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
-        return sklearn_mape(y_true, y_pred) * 100
-
-    if library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
-
-    if library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
-
-    return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
-
-def explained_variance_score(y_true, y_pred, library=None, flatten=True):
-    """
-    Computes Explained Variance Score.
-    """
-    y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-    if library == 'sklearn':
-        from sklearn.metrics import explained_variance_score as sklearn_evs
-        return sklearn_evs(y_true, y_pred)
-
-    if library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        variance_residual = torch.var(y_true_tensor - y_pred_tensor)
-        variance_y = torch.var(y_true_tensor)
-        return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-    if library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
-        variance_y = tf.math.reduce_variance(y_true_tensor)
-        return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-    numerator = np.var(y_true - y_pred)
-    denominator = np.var(y_true)
-    return 1 - numerator / denominator if denominator != 0 else 0
-
-def adjusted_r2_score(y_true, y_pred, n_features, library=None, flatten=True):
-    """
-    Computes Adjusted R-Squared Score.
-
-    Parameters:
-    y_true: array-like of shape (n_samples,)
-        Ground truth (correct) target values.
-
-    y_pred: array-like of shape (n_samples,)
-        Estimated target values.
-
-    n_features: int
-        Number of independent features in the model.
-
-    library: str, optional (default=None)
-        Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
-
-    flatten: bool, optional (default=True)
-        If True, flattens multidimensional arrays before computation.
-    """
-    # Validate inputs
-    y_true, y_pred, _ = validator.validate_r2_score_inputs(y_true, y_pred)
-
-    # Ensure inputs are 1D arrays
-    if y_true.ndim == 0 or y_pred.ndim == 0:
-        y_true = np.array([y_true])
-        y_pred = np.array([y_pred])
-
-    if flatten and y_true.ndim > 1:
-        y_true = y_true.flatten()
-        y_pred = y_pred.flatten()
-
-    if library == 'sklearn':
-        from sklearn.metrics import r2_score
-        r2 = r2_score(y_true, y_pred)
-    elif library == 'statsmodels':
-        import statsmodels.api as sm
-        X = sm.add_constant(y_pred)
-        model = sm.OLS(y_true, X).fit()
-        r2 = model.rsquared
-    else:
-        numerator = np.sum((y_true - y_pred) ** 2)
-        denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-        r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0
 
-
-
-
+def mean_absolute_error(y_true, y_pred, sample_weights=None, normalize=False, method='mean'):
+    """Compute Mean Absolute Error (MAE)"""
+    validator.validate_all(y_true, y_pred)
+    errors = np.abs(y_true - y_pred)
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if normalize:
+        errors /= np.mean(y_true)
+
+    if method == 'mean':
+        return np.mean(errors)
+    elif method == 'sum':
+        return np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def mean_squared_error(y_true, y_pred, sample_weights=None, squared=True, method='mean'):
+    """Compute Mean Squared Error (MSE) or Root Mean Squared Error (RMSE)"""
+    validator.validate_all(y_true, y_pred)
+    errors = (y_true - y_pred) ** 2
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        result = np.mean(errors)
+    elif method == 'sum':
+        result = np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+    return result if squared else np.sqrt(result)
+
+def root_mean_squared_error(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Root Mean Squared Error (RMSE)"""
+    validator.validate_all(y_true, y_pred)
+    errors = (y_true - y_pred) ** 2
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        result = np.mean(errors)
+    elif method == 'sum':
+        result = np.sum(errors)
+    elif method == 'none':
+        return np.sqrt(errors)
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+    return np.sqrt(result)
+
+def mean_bias_deviation(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Mean Bias Deviation (MBD)"""
+    validator.validate_all(y_true, y_pred)
+    errors = y_true - y_pred
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        return np.mean(errors)
+    elif method == 'sum':
+        return np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def r_squared(y_true, y_pred, adjusted=False, n_features=None):
+    """Compute R-squared (R²) and Adjusted R-squared if needed"""
+    validator.validate_all(y_true, y_pred)
+    ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
+    ss_residual = np.sum((y_true - y_pred) ** 2)
+    r2 = 1 - (ss_residual / ss_total)
+
+    if adjusted:
+        if n_features is None:
+            raise ValueError("n_features must be provided for adjusted R² calculation")
+        n = len(y_true)
+        return 1 - ((1 - r2) * (n - 1) / (n - n_features - 1))
+
+    return r2
+
+def adjusted_r_squared(y_true, y_pred, n_features):
+    """Compute Adjusted R-squared (R² Adjusted)"""
+    validator.validate_all(y_true, y_pred)
+
+    n = len(y_true)
+    if n_features >= n:
+        raise ValueError("Number of features must be less than number of samples")
+
+    ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
+    ss_residual = np.sum((y_true - y_pred) ** 2)
+    r2 = 1 - (ss_residual / ss_total)
+
+    return 1 - ((1 - r2) * (n - 1) / (n - n_features - 1))
+
+def mean_absolute_percentage_error(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Mean Absolute Percentage Error (MAPE)"""
+    validator.validate_all(y_true, y_pred, mape_based=True)
+    errors = np.abs((y_true - y_pred) / y_true) * 100
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        return np.mean(errors)
+    elif method == 'sum':
+        return np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def symmetric_mean_absolute_percentage_error(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Symmetric Mean Absolute Percentage Error (sMAPE)"""
+    validator.validate_all(y_true, y_pred, mape_based=True)
+    errors = 200 * np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        return np.mean(errors)
+    elif method == 'sum':
+        return np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def huber_loss(y_true, y_pred, delta=1.0, sample_weights=None, method='mean'):
+    """Compute Huber Loss"""
+    validator.validate_all(y_true, y_pred)
+    error = y_true - y_pred
+
+    loss = np.where(np.abs(error) <= delta, 0.5 * error ** 2, delta * (np.abs(error) - 0.5 * delta))
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        loss *= sample_weights
+
+    if method == 'mean':
+        return np.mean(loss)
+    elif method == 'sum':
+        return np.sum(loss)
+    elif method == 'none':
+        return loss
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def relative_squared_error(y_true, y_pred):
+    """Compute Relative Squared Error (RSE)"""
+    validator.validate_all(y_true, y_pred)
+    return np.sum((y_true - y_pred) ** 2) / np.sum((y_true - np.mean(y_true)) ** 2)
+
+def mean_squared_log_error(y_true, y_pred, sample_weights=None, method='mean', squared=True):
+    """Compute Logarithmic Mean Squared Error (MSLE) or Root Mean Squared Log Error (RMSLE)"""
+    validator.validate_all(y_true, y_pred, log_based=True)
+    errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        result = np.mean(errors)
+    elif method == 'sum':
+        result = np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+    return result if squared else np.sqrt(result)
+
+def root_mean_squared_log_error(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Root Mean Squared Logarithmic Error (RMSLE)"""
+    validator.validate_all(y_true, y_pred, log_based=True)
+    errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        return np.sqrt(np.mean(errors))
+    elif method == 'sum':
+        return np.sqrt(np.sum(errors))
+    elif method == 'none':
+        return np.sqrt(errors)
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
+
+def log_cosh_loss(y_true, y_pred, sample_weights=None, method='mean'):
+    """Compute Log-Cosh Loss"""
+    validator.validate_all(y_true, y_pred)
+    errors = np.log(np.cosh(y_pred - y_true))
+
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
+
+    if method == 'mean':
+        return np.mean(errors)
+    elif method == 'sum':
+        return np.sum(errors)
+    elif method == 'none':
+        return errors
+    else:
+        raise ValueError("Invalid method. Choose from 'mean', 'sum', or 'none'")
 
-
-
+def explained_variance(y_true, y_pred):
+    """Compute Explained Variance Score"""
+    validator.validate_all(y_true, y_pred)
+    variance_y_true = np.var(y_true)
+    return 1 - (np.var(y_true - y_pred) / variance_y_true) if variance_y_true != 0 else 0
 
-
-
+def median_absolute_error(y_true, y_pred, sample_weights=None):
+    """Compute Median Absolute Error"""
 validator = DataValidator()
+    validator.validate_all(y_true, y_pred)
+    errors = np.abs(y_true - y_pred)
 
+    if sample_weights is not None:
+        sample_weights = np.array(sample_weights)
+        if len(sample_weights) != len(y_true):
+            raise ValueError("sample_weights must have the same length as y_true and y_pred")
+        errors *= sample_weights
 
-
-arr = [[1], [2], [3]]
-print("1D array:", validator.is_1d_array(arr))
-print("Samples:", validator.check_samples(arr))
-
-# Test MAE, MSE, R2, MBD, EV, MAPE, RMSE
-y_true = [3, -0.5, 2, 7]
-y_pred = [2.5, 0.0, 2, 8]
-
-print("Mean Absolute Error:", mean_absolute_error(y_true, y_pred))
-print("Mean Squared Error:", mean_squared_error(y_true, y_pred))
-print("R2 Score:", r2_score(y_true, y_pred))
-print("Mean Bias Deviation: ", mean_bias_deviation(y_true, y_pred))
-print("Explained Variance Score: ", explained_variance_score(y_true, y_pred))
-print("Mean Absolute Percentage Error: ", mean_absolute_percentage_error(y_true, y_pred))
-print("Root Mean Squared Error: ", root_mean_squared_error(y_true, y_pred))
-print("adjusted_r2_score: ", adjusted_r2_score(y_true, y_pred, 2))
+    return np.median(errors)
 
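For orientation, here is a minimal usage sketch of the rewritten 0.13.0 regression API, based only on the function signatures visible in the added lines above. The import path Moral88.regression is inferred from the file layout shown in this diff, and the behaviour of DataValidator.validate_all is not confirmed here; treat both as assumptions rather than documented API.

# Hypothetical usage sketch for Moral88 0.13.0; the import path below is an
# assumption inferred from the changed file Moral88/regression.py in this diff.
import numpy as np
from Moral88.regression import (
    mean_absolute_error,
    mean_squared_error,
    r_squared,
    huber_loss,
)

# Sample values taken from the test block removed from the 0.10.0 module.
y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

print(mean_absolute_error(y_true, y_pred))                   # plain MAE
print(mean_squared_error(y_true, y_pred, squared=False))     # RMSE via squared=False
print(r_squared(y_true, y_pred, adjusted=True, n_features=2))  # adjusted R²
print(huber_loss(y_true, y_pred, delta=1.0, method='sum'))   # summed Huber loss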