Moral88 0.4.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Moral88/regression.py +295 -143
- {Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/METADATA +1 -1
- Moral88-0.6.0.dist-info/RECORD +8 -0
- Moral88-0.4.0.dist-info/RECORD +0 -8
- {Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/LICENSE +0 -0
- {Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/WHEEL +0 -0
- {Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/top_level.txt +0 -0
Moral88/regression.py
CHANGED
@@ -1,145 +1,4 @@
 import numpy as np
-
-def validate_inputs(y_true, y_pred):
-    """
-    Validate the inputs for type and length.
-    """
-    if not isinstance(y_true, (list, tuple, np.ndarray)) or not isinstance(y_pred, (list, tuple, np.ndarray)):
-        raise TypeError("Both y_true and y_pred must be lists, tuples, or numpy arrays.")
-
-    y_true = np.array(y_true)
-    y_pred = np.array(y_pred)
-
-    if y_true.shape != y_pred.shape:
-        raise ValueError("Shapes of y_true and y_pred must be the same.")
-
-    if not np.issubdtype(y_true.dtype, np.number) or not np.issubdtype(y_pred.dtype, np.number):
-        raise TypeError("All elements in y_true and y_pred must be numeric.")
-
-def mean_absolute_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-    """
-    Calculate Mean Absolute Error (MAE) for single or multi-dimensional data.
-    """
-    validate_inputs(y_true, y_pred)
-
-    # y_true = np.array(y_true)
-    # y_pred = np.array(y_pred)
-
-    if flatten:
-        y_true = y_true.ravel()
-        y_pred = y_pred.ravel()
-
-    if library == 'Moral88':
-        if threshold is not None:
-            y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-        absolute_errors = np.abs(y_true - y_pred)
-
-        if method == 'mean':
-            result = np.mean(absolute_errors)
-        elif method == 'sum':
-            result = np.sum(absolute_errors)
-        elif method == 'none':
-            result = absolute_errors
-        else:
-            raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-        if normalize and method != 'none':
-            range_y = np.ptp(y_true)
-            result = result / max(abs(range_y), 1)
-
-        return result
-
-    elif library == 'sklearn':
-        from sklearn.metrics import mean_absolute_error as sklearn_mae
-        return sklearn_mae(y_true, y_pred)
-
-    elif library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
-
-    elif library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
-
-    else:
-        raise ValueError(f"Invalid library: {library}. Choose from {'Moral88', 'sklearn', 'torch', 'tensorflow'}.")
-
-def mean_squared_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-    """
-    Calculate Mean Squared Error (MSE) for single or multi-dimensional data.
-    """
-    validate_inputs(y_true, y_pred)
-
-    # y_true = np.array(y_true)
-    # y_pred = np.array(y_pred)
-
-    if flatten:
-        y_true = y_true.ravel()
-        y_pred = y_pred.ravel()
-
-    if library == 'Moral88':
-        if threshold is not None:
-            y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-        squared_errors = (y_true - y_pred) ** 2
-
-        if method == 'mean':
-            result = np.mean(squared_errors)
-        elif method == 'sum':
-            result = np.sum(squared_errors)
-        elif method == 'none':
-            result = squared_errors
-        else:
-            raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-        if normalize and method != 'none':
-            range_y = np.ptp(y_true)
-            result = result / max(abs(range_y), 1)
-
-        return result
-
-    elif library == 'sklearn':
-        from sklearn.metrics import mean_squared_error as sklearn_mse
-        return sklearn_mse(y_true, y_pred)
-
-    elif library == 'torch':
-        import torch
-        y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-        y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-        return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
-
-    elif library == 'tensorflow':
-        import tensorflow as tf
-        y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-        y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-        return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
-
-    else:
-        raise ValueError(f"Invalid library: {library}. Choose from {'Moral88', 'sklearn', 'torch', 'tensorflow'}.")
-
-def r_squared(y_true, y_pred, flatten=True):
-    """
-    Compute R-Squared for single or multi-dimensional data.
-    """
-    validate_inputs(y_true, y_pred)
-
-    y_true = np.array(y_true)
-    y_pred = np.array(y_pred)
-
-    if flatten:
-        y_true = y_true.ravel()
-        y_pred = y_pred.ravel()
-
-    ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
-    ss_residual = np.sum((y_true - y_pred) ** 2)
-    return 1 - (ss_residual / ss_total)
-
-import numpy as np
 import warnings
 from typing import Union, List, Tuple
 from scipy import sparse
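Note: this hunk removes the 0.4.0 module-level API (validate_inputs, mean_absolute_error, mean_squared_error, r_squared) and the duplicated numpy import; the replacements are the DataValidator/Metrics methods added in the hunks below. A minimal migration sketch, assuming Metrics is imported straight from Moral88.regression (the package's __init__.py re-exports are not shown in this diff) and that validate_regression_targets accepts plain lists, as the __main__ example further down suggests. Note also that the removed functions normalized MAE/MSE by the target range (np.ptp) by default, whereas the new methods leave that step commented out, so default return values can differ between 0.4.0 and 0.6.0:

# 0.4.0: module-level call (removed above); normalize=True divided by np.ptp(y_true)
# mae_old = mean_absolute_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])          # ~0.0667

# 0.6.0: class-based call (added in the hunks below); no range normalization by default
from Moral88.regression import Metrics
mae_new = Metrics().mean_absolute_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])  # 0.5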
@@ -172,6 +31,7 @@ class DataValidator:
         """
         Returns the number of samples in the array.
         """
+        array = np.asarray(array)
         if hasattr(array, 'shape') and len(array.shape) > 0:
             return array.shape[0]
         else:
@@ -231,17 +91,69 @@ class DataValidator:
         sample_weight = self.is_1d_array(sample_weight)
         return y_true, y_pred, sample_weight
 
+    def validate_mae_mse_inputs(self, y_true, y_pred, library=None):
+        """
+        Ensures inputs for MAE and MSE computation are valid.
+        """
+        y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
+        if library not in {None, 'sklearn', 'torch', 'tensorflow', 'Moral88'}:
+            raise ValueError(f"Invalid library: {library}. Choose from {{'Moral88', 'sklearn', 'torch', 'tensorflow'}}.")
+        return y_true, y_pred
+
 
 class Metrics:
+    def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
+        """
+        Computes Mean Bias Deviation (MBD).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            # Sklearn does not have a direct implementation for MBD
+            raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            bias = torch.mean(y_pred_tensor - y_true_tensor).item()
+            return bias
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
+            return bias
+
+        # Default implementation
+        return np.mean(y_pred - y_true)
     def __init__(self):
         self.validator = DataValidator()
 
-    def r2_score(self, y_true, y_pred, sample_weight=None):
+    def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
         """
         Computes R2 score.
         """
         y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
 
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            from sklearn.metrics import r2_score as sklearn_r2
+            return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
+
+        if library == 'statsmodels':
+            import statsmodels.api as sm
+            model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
+            return model.rsquared
+
         numerator = np.sum((y_true - y_pred) ** 2)
         denominator = np.sum((y_true - np.mean(y_true)) ** 2)
 
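Two things worth noting in this hunk: the default mean_bias_deviation path is simply np.mean(y_pred - y_true), and the new library='statsmodels' branch of r2_score returns the R² of an OLS fit of y_true on y_pred (with an intercept), which in general is not the same number as the direct 1 - SS_res/SS_tot computed by the fallback path. A small NumPy check of the two default-path formulas, using the sample targets from the __main__ block further down (statsmodels itself is not needed for this):

import numpy as np

y_true = np.array([3, -0.5, 2, 7])
y_pred = np.array([2.5, 0.0, 2, 8])

# Mean Bias Deviation, default branch: positive means over-prediction on average
mbd = np.mean(y_pred - y_true)                      # 0.25

# R^2, fallback branch: 1 - SS_res / SS_tot
ss_res = np.sum((y_true - y_pred) ** 2)             # 1.5
ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)    # 29.1875
r2 = 1 - ss_res / ss_tot                            # ~0.9486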
@@ -249,6 +161,237 @@ class Metrics:
             return 0.0
         return 1 - (numerator / denominator)
 
+    def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+        """
+        Computes Mean Absolute Error (MAE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if flatten:
+            y_true = y_true.ravel()
+            y_pred = y_pred.ravel()
+
+        if library == 'Moral88':
+            if threshold is not None:
+                y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+            if y_true.ndim > 1 and flatten:
+                y_true = y_true.flatten()
+                y_pred = y_pred.flatten()
+            absolute_errors = np.abs(y_true - y_pred)
+
+            if method == 'mean':
+                result = np.mean(absolute_errors)
+            elif method == 'sum':
+                result = np.sum(absolute_errors)
+            elif method == 'none':
+                result = absolute_errors
+            else:
+                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+            # if normalize and method != 'none':
+            #     range_y = np.ptp(y_true)
+            #     result = result / max(abs(range_y), 1)
+
+            return result
+
+        elif library == 'sklearn':
+            from sklearn.metrics import mean_absolute_error as sklearn_mae
+            return sklearn_mae(y_true, y_pred)
+
+        elif library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
+
+        elif library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
+
+    def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+        """
+        Computes Mean Squared Error (MSE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if flatten:
+            y_true = y_true.ravel()
+            y_pred = y_pred.ravel()
+
+        if library == 'Moral88':
+            if threshold is not None:
+                y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+            if y_true.ndim > 1 and flatten:
+                y_true = y_true.flatten()
+                y_pred = y_pred.flatten()
+            squared_errors = (y_true - y_pred) ** 2
+
+            if method == 'mean':
+                result = np.mean(squared_errors)
+            elif method == 'sum':
+                result = np.sum(squared_errors)
+            elif method == 'none':
+                result = squared_errors
+            else:
+                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+            # if normalize and method != 'none':
+            #     range_y = np.ptp(y_true)
+            #     result = result / max(abs(range_y), 1)
+
+            return result
+
+        elif library == 'sklearn':
+            from sklearn.metrics import mean_squared_error as sklearn_mse
+            return sklearn_mse(y_true, y_pred)
+
+        elif library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
+
+        elif library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
+
+    def root_mean_squared_error(self, y_true, y_pred, library=None):
+        """
+        Computes Root Mean Squared Error (RMSE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if library == 'sklearn':
+            from sklearn.metrics import mean_squared_error as sklearn_mse
+            return np.sqrt(sklearn_mse(y_true, y_pred))
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
+
+        mse = self.mean_squared_error(y_true, y_pred)
+        return np.sqrt(mse)
+
+    def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
+        """
+        Computes Mean Absolute Percentage Error (MAPE).
+        """
+        y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if library == 'sklearn':
+            from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
+            return sklearn_mape(y_true, y_pred) * 100
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+
+        return np.mean(np.abs((y_true - y_pred) / np.clip(y_true, 1e-8, None))) * 100
+
+    def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
+        """
+        Computes Explained Variance Score.
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if library == 'sklearn':
+            from sklearn.metrics import explained_variance_score as sklearn_evs
+            return sklearn_evs(y_true, y_pred)
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            variance_residual = torch.var(y_true_tensor - y_pred_tensor)
+            variance_y = torch.var(y_true_tensor)
+            return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
+            variance_y = tf.math.reduce_variance(y_true_tensor)
+            return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+        numerator = np.var(y_true - y_pred)
+        denominator = np.var(y_true)
+        return 1 - numerator / denominator if denominator != 0 else 0
+
+    def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
+        """
+        Computes Adjusted R-Squared Score.
+
+        Parameters:
+        y_true: array-like of shape (n_samples,)
+            Ground truth (correct) target values.
+
+        y_pred: array-like of shape (n_samples,)
+            Estimated target values.
+
+        n_features: int
+            Number of independent features in the model.
+
+        library: str, optional (default=None)
+            Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
+
+        flatten: bool, optional (default=True)
+            If True, flattens multidimensional arrays before computation.
+        """
+        # Validate inputs
+        y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+
+        # Ensure inputs are 1D arrays
+        if y_true.ndim == 0 or y_pred.ndim == 0:
+            y_true = np.array([y_true])
+            y_pred = np.array([y_pred])
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            from sklearn.metrics import r2_score
+            r2 = r2_score(y_true, y_pred)
+        elif library == 'statsmodels':
+            import statsmodels.api as sm
+            X = sm.add_constant(y_pred)
+            model = sm.OLS(y_true, X).fit()
+            r2 = model.rsquared
+        else:
+            numerator = np.sum((y_true - y_pred) ** 2)
+            denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+            r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0
+
+        n_samples = len(y_true)
+        if n_samples <= n_features + 1:
+            raise ValueError("Number of samples must be greater than number of features plus one for adjusted R-squared computation.")
+
+        adjusted_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
+        return adjusted_r2
 
 if __name__ == '__main__':
     # Example usage
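The adjusted_r2_score added at the end of this hunk applies the textbook correction adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - n_features - 1) and raises when n <= n_features + 1. A hand-check with the same four-point example and n_features=2 (illustrative arithmetic only, not output captured from the package):

import numpy as np

y_true = np.array([3, -0.5, 2, 7])
y_pred = np.array([2.5, 0.0, 2, 8])
n_features = 2

# Plain R^2 from the fallback branch
r2 = 1 - np.sum((y_true - y_pred) ** 2) / np.sum((y_true - np.mean(y_true)) ** 2)  # ~0.9486
n = len(y_true)  # 4 samples, which satisfies n > n_features + 1
adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - n_features - 1)                         # ~0.8458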
@@ -260,7 +403,16 @@ if __name__ == '__main__':
     print("1D array:", validator.is_1d_array(arr))
     print("Samples:", validator.check_samples(arr))
 
-    # Test R2
+    # Test MAE, MSE, R2, MBD, EV, MAPE, RMSE
     y_true = [3, -0.5, 2, 7]
     y_pred = [2.5, 0.0, 2, 8]
+
+    print("Mean Absolute Error:", metrics.mean_absolute_error(y_true, y_pred))
+    print("Mean Squared Error:", metrics.mean_squared_error(y_true, y_pred))
     print("R2 Score:", metrics.r2_score(y_true, y_pred))
+    print("Mean Bias Deviation: ", metrics.mean_bias_deviation(y_true, y_pred))
+    print("Explained Variance Score: ", metrics.explained_variance_score(y_true, y_pred))
+    print("Mean Absolute Percentage Error: ", metrics.mean_absolute_percentage_error(y_true, y_pred))
+    print("Root Mean Squared Error: ", metrics.root_mean_squared_error(y_true, y_pred))
+    print("adjusted_r2_score: ", metrics.adjusted_r2_score(y_true, y_pred, 2))
+
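Taken together, the 0.6.0 API boils down to constructing a Metrics object and calling its methods, each of which takes an optional library argument that dispatches to scikit-learn, PyTorch, or TensorFlow when those packages are installed. A minimal usage sketch, assuming Metrics is imported from Moral88.regression (re-exports via the package __init__.py are not shown in this diff) and scikit-learn is available for the 'sklearn' branch:

from Moral88.regression import Metrics

metrics = Metrics()
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]

# Default 'Moral88' backend (pure NumPy) and the sklearn dispatch agree on this example
print(metrics.mean_squared_error(y_true, y_pred))                     # 0.375
print(metrics.mean_squared_error(y_true, y_pred, library='sklearn'))  # 0.375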
Moral88-0.6.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+Moral88/__init__.py,sha256=vb-aPc9ZbnYNSy9qq2fVESI63E10pYsCrDpnV8OHWkg,74
+Moral88/regression.py,sha256=0aSRXLWur6tcC4xd806koyB2ktgPJodlOeXYCZZYDzE,17208
+Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
+Moral88-0.6.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Moral88-0.6.0.dist-info/METADATA,sha256=6Y1H8Qh9wnrZVUr2gnoBYMnF5EsXY6ijMoS9bFZ21bE,407
+Moral88-0.6.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+Moral88-0.6.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
+Moral88-0.6.0.dist-info/RECORD,,
Moral88-0.4.0.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-Moral88/__init__.py,sha256=vb-aPc9ZbnYNSy9qq2fVESI63E10pYsCrDpnV8OHWkg,74
-Moral88/regression.py,sha256=kfWQcdtdZVlHW_iIRbS9_rNrKJOYNiT9RaeYpVIvl7I,9355
-Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
-Moral88-0.4.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Moral88-0.4.0.dist-info/METADATA,sha256=TITXx4rc91SXkAwwDXu2mW-FbV1xQukBXEFiVfgRXMY,407
-Moral88-0.4.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-Moral88-0.4.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
-Moral88-0.4.0.dist-info/RECORD,,
{Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/LICENSE
File without changes
{Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/WHEEL
File without changes
{Moral88-0.4.0.dist-info → Moral88-0.6.0.dist-info}/top_level.txt
File without changes