Moral88 0.5.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
- Moral88/__init__.py +0 -1
- Moral88/regression.py +87 -14
- {Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/METADATA +1 -1
- Moral88-0.7.0.dist-info/RECORD +8 -0
- Moral88-0.5.0.dist-info/RECORD +0 -8
- {Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/LICENSE +0 -0
- {Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/WHEEL +0 -0
- {Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/top_level.txt +0 -0
Moral88/__init__.py
CHANGED
@@ -1 +0,0 @@
-from .regression import mean_absolute_error, mean_squared_error, r_squared
Moral88/regression.py
CHANGED
@@ -31,6 +31,7 @@ class DataValidator:
         """
         Returns the number of samples in the array.
         """
+        array = np.asarray(array)
         if hasattr(array, 'shape') and len(array.shape) > 0:
             return array.shape[0]
         else:
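
The added np.asarray(array) call means check_samples can accept plain Python lists as well as ndarrays, since lists carry no .shape attribute. A minimal standalone sketch of the behaviour this enables (the function name and the error in the fallback branch are illustrative; that branch is not shown in this hunk):

    import numpy as np

    def check_samples_sketch(array):
        # np.asarray lets plain lists/tuples reach .shape safely.
        array = np.asarray(array)
        if hasattr(array, 'shape') and len(array.shape) > 0:
            return array.shape[0]
        # The original method's else-branch is not shown in this hunk;
        # raising here is only an illustrative placeholder.
        raise ValueError("Input must be at least 1-dimensional.")

    print(check_samples_sketch([3, -0.5, 2, 7]))   # 4
    print(check_samples_sketch(np.zeros((5, 2))))  # 5
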
@@ -100,13 +101,17 @@ class DataValidator:
         return y_true, y_pred
 
 
-class
-    def mean_bias_deviation(self, y_true, y_pred, library=None):
+class metrics:
+    def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
         """
         Computes Mean Bias Deviation (MBD).
         """
         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
         if library == 'sklearn':
             # Sklearn does not have a direct implementation for MBD
             raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
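
The new flatten=True parameter on mean_bias_deviation (and, in the hunks below, on r2_score, mean_absolute_error, mean_squared_error, and explained_variance_score) collapses multi-dimensional targets to 1-D before the metric is computed. A small plain-NumPy sketch of the effect, using assumed 2x2 inputs:

    import numpy as np

    y_true = np.array([[3.0, -0.5], [2.0, 7.0]])
    y_pred = np.array([[2.5,  0.0], [2.0, 8.0]])

    # With flatten=True a (2, 2) target is treated as 4 scalar samples.
    y_true_flat = y_true.flatten()
    y_pred_flat = y_pred.flatten()

    print(y_true_flat)                                 # [ 3.  -0.5  2.   7. ]
    print(np.mean(np.abs(y_true_flat - y_pred_flat)))  # 0.5
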
@@ -130,12 +135,16 @@ class Metrics:
     def __init__(self):
         self.validator = DataValidator()
 
-    def r2_score(self, y_true, y_pred, sample_weight=None, library=None):
+    def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
         """
         Computes R2 score.
         """
         y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
 
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
         if library == 'sklearn':
             from sklearn.metrics import r2_score as sklearn_r2
             return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
@@ -166,6 +175,9 @@ class Metrics:
         if threshold is not None:
             y_pred = np.clip(y_pred, threshold[0], threshold[1])
 
+        if y_true.ndim > 1 and flatten:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
         absolute_errors = np.abs(y_true - y_pred)
 
         if method == 'mean':
@@ -177,9 +189,9 @@ class Metrics:
         else:
             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
 
-        if normalize and method != 'none':
-            range_y = np.ptp(y_true)
-            result = result / max(abs(range_y), 1)
+        # if normalize and method != 'none':
+        #     range_y = np.ptp(y_true)
+        #     result = result / max(abs(range_y), 1)
 
         return result
 
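
The normalization step in mean_absolute_error is now commented out rather than deleted; the disabled code divided the result by the peak-to-peak range of y_true. A standalone sketch of what that normalization computed, mirroring the commented-out lines (normalized_mae is an illustrative name, not a package function):

    import numpy as np

    def normalized_mae(y_true, y_pred):
        # Plain MAE, then divided by the range (max - min) of y_true,
        # mirroring the disabled `result / max(abs(np.ptp(y_true)), 1)` logic.
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)
        mae = np.mean(np.abs(y_true - y_pred))
        range_y = np.ptp(y_true)
        return mae / max(abs(range_y), 1)

    print(normalized_mae([3, -0.5, 2, 7], [2.5, 0.0, 2, 8]))  # 0.5 / 7.5 ~= 0.0667
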
@@ -213,6 +225,9 @@ class Metrics:
         if threshold is not None:
             y_pred = np.clip(y_pred, threshold[0], threshold[1])
 
+        if y_true.ndim > 1 and flatten:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
         squared_errors = (y_true - y_pred) ** 2
 
         if method == 'mean':
@@ -224,9 +239,9 @@ class Metrics:
         else:
             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
 
-        if normalize and method != 'none':
-            range_y = np.ptp(y_true)
-            result = result / max(abs(range_y), 1)
+        # if normalize and method != 'none':
+        #     range_y = np.ptp(y_true)
+        #     result = result / max(abs(range_y), 1)
 
         return result
 
@@ -277,6 +292,7 @@ class Metrics:
         """
         y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+        y_true = np.clip(y_true, 1e-8, None)
 
         if library == 'sklearn':
             from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
@@ -294,9 +310,9 @@ class Metrics:
             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
             return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
 
-        return np.mean(np.abs((y_true - y_pred) / np.clip(y_true, 1e-8, None))) * 100
+        return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
 
-    def explained_variance_score(self, y_true, y_pred, library=None):
+    def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
         """
         Computes Explained Variance Score.
         """
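
The fallback MAPE return now divides by np.clip(np.abs(y_true), 1e-8, None) rather than np.clip(y_true, 1e-8, None). Clipping the signed values sends any negative target to 1e-8 and blows up the corresponding percentage term; taking the absolute value first keeps the denominator at the target's magnitude. An isolated illustration of the two denominators (it deliberately ignores the clip applied to y_true earlier in this method):

    import numpy as np

    y_true = np.array([-2.0, 4.0])
    y_pred = np.array([-1.8, 4.4])

    old_denom = np.clip(y_true, 1e-8, None)          # [1e-08, 4.0] -- negative target collapses
    new_denom = np.clip(np.abs(y_true), 1e-8, None)  # [2.0,   4.0]

    print(np.mean(np.abs(y_true - y_pred) / old_denom) * 100)  # ~1e9, blown up by the 1e-8 denominator
    print(np.mean(np.abs(y_true - y_pred) / new_denom) * 100)  # ~10.0
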
@@ -326,6 +342,58 @@ class Metrics:
         denominator = np.var(y_true)
         return 1 - numerator / denominator if denominator != 0 else 0
 
+    def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
+        """
+        Computes Adjusted R-Squared Score.
+
+        Parameters:
+        y_true: array-like of shape (n_samples,)
+            Ground truth (correct) target values.
+
+        y_pred: array-like of shape (n_samples,)
+            Estimated target values.
+
+        n_features: int
+            Number of independent features in the model.
+
+        library: str, optional (default=None)
+            Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
+
+        flatten: bool, optional (default=True)
+            If True, flattens multidimensional arrays before computation.
+        """
+        # Validate inputs
+        y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+
+        # Ensure inputs are 1D arrays
+        if y_true.ndim == 0 or y_pred.ndim == 0:
+            y_true = np.array([y_true])
+            y_pred = np.array([y_pred])
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            from sklearn.metrics import r2_score
+            r2 = r2_score(y_true, y_pred)
+        elif library == 'statsmodels':
+            import statsmodels.api as sm
+            X = sm.add_constant(y_pred)
+            model = sm.OLS(y_true, X).fit()
+            r2 = model.rsquared
+        else:
+            numerator = np.sum((y_true - y_pred) ** 2)
+            denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+            r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0
+
+        n_samples = len(y_true)
+        if n_samples <= n_features + 1:
+            raise ValueError("Number of samples must be greater than number of features plus one for adjusted R-squared computation.")
+
+        adjusted_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
+        return adjusted_r2
+
 if __name__ == '__main__':
     # Example usage
     validator = DataValidator()
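
The new adjusted_r2_score applies the standard adjustment 1 - (1 - R2) * (n - 1) / (n - k - 1) on top of whichever R2 was computed. Working the formula through the sample data used in the __main__ block (4 samples, 2 assumed features, plain-NumPy R2):

    import numpy as np

    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5,  0.0, 2.0, 8.0])

    ss_res = np.sum((y_true - y_pred) ** 2)         # 1.5
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)  # 29.1875
    r2 = 1 - ss_res / ss_tot                        # ~0.9486

    n_samples, n_features = len(y_true), 2
    adj_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
    print(round(r2, 4), round(adj_r2, 4))           # 0.9486 0.8458
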
@@ -336,11 +404,16 @@ if __name__ == '__main__':
     print("1D array:", validator.is_1d_array(arr))
     print("Samples:", validator.check_samples(arr))
 
-    # Test R2
+    # Test MAE, MSE, R2, MBD, EV, MAPE, RMSE
     y_true = [3, -0.5, 2, 7]
     y_pred = [2.5, 0.0, 2, 8]
-    print("R2 Score:", metrics.r2_score(y_true, y_pred))
 
-    # Test MAE and MSE
     print("Mean Absolute Error:", metrics.mean_absolute_error(y_true, y_pred))
     print("Mean Squared Error:", metrics.mean_squared_error(y_true, y_pred))
+    print("R2 Score:", metrics.r2_score(y_true, y_pred))
+    print("Mean Bias Deviation: ", metrics.mean_bias_deviation(y_true, y_pred))
+    print("Explained Variance Score: ", metrics.explained_variance_score(y_true, y_pred))
+    print("Mean Absolute Percentage Error: ", metrics.mean_absolute_percentage_error(y_true, y_pred))
+    print("Root Mean Squared Error: ", metrics.root_mean_squared_error(y_true, y_pred))
+    print("adjusted_r2_score: ", metrics.adjusted_r2_score(y_true, y_pred, 2))
+
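
For the y_true / y_pred pair printed in the demo block, the core values can be checked by hand; a small library-independent sanity check (plain NumPy, not the package's Metrics class):

    import numpy as np

    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5,  0.0, 2.0, 8.0])

    mae  = np.mean(np.abs(y_true - y_pred))   # 0.5
    mse  = np.mean((y_true - y_pred) ** 2)    # 0.375
    rmse = np.sqrt(mse)                       # ~0.6124
    print(mae, mse, round(rmse, 4))
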
Moral88-0.7.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+Moral88/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Moral88/regression.py,sha256=yzYuAdGv9IjLpXQ6yhlpC26ZIKcK5Cn3zDY_2N7lRW8,17263
+Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
+Moral88-0.7.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Moral88-0.7.0.dist-info/METADATA,sha256=WxT0FQzxE2BEhykJCIKSdTkJlwCbb2_-89z3s3BHjZw,407
+Moral88-0.7.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+Moral88-0.7.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
+Moral88-0.7.0.dist-info/RECORD,,
Moral88-0.5.0.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-Moral88/__init__.py,sha256=vb-aPc9ZbnYNSy9qq2fVESI63E10pYsCrDpnV8OHWkg,74
-Moral88/regression.py,sha256=MjM3R1oqRWdlfo6Goc2NOT0UHeKGcQfdMyriqSvS5q4,14127
-Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
-Moral88-0.5.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Moral88-0.5.0.dist-info/METADATA,sha256=7_93ZrGO0rFBargNBBe7qvQzQnFi2BFN7RGss26ux3I,407
-Moral88-0.5.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-Moral88-0.5.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
-Moral88-0.5.0.dist-info/RECORD,,
{Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/LICENSE
File without changes
{Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/WHEEL
File without changes
{Moral88-0.5.0.dist-info → Moral88-0.7.0.dist-info}/top_level.txt
File without changes