Moral88-0.8.0-py3-none-any.whl → Moral88-0.9.0-py3-none-any.whl
- Moral88/regression.py +275 -277
- {Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/METADATA +1 -1
- Moral88-0.9.0.dist-info/RECORD +8 -0
- Moral88-0.8.0.dist-info/RECORD +0 -8
- {Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/LICENSE +0 -0
- {Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/WHEEL +0 -0
- {Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/top_level.txt +0 -0
Moral88/regression.py
CHANGED
@@ -101,300 +101,298 @@ class DataValidator:
         return y_true, y_pred


-class metrics:
-    def __init__(self):
-        pass
-    def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
-        """
-        Computes Mean Bias Deviation (MBD).
-        """
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)

-
+    def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
+        """
+        Computes Mean Bias Deviation (MBD).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            # Sklearn does not have a direct implementation for MBD
+            raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            bias = torch.mean(y_pred_tensor - y_true_tensor).item()
+            return bias
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
+            return bias
+
+        # Default implementation
+        return np.mean(y_pred - y_true)
+    def __init__(self):
+        self.validator = DataValidator()
+
+    def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
+        """
+        Computes R2 score.
+        """
+        y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            from sklearn.metrics import r2_score as sklearn_r2
+            return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
+
+        if library == 'statsmodels':
+            import statsmodels.api as sm
+            model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
+            return model.rsquared
+
+        numerator = np.sum((y_true - y_pred) ** 2)
+        denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+
+        if denominator == 0:
+            return 0.0
+        return 1 - (numerator / denominator)
+
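A minimal usage sketch for the two methods added above (mean_bias_deviation, r2_score). It assumes the class is importable as from Moral88.regression import metrics (the import path is inferred from this file's location) and uses made-up data:

import numpy as np
from Moral88.regression import metrics  # assumed import path for Moral88/regression.py

m = metrics()
y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

# Default NumPy paths, as implemented above
print(m.mean_bias_deviation(y_true, y_pred))  # mean(y_pred - y_true) = 0.25
print(m.r2_score(y_true, y_pred))             # 1 - SS_res / SS_tot, roughly 0.949 here

# Optional backends delegate to the named library
print(m.r2_score(y_true, y_pred, library='sklearn'))  # requires scikit-learn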
+    def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+        """
+        Computes Mean Absolute Error (MAE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if flatten:
+            y_true = y_true.ravel()
+            y_pred = y_pred.ravel()
+
+        if library == 'Moral88':
+            if threshold is not None:
+                y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+            if y_true.ndim > 1 and flatten:
                 y_true = y_true.flatten()
                 y_pred = y_pred.flatten()
+            absolute_errors = np.abs(y_true - y_pred)
+
+            if method == 'mean':
+                result = np.mean(absolute_errors)
+            elif method == 'sum':
+                result = np.sum(absolute_errors)
+            elif method == 'none':
+                result = absolute_errors
+            else:
+                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")

-        if
-
-
-
-        if library == 'torch':
-            import torch
-            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-            bias = torch.mean(y_pred_tensor - y_true_tensor).item()
-            return bias
-
-        if library == 'tensorflow':
-            import tensorflow as tf
-            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
-            return bias
-
-        # Default implementation
-        return np.mean(y_pred - y_true)
-    def __init__(self):
-        self.validator = DataValidator()
-
-    def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
-        """
-        Computes R2 score.
-        """
-        y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
-
-        if flatten and y_true.ndim > 1:
-            y_true = y_true.flatten()
-            y_pred = y_pred.flatten()
-
-        if library == 'sklearn':
-            from sklearn.metrics import r2_score as sklearn_r2
-            return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
-
-        if library == 'statsmodels':
-            import statsmodels.api as sm
-            model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
-            return model.rsquared
-
-        numerator = np.sum((y_true - y_pred) ** 2)
-        denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-
-        if denominator == 0:
-            return 0.0
-        return 1 - (numerator / denominator)
-
-    def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-        """
-        Computes Mean Absolute Error (MAE).
-        """
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-        if flatten:
-            y_true = y_true.ravel()
-            y_pred = y_pred.ravel()
-
-        if library == 'Moral88':
-            if threshold is not None:
-                y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-            if y_true.ndim > 1 and flatten:
-                y_true = y_true.flatten()
-                y_pred = y_pred.flatten()
-            absolute_errors = np.abs(y_true - y_pred)
-
-            if method == 'mean':
-                result = np.mean(absolute_errors)
-            elif method == 'sum':
-                result = np.sum(absolute_errors)
-            elif method == 'none':
-                result = absolute_errors
-            else:
-                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-            # if normalize and method != 'none':
-            #     range_y = np.ptp(y_true)
-            #     result = result / max(abs(range_y), 1)
-
-            return result
-
-        elif library == 'sklearn':
-            from sklearn.metrics import mean_absolute_error as sklearn_mae
-            return sklearn_mae(y_true, y_pred)
-
-        elif library == 'torch':
-            import torch
-            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-            return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
-
-        elif library == 'tensorflow':
-            import tensorflow as tf
-            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
-
-    def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-        """
-        Computes Mean Squared Error (MSE).
-        """
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-        if flatten:
-            y_true = y_true.ravel()
-            y_pred = y_pred.ravel()
-
-        if library == 'Moral88':
-            if threshold is not None:
-                y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-            if y_true.ndim > 1 and flatten:
-                y_true = y_true.flatten()
-                y_pred = y_pred.flatten()
-            squared_errors = (y_true - y_pred) ** 2
-
-            if method == 'mean':
-                result = np.mean(squared_errors)
-            elif method == 'sum':
-                result = np.sum(squared_errors)
-            elif method == 'none':
-                result = squared_errors
-            else:
-                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-            # if normalize and method != 'none':
-            #     range_y = np.ptp(y_true)
-            #     result = result / max(abs(range_y), 1)
-
-            return result
-
-        elif library == 'sklearn':
-            from sklearn.metrics import mean_squared_error as sklearn_mse
-            return sklearn_mse(y_true, y_pred)
-
-        elif library == 'torch':
-            import torch
-            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-            return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
-
-        elif library == 'tensorflow':
-            import tensorflow as tf
-            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
-
-    def root_mean_squared_error(self, y_true, y_pred, library=None):
-        """
-        Computes Root Mean Squared Error (RMSE).
-        """
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-        if library == 'sklearn':
-            from sklearn.metrics import mean_squared_error as sklearn_mse
-            return np.sqrt(sklearn_mse(y_true, y_pred))
-
-        if library == 'torch':
-            import torch
-            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-            return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
+            # if normalize and method != 'none':
+            #     range_y = np.ptp(y_true)
+            #     result = result / max(abs(range_y), 1)

-
-            import tensorflow as tf
-            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
+            return result

-
-
+        elif library == 'sklearn':
+            from sklearn.metrics import mean_absolute_error as sklearn_mae
+            return sklearn_mae(y_true, y_pred)

-
-
-
-
-
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-        y_true = np.clip(y_true, 1e-8, None)
+        elif library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()

-
-
-
+        elif library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()

-
-
-
-
-
+    def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+        """
+        Computes Mean Squared Error (MSE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)

-
-
-
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+        if flatten:
+            y_true = y_true.ravel()
+            y_pred = y_pred.ravel()

-
+        if library == 'Moral88':
+            if threshold is not None:
+                y_pred = np.clip(y_pred, threshold[0], threshold[1])

-
-        """
-        Computes Explained Variance Score.
-        """
-        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-        if library == 'sklearn':
-            from sklearn.metrics import explained_variance_score as sklearn_evs
-            return sklearn_evs(y_true, y_pred)
-
-        if library == 'torch':
-            import torch
-            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-            variance_residual = torch.var(y_true_tensor - y_pred_tensor)
-            variance_y = torch.var(y_true_tensor)
-            return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-        if library == 'tensorflow':
-            import tensorflow as tf
-            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-            variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
-            variance_y = tf.math.reduce_variance(y_true_tensor)
-            return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-        numerator = np.var(y_true - y_pred)
-        denominator = np.var(y_true)
-        return 1 - numerator / denominator if denominator != 0 else 0
-
-    def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
-        """
-        Computes Adjusted R-Squared Score.
-
-        Parameters:
-        y_true: array-like of shape (n_samples,)
-            Ground truth (correct) target values.
-
-        y_pred: array-like of shape (n_samples,)
-            Estimated target values.
-
-        n_features: int
-            Number of independent features in the model.
-
-        library: str, optional (default=None)
-            Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
-
-        flatten: bool, optional (default=True)
-            If True, flattens multidimensional arrays before computation.
-        """
-        # Validate inputs
-        y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
-
-        # Ensure inputs are 1D arrays
-        if y_true.ndim == 0 or y_pred.ndim == 0:
-            y_true = np.array([y_true])
-            y_pred = np.array([y_pred])
-
-        if flatten and y_true.ndim > 1:
+            if y_true.ndim > 1 and flatten:
                 y_true = y_true.flatten()
                 y_pred = y_pred.flatten()
-
-
-
-
-        elif
-
-
-
-            r2 = model.rsquared
+            squared_errors = (y_true - y_pred) ** 2
+
+            if method == 'mean':
+                result = np.mean(squared_errors)
+            elif method == 'sum':
+                result = np.sum(squared_errors)
+            elif method == 'none':
+                result = squared_errors
             else:
-
-
-
+                raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+            # if normalize and method != 'none':
+            #     range_y = np.ptp(y_true)
+            #     result = result / max(abs(range_y), 1)
+
+            return result
+
+        elif library == 'sklearn':
+            from sklearn.metrics import mean_squared_error as sklearn_mse
+            return sklearn_mse(y_true, y_pred)
+
+        elif library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
+
+        elif library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
+
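A short sketch of the mean_absolute_error / mean_squared_error surface added above, again with an assumed import path and illustrative data; threshold clips the predictions and method selects the reduction, per the Moral88 branch:

import numpy as np
from Moral88.regression import metrics  # assumed import path

m = metrics()
y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.8, 3.5, 8.0])

print(m.mean_absolute_error(y_true, y_pred))                    # mean of |errors|, about 1.2
print(m.mean_absolute_error(y_true, y_pred, method='sum'))      # about 4.8
print(m.mean_absolute_error(y_true, y_pred, threshold=(0, 5)))  # predictions clipped to [0, 5] first, about 0.45
print(m.mean_squared_error(y_true, y_pred, library='sklearn'))  # delegates to sklearn.metrics.mean_squared_error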
+    def root_mean_squared_error(self, y_true, y_pred, library=None):
+        """
+        Computes Root Mean Squared Error (RMSE).
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if library == 'sklearn':
+            from sklearn.metrics import mean_squared_error as sklearn_mse
+            return np.sqrt(sklearn_mse(y_true, y_pred))
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
+
+        mse = self.mean_squared_error(y_true, y_pred)
+        return np.sqrt(mse)
+
+    def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
+        """
+        Computes Mean Absolute Percentage Error (MAPE).
+        """
+        y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+        y_true = np.clip(y_true, 1e-8, None)
+
+        if library == 'sklearn':
+            from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
+            return sklearn_mape(y_true, y_pred) * 100
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+
+        return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
+
+    def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
+        """
+        Computes Explained Variance Score.
+        """
+        y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+        if library == 'sklearn':
+            from sklearn.metrics import explained_variance_score as sklearn_evs
+            return sklearn_evs(y_true, y_pred)
+
+        if library == 'torch':
+            import torch
+            y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+            y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+            variance_residual = torch.var(y_true_tensor - y_pred_tensor)
+            variance_y = torch.var(y_true_tensor)
+            return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+        if library == 'tensorflow':
+            import tensorflow as tf
+            y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+            y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+            variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
+            variance_y = tf.math.reduce_variance(y_true_tensor)
+            return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+        numerator = np.var(y_true - y_pred)
+        denominator = np.var(y_true)
+        return 1 - numerator / denominator if denominator != 0 else 0
+
+    def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
+        """
+        Computes Adjusted R-Squared Score.
+
+        Parameters:
+        y_true: array-like of shape (n_samples,)
+            Ground truth (correct) target values.
+
+        y_pred: array-like of shape (n_samples,)
+            Estimated target values.
+
+        n_features: int
+            Number of independent features in the model.
+
+        library: str, optional (default=None)
+            Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
+
+        flatten: bool, optional (default=True)
+            If True, flattens multidimensional arrays before computation.
+        """
+        # Validate inputs
+        y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+
+        # Ensure inputs are 1D arrays
+        if y_true.ndim == 0 or y_pred.ndim == 0:
+            y_true = np.array([y_true])
+            y_pred = np.array([y_pred])
+
+        if flatten and y_true.ndim > 1:
+            y_true = y_true.flatten()
+            y_pred = y_pred.flatten()
+
+        if library == 'sklearn':
+            from sklearn.metrics import r2_score
+            r2 = r2_score(y_true, y_pred)
+        elif library == 'statsmodels':
+            import statsmodels.api as sm
+            X = sm.add_constant(y_pred)
+            model = sm.OLS(y_true, X).fit()
+            r2 = model.rsquared
+        else:
+            numerator = np.sum((y_true - y_pred) ** 2)
+            denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+            r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0

-
-
-
+        n_samples = len(y_true)
+        if n_samples <= n_features + 1:
+            raise ValueError("Number of samples must be greater than number of features plus one for adjusted R-squared computation.")

-
-
+        adjusted_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
+        return adjusted_r2

 if __name__ == '__main__':
     # Example usage
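To round out the new API, a sketch covering the remaining methods added above (RMSE, MAPE, explained variance, adjusted R-squared); import path and data are again assumptions for illustration:

import numpy as np
from Moral88.regression import metrics  # assumed import path

m = metrics()
y_true = np.array([3.0, 5.0, 2.5, 7.0, 4.0])
y_pred = np.array([2.8, 5.3, 2.1, 6.5, 4.4])

print(m.root_mean_squared_error(y_true, y_pred))         # sqrt of the default-path MSE
print(m.mean_absolute_percentage_error(y_true, y_pred))  # already scaled to percent
print(m.explained_variance_score(y_true, y_pred))        # 1 - Var(residuals) / Var(y_true)

# adjusted_r2_score requires n_samples > n_features + 1, otherwise it raises ValueError
print(m.adjusted_r2_score(y_true, y_pred, n_features=2))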
Moral88-0.9.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+Moral88/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Moral88/regression.py,sha256=vxkrbEvGW9Lys7TKVsM1SNqRtqBfCQjfUcm_hmyyGjU,16293
+Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
+Moral88-0.9.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Moral88-0.9.0.dist-info/METADATA,sha256=RM87uHl8gYyDJmAre7sS49ZEAbkgYdmWcgwtNprcdEY,407
+Moral88-0.9.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+Moral88-0.9.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
+Moral88-0.9.0.dist-info/RECORD,,
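Each RECORD row is path,hash,size, where the hash is "sha256=" followed by the urlsafe-base64 SHA-256 digest of the file with the trailing padding stripped (the regression.py row above reflects the change to 16293 bytes). A small sketch for recomputing an entry's hash from an extracted wheel; the path argument is hypothetical:

import base64
import hashlib

def wheel_record_hash(path):
    # RECORD stores the SHA-256 digest, urlsafe-base64 encoded, with '=' padding removed
    with open(path, "rb") as fh:
        digest = hashlib.sha256(fh.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. wheel_record_hash("Moral88/regression.py") should reproduce the value in the row above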
Moral88-0.8.0.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-Moral88/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Moral88/regression.py,sha256=V4Iqug1rkACBvHoKZ2-4-CK-XK6VP20qok8rWqWjEdE,17302
-Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
-Moral88-0.8.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Moral88-0.8.0.dist-info/METADATA,sha256=rlMlw0Mt2e202Je7YqkoWNFNmsieOKfleNM2DcelDGw,407
-Moral88-0.8.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-Moral88-0.8.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
-Moral88-0.8.0.dist-info/RECORD,,
{Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/LICENSE
File without changes
{Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/WHEEL
File without changes
{Moral88-0.8.0.dist-info → Moral88-0.9.0.dist-info}/top_level.txt
File without changes