Moral88 0.7.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

Moral88/regression.py CHANGED
@@ -101,298 +101,298 @@ class DataValidator:
          return y_true, y_pred


- class metrics:
-     def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
-         """
-         Computes Mean Bias Deviation (MBD).
-         """
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-         if flatten and y_true.ndim > 1:
-             y_true = y_true.flatten()
-             y_pred = y_pred.flatten()
-
-         if library == 'sklearn':
-             # Sklearn does not have a direct implementation for MBD
-             raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
-
-         if library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             bias = torch.mean(y_pred_tensor - y_true_tensor).item()
-             return bias
-
-         if library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
-             return bias
-
-         # Default implementation
-         return np.mean(y_pred - y_true)
-     def __init__(self):
-         self.validator = DataValidator()
-
-     def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
-         """
-         Computes R2 score.
-         """
-         y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)

-         if flatten and y_true.ndim > 1:
+ def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
+     """
+     Computes Mean Bias Deviation (MBD).
+     """
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+     if flatten and y_true.ndim > 1:
+         y_true = y_true.flatten()
+         y_pred = y_pred.flatten()
+
+     if library == 'sklearn':
+         # Sklearn does not have a direct implementation for MBD
+         raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
+
+     if library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         bias = torch.mean(y_pred_tensor - y_true_tensor).item()
+         return bias
+
+     if library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
+         return bias
+
+     # Default implementation
+     return np.mean(y_pred - y_true)
+ def __init__(self):
+     self.validator = DataValidator()
+
+ def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
+     """
+     Computes R2 score.
+     """
+     y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
+
+     if flatten and y_true.ndim > 1:
+         y_true = y_true.flatten()
+         y_pred = y_pred.flatten()
+
+     if library == 'sklearn':
+         from sklearn.metrics import r2_score as sklearn_r2
+         return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
+
+     if library == 'statsmodels':
+         import statsmodels.api as sm
+         model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
+         return model.rsquared
+
+     numerator = np.sum((y_true - y_pred) ** 2)
+     denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+
+     if denominator == 0:
+         return 0.0
+     return 1 - (numerator / denominator)
+
+ def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+     """
+     Computes Mean Absolute Error (MAE).
+     """
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+     if flatten:
+         y_true = y_true.ravel()
+         y_pred = y_pred.ravel()
+
+     if library == 'Moral88':
+         if threshold is not None:
+             y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+         if y_true.ndim > 1 and flatten:
              y_true = y_true.flatten()
              y_pred = y_pred.flatten()
+         absolute_errors = np.abs(y_true - y_pred)
+
+         if method == 'mean':
+             result = np.mean(absolute_errors)
+         elif method == 'sum':
+             result = np.sum(absolute_errors)
+         elif method == 'none':
+             result = absolute_errors
+         else:
+             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")

-         if library == 'sklearn':
-             from sklearn.metrics import r2_score as sklearn_r2
-             return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
-
-         if library == 'statsmodels':
-             import statsmodels.api as sm
-             model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
-             return model.rsquared
-
-         numerator = np.sum((y_true - y_pred) ** 2)
-         denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-
-         if denominator == 0:
-             return 0.0
-         return 1 - (numerator / denominator)
-
-     def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-         """
-         Computes Mean Absolute Error (MAE).
-         """
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-         if flatten:
-             y_true = y_true.ravel()
-             y_pred = y_pred.ravel()
-
-         if library == 'Moral88':
-             if threshold is not None:
-                 y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-             if y_true.ndim > 1 and flatten:
-                 y_true = y_true.flatten()
-                 y_pred = y_pred.flatten()
-             absolute_errors = np.abs(y_true - y_pred)
-
-             if method == 'mean':
-                 result = np.mean(absolute_errors)
-             elif method == 'sum':
-                 result = np.sum(absolute_errors)
-             elif method == 'none':
-                 result = absolute_errors
-             else:
-                 raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-             # if normalize and method != 'none':
-             # range_y = np.ptp(y_true)
-             # result = result / max(abs(range_y), 1)
-
-             return result
-
-         elif library == 'sklearn':
-             from sklearn.metrics import mean_absolute_error as sklearn_mae
-             return sklearn_mae(y_true, y_pred)
-
-         elif library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
-
-         elif library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
-
-     def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-         """
-         Computes Mean Squared Error (MSE).
-         """
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-         if flatten:
-             y_true = y_true.ravel()
-             y_pred = y_pred.ravel()
-
-         if library == 'Moral88':
-             if threshold is not None:
-                 y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-             if y_true.ndim > 1 and flatten:
-                 y_true = y_true.flatten()
-                 y_pred = y_pred.flatten()
-             squared_errors = (y_true - y_pred) ** 2
-
-             if method == 'mean':
-                 result = np.mean(squared_errors)
-             elif method == 'sum':
-                 result = np.sum(squared_errors)
-             elif method == 'none':
-                 result = squared_errors
-             else:
-                 raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-             # if normalize and method != 'none':
-             # range_y = np.ptp(y_true)
-             # result = result / max(abs(range_y), 1)
-
-             return result
-
-         elif library == 'sklearn':
-             from sklearn.metrics import mean_squared_error as sklearn_mse
-             return sklearn_mse(y_true, y_pred)
-
-         elif library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
-
-         elif library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
-
-     def root_mean_squared_error(self, y_true, y_pred, library=None):
-         """
-         Computes Root Mean Squared Error (RMSE).
-         """
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-         if library == 'sklearn':
-             from sklearn.metrics import mean_squared_error as sklearn_mse
-             return np.sqrt(sklearn_mse(y_true, y_pred))
-
-         if library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
-
-         if library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
-
-         mse = self.mean_squared_error(y_true, y_pred)
-         return np.sqrt(mse)
+         # if normalize and method != 'none':
+         # range_y = np.ptp(y_true)
+         # result = result / max(abs(range_y), 1)

-     def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
-         """
-         Computes Mean Absolute Percentage Error (MAPE).
-         """
-         y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-         y_true = np.clip(y_true, 1e-8, None)
+         return result

-         if library == 'sklearn':
-             from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
-             return sklearn_mape(y_true, y_pred) * 100
+     elif library == 'sklearn':
+         from sklearn.metrics import mean_absolute_error as sklearn_mae
+         return sklearn_mae(y_true, y_pred)

-         if library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
+     elif library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()

-         if library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+     elif library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()

-         return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
+ def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+     """
+     Computes Mean Squared Error (MSE).
+     """
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)

-     def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
-         """
-         Computes Explained Variance Score.
-         """
-         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
-
-         if library == 'sklearn':
-             from sklearn.metrics import explained_variance_score as sklearn_evs
-             return sklearn_evs(y_true, y_pred)
-
-         if library == 'torch':
-             import torch
-             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-             variance_residual = torch.var(y_true_tensor - y_pred_tensor)
-             variance_y = torch.var(y_true_tensor)
-             return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-         if library == 'tensorflow':
-             import tensorflow as tf
-             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-             variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
-             variance_y = tf.math.reduce_variance(y_true_tensor)
-             return 1 - variance_residual / variance_y if variance_y != 0 else 0
-
-         numerator = np.var(y_true - y_pred)
-         denominator = np.var(y_true)
-         return 1 - numerator / denominator if denominator != 0 else 0
-
-     def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
-         """
-         Computes Adjusted R-Squared Score.
-
-         Parameters:
-         y_true: array-like of shape (n_samples,)
-             Ground truth (correct) target values.
-
-         y_pred: array-like of shape (n_samples,)
-             Estimated target values.
-
-         n_features: int
-             Number of independent features in the model.
-
-         library: str, optional (default=None)
-             Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
-
-         flatten: bool, optional (default=True)
-             If True, flattens multidimensional arrays before computation.
-         """
-         # Validate inputs
-         y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+     if flatten:
+         y_true = y_true.ravel()
+         y_pred = y_pred.ravel()

-         # Ensure inputs are 1D arrays
-         if y_true.ndim == 0 or y_pred.ndim == 0:
-             y_true = np.array([y_true])
-             y_pred = np.array([y_pred])
+     if library == 'Moral88':
+         if threshold is not None:
+             y_pred = np.clip(y_pred, threshold[0], threshold[1])

-         if flatten and y_true.ndim > 1:
+         if y_true.ndim > 1 and flatten:
              y_true = y_true.flatten()
              y_pred = y_pred.flatten()
-
-         if library == 'sklearn':
-             from sklearn.metrics import r2_score
-             r2 = r2_score(y_true, y_pred)
-         elif library == 'statsmodels':
-             import statsmodels.api as sm
-             X = sm.add_constant(y_pred)
-             model = sm.OLS(y_true, X).fit()
-             r2 = model.rsquared
+         squared_errors = (y_true - y_pred) ** 2
+
+         if method == 'mean':
+             result = np.mean(squared_errors)
+         elif method == 'sum':
+             result = np.sum(squared_errors)
+         elif method == 'none':
+             result = squared_errors
          else:
-             numerator = np.sum((y_true - y_pred) ** 2)
-             denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-             r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0
+             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+         # if normalize and method != 'none':
+         # range_y = np.ptp(y_true)
+         # result = result / max(abs(range_y), 1)
+
+         return result
+
+     elif library == 'sklearn':
+         from sklearn.metrics import mean_squared_error as sklearn_mse
+         return sklearn_mse(y_true, y_pred)
+
+     elif library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
+
+     elif library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
+
+ def root_mean_squared_error(self, y_true, y_pred, library=None):
+     """
+     Computes Root Mean Squared Error (RMSE).
+     """
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+     if library == 'sklearn':
+         from sklearn.metrics import mean_squared_error as sklearn_mse
+         return np.sqrt(sklearn_mse(y_true, y_pred))
+
+     if library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
+
+     if library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
+
+     mse = self.mean_squared_error(y_true, y_pred)
+     return np.sqrt(mse)
+
+ def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
+     """
+     Computes Mean Absolute Percentage Error (MAPE).
+     """
+     y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true = np.clip(y_true, 1e-8, None)
+
+     if library == 'sklearn':
+         from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
+         return sklearn_mape(y_true, y_pred) * 100
+
+     if library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
+
+     if library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+
+     return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
+
+ def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
+     """
+     Computes Explained Variance Score.
+     """
+     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+     if library == 'sklearn':
+         from sklearn.metrics import explained_variance_score as sklearn_evs
+         return sklearn_evs(y_true, y_pred)
+
+     if library == 'torch':
+         import torch
+         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+         variance_residual = torch.var(y_true_tensor - y_pred_tensor)
+         variance_y = torch.var(y_true_tensor)
+         return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+     if library == 'tensorflow':
+         import tensorflow as tf
+         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+         variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
+         variance_y = tf.math.reduce_variance(y_true_tensor)
+         return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+     numerator = np.var(y_true - y_pred)
+     denominator = np.var(y_true)
+     return 1 - numerator / denominator if denominator != 0 else 0
+
+ def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
+     """
+     Computes Adjusted R-Squared Score.
+
+     Parameters:
+     y_true: array-like of shape (n_samples,)
+         Ground truth (correct) target values.
+
+     y_pred: array-like of shape (n_samples,)
+         Estimated target values.
+
+     n_features: int
+         Number of independent features in the model.
+
+     library: str, optional (default=None)
+         Library to use for computation. Supports {'sklearn', 'statsmodels', None}.
+
+     flatten: bool, optional (default=True)
+         If True, flattens multidimensional arrays before computation.
+     """
+     # Validate inputs
+     y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+
+     # Ensure inputs are 1D arrays
+     if y_true.ndim == 0 or y_pred.ndim == 0:
+         y_true = np.array([y_true])
+         y_pred = np.array([y_pred])
+
+     if flatten and y_true.ndim > 1:
+         y_true = y_true.flatten()
+         y_pred = y_pred.flatten()
+
+     if library == 'sklearn':
+         from sklearn.metrics import r2_score
+         r2 = r2_score(y_true, y_pred)
+     elif library == 'statsmodels':
+         import statsmodels.api as sm
+         X = sm.add_constant(y_pred)
+         model = sm.OLS(y_true, X).fit()
+         r2 = model.rsquared
+     else:
+         numerator = np.sum((y_true - y_pred) ** 2)
+         denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+         r2 = 1 - (numerator / denominator) if denominator != 0 else 0.0

-         n_samples = len(y_true)
-         if n_samples <= n_features + 1:
-             raise ValueError("Number of samples must be greater than number of features plus one for adjusted R-squared computation.")
+     n_samples = len(y_true)
+     if n_samples <= n_features + 1:
+         raise ValueError("Number of samples must be greater than number of features plus one for adjusted R-squared computation.")

-         adjusted_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
-         return adjusted_r2
+     adjusted_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
+     return adjusted_r2

  if __name__ == '__main__':
      # Example usage
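Read as a whole, the hunk above is a reorganization of the same metric routines (mean_bias_deviation, r2_score, mean_absolute_error, mean_squared_error, root_mean_squared_error, mean_absolute_percentage_error, explained_variance_score, adjusted_r2_score, plus __init__); the only structural change visible in it is that the `class metrics:` wrapper from 0.7.0 no longer appears in the 0.9.0 listing. Note also that the `normalize` flag on mean_absolute_error and mean_squared_error remains a no-op in both versions, since the normalization step is still commented out. For orientation, the sketch below shows how the 0.7.0-style API is called; the import path is taken from the file names in this diff, but the sample arrays and the exact calls are illustrative assumptions, not documented behavior of the package.

# Minimal usage sketch of the 0.7.0-style metrics API shown in the diff above.
# Assumptions: the wheel is installed, and the sample data below is illustrative only.
import numpy as np
from Moral88.regression import metrics  # 'metrics' class as it exists in 0.7.0

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

m = metrics()  # __init__ wires up the internal DataValidator
print(m.mean_absolute_error(y_true, y_pred))               # mean |error|, default 'Moral88' backend
print(m.mean_squared_error(y_true, y_pred, method='sum'))  # sum of squared errors instead of the mean
print(m.r2_score(y_true, y_pred))                          # 1 - SSE/SST fallback when library is None
print(m.adjusted_r2_score(y_true, y_pred, n_features=1))   # penalizes R2 by sample and feature counts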
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: Moral88
- Version: 0.7.0
+ Version: 0.9.0
  Summary: A library for regression evaluation metrics.
  Author: Morteza Alizadeh
  Author-email: alizadeh.c2m@gmail.com
@@ -0,0 +1,8 @@
+ Moral88/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ Moral88/regression.py,sha256=vxkrbEvGW9Lys7TKVsM1SNqRtqBfCQjfUcm_hmyyGjU,16293
+ Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
+ Moral88-0.9.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ Moral88-0.9.0.dist-info/METADATA,sha256=RM87uHl8gYyDJmAre7sS49ZEAbkgYdmWcgwtNprcdEY,407
+ Moral88-0.9.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ Moral88-0.9.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
+ Moral88-0.9.0.dist-info/RECORD,,
@@ -1,8 +0,0 @@
- Moral88/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- Moral88/regression.py,sha256=yzYuAdGv9IjLpXQ6yhlpC26ZIKcK5Cn3zDY_2N7lRW8,17263
- Moral88/segmentation.py,sha256=v0yqxdrKbM9LM7wVKLjJ4HrhrSrilNNeWS6-oK_27Ag,1363
- Moral88-0.7.0.dist-info/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- Moral88-0.7.0.dist-info/METADATA,sha256=WxT0FQzxE2BEhykJCIKSdTkJlwCbb2_-89z3s3BHjZw,407
- Moral88-0.7.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
- Moral88-0.7.0.dist-info/top_level.txt,sha256=-dyn5iTprnSUHbtMpvRO-prJsIoaRxao7wlfCHLSsv4,8
- Moral88-0.7.0.dist-info/RECORD,,
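The two RECORD hunks above pair every shipped file with an urlsafe-base64, padding-stripped SHA-256 digest and a size in bytes (for example, regression.py goes from 17263 bytes in 0.7.0 to 16293 bytes in 0.9.0); this is the standard wheel RECORD format. A minimal sketch of how one such entry can be recomputed is shown below; it assumes the wheel has been unpacked into the current directory, and the file path is purely illustrative.

# Recompute a RECORD-style line for one file from an unpacked wheel (illustrative path).
import base64
import hashlib
from pathlib import Path

def record_line(relpath: str) -> str:
    data = Path(relpath).read_bytes()
    # RECORD digests are SHA-256, urlsafe base64, with '=' padding stripped.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{relpath},sha256={digest},{len(data)}"

print(record_line("Moral88/regression.py"))  # should reproduce the corresponding RECORD entry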