Moral88 0.4.0__tar.gz → 0.5.0__tar.gz

@@ -0,0 +1,346 @@
+ import numpy as np
+ import warnings
+ from typing import Union, List, Tuple
+ from scipy import sparse
+
+ class DataValidator:
+     def __init__(self):
+         pass
+
+     def check_device_cpu(self, device):
+         if device not in {"cpu", None}:
+             raise ValueError(f"Unsupported device: {device!r}. Only 'cpu' is supported.")
+
+     def is_1d_array(self, array: Union[np.ndarray, list], warn: bool = False) -> np.ndarray:
+         """
+         Ensures input is a 1D array. Raises an error if it's not 1D or convertible to 1D.
+         """
+         array = np.asarray(array)
+         shape = array.shape
+
+         if len(shape) == 1:
+             return array
+         elif len(shape) == 2 and shape[1] == 1:
+             if warn:
+                 warnings.warn("Input is 2D but will be converted to 1D.", UserWarning)
+             return array.ravel()
+         else:
+             raise ValueError(f"Input must be 1D. Found shape {shape}.")
+
+     def check_samples(self, array: Union[np.ndarray, list]) -> int:
+         """
+         Returns the number of samples in the array.
+         """
+         if hasattr(array, 'shape') and len(array.shape) > 0:
+             return array.shape[0]
+         else:
+             raise TypeError("Input must be an array-like object with at least one dimension.")
+
+     def check_consistent_length(self, *arrays: Union[np.ndarray, list]):
+         """
+         Ensures all input arrays have the same length.
+         """
+         lengths = [self.check_samples(arr) for arr in arrays]
+         if len(set(lengths)) > 1:
+             raise ValueError(f"Inconsistent lengths: {lengths}")
+
+     def validate_regression_targets(self, y_true, y_pred, dtype=np.float64):
+         """
+         Ensures regression target values are consistent and converted to the specified dtype.
+         """
+         y_true = np.asarray(y_true, dtype=dtype)
+         y_pred = np.asarray(y_pred, dtype=dtype)
+
+         if y_true.shape != y_pred.shape:
+             raise ValueError(f"Shapes of y_true {y_true.shape} and y_pred {y_pred.shape} do not match.")
+
+         return y_true, y_pred
+
+     def check_array(self, array, ensure_2d: bool = True, dtype=np.float64, allow_nan: bool = False):
+         """
+         Validates input array and converts it to specified dtype.
+         """
+         array = np.asarray(array, dtype=dtype)
+
+         if ensure_2d and array.ndim == 1:
+             array = array.reshape(-1, 1)
+
+         if not allow_nan and np.isnan(array).any():
+             raise ValueError("Input contains NaN values, which are not allowed.")
+
+         return array
+
+     def check_sparse(self, array, accept_sparse: Tuple[str, ...] = ('csr', 'csc')):
+         """
+         Validates sparse matrices and converts to an acceptable format.
+         """
+         if sparse.issparse(array):
+             if array.format not in accept_sparse:
+                 return array.asformat(accept_sparse[0])
+             return array
+         else:
+             raise ValueError("Input is not a sparse matrix.")
+
+     def validate_r2_score_inputs(self, y_true, y_pred, sample_weight=None):
+         """
+         Ensures inputs for R2 score computation are valid.
+         """
+         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
+         if sample_weight is not None:
+             sample_weight = self.is_1d_array(sample_weight)
+         return y_true, y_pred, sample_weight
+
+     def validate_mae_mse_inputs(self, y_true, y_pred, library=None):
+         """
+         Ensures inputs for MAE and MSE computation are valid.
+         """
+         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
+         if library not in {None, 'sklearn', 'torch', 'tensorflow', 'Moral88'}:
+             raise ValueError(f"Invalid library: {library}. Choose from {{'Moral88', 'sklearn', 'torch', 'tensorflow'}}.")
+         return y_true, y_pred
+
+
+ class Metrics:
+     def __init__(self):
+         self.validator = DataValidator()
+
+     def mean_bias_deviation(self, y_true, y_pred, library=None):
+         """
+         Computes Mean Bias Deviation (MBD).
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if library == 'sklearn':
+             # Sklearn does not have a direct implementation for MBD
+             raise NotImplementedError("Mean Bias Deviation is not implemented in sklearn.")
+
+         if library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             bias = torch.mean(y_pred_tensor - y_true_tensor).item()
+             return bias
+
+         if library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             bias = tf.reduce_mean(y_pred_tensor - y_true_tensor).numpy()
+             return bias
+
+         # Default implementation: positive values mean predictions overestimate on average.
+         return np.mean(y_pred - y_true)
+
+     def r2_score(self, y_true, y_pred, sample_weight=None, library=None):
+         """
+         Computes R2 score.
+         """
+         y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
+
+         if library == 'sklearn':
+             from sklearn.metrics import r2_score as sklearn_r2
+             return sklearn_r2(y_true, y_pred, sample_weight=sample_weight)
+
+         if library == 'statsmodels':
+             import statsmodels.api as sm
+             model = sm.OLS(y_true, sm.add_constant(y_pred)).fit()
+             return model.rsquared
+
+         # Default implementation. Note: sample_weight is only applied by the sklearn backend.
+         numerator = np.sum((y_true - y_pred) ** 2)
+         denominator = np.sum((y_true - np.mean(y_true)) ** 2)
+
+         if denominator == 0:
+             return 0.0
+         return 1 - (numerator / denominator)
+
+     def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+         """
+         Computes Mean Absolute Error (MAE).
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if flatten:
+             y_true = y_true.ravel()
+             y_pred = y_pred.ravel()
+
+         if library == 'Moral88' or library is None:
+             if threshold is not None:
+                 y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+             absolute_errors = np.abs(y_true - y_pred)
+
+             if method == 'mean':
+                 result = np.mean(absolute_errors)
+             elif method == 'sum':
+                 result = np.sum(absolute_errors)
+             elif method == 'none':
+                 result = absolute_errors
+             else:
+                 raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+             if normalize and method != 'none':
+                 # Normalize by the range of y_true (at least 1, to avoid division by zero).
+                 range_y = np.ptp(y_true)
+                 result = result / max(abs(range_y), 1)
+
+             return result
+
+         elif library == 'sklearn':
+             from sklearn.metrics import mean_absolute_error as sklearn_mae
+             return sklearn_mae(y_true, y_pred)
+
+         elif library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
+
+         elif library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
+
+     def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+         """
+         Computes Mean Squared Error (MSE).
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if flatten:
+             y_true = y_true.ravel()
+             y_pred = y_pred.ravel()
+
+         if library == 'Moral88' or library is None:
+             if threshold is not None:
+                 y_pred = np.clip(y_pred, threshold[0], threshold[1])
+
+             squared_errors = (y_true - y_pred) ** 2
+
+             if method == 'mean':
+                 result = np.mean(squared_errors)
+             elif method == 'sum':
+                 result = np.sum(squared_errors)
+             elif method == 'none':
+                 result = squared_errors
+             else:
+                 raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
+
+             if normalize and method != 'none':
+                 # Normalize by the range of y_true (at least 1, to avoid division by zero).
+                 range_y = np.ptp(y_true)
+                 result = result / max(abs(range_y), 1)
+
+             return result
+
+         elif library == 'sklearn':
+             from sklearn.metrics import mean_squared_error as sklearn_mse
+             return sklearn_mse(y_true, y_pred)
+
+         elif library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
+
+         elif library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
+
+     def root_mean_squared_error(self, y_true, y_pred, library=None):
+         """
+         Computes Root Mean Squared Error (RMSE).
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if library == 'sklearn':
+             from sklearn.metrics import mean_squared_error as sklearn_mse
+             return np.sqrt(sklearn_mse(y_true, y_pred))
+
+         if library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             return torch.sqrt(torch.mean((y_true_tensor - y_pred_tensor) ** 2)).item()
+
+         if library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
+
+         # Use the unnormalized MSE so the result is a true RMSE.
+         mse = self.mean_squared_error(y_true, y_pred, normalize=False)
+         return np.sqrt(mse)
+
+     def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
+         """
+         Computes Mean Absolute Percentage Error (MAPE).
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if library == 'sklearn':
+             from sklearn.metrics import mean_absolute_percentage_error as sklearn_mape
+             return sklearn_mape(y_true, y_pred) * 100
+
+         if library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             return torch.mean(torch.abs((y_true_tensor - y_pred_tensor) / torch.clamp(y_true_tensor, min=1e-8))).item() * 100
+
+         if library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             return tf.reduce_mean(tf.abs((y_true_tensor - y_pred_tensor) / tf.clip_by_value(y_true_tensor, 1e-8, tf.float32.max))).numpy() * 100
+
+         return np.mean(np.abs((y_true - y_pred) / np.clip(y_true, 1e-8, None))) * 100
+
+     def explained_variance_score(self, y_true, y_pred, library=None):
+         """
+         Computes Explained Variance Score.
+         """
+         y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+
+         if library == 'sklearn':
+             from sklearn.metrics import explained_variance_score as sklearn_evs
+             return sklearn_evs(y_true, y_pred)
+
+         if library == 'torch':
+             import torch
+             y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
+             y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
+             variance_residual = torch.var(y_true_tensor - y_pred_tensor)
+             variance_y = torch.var(y_true_tensor)
+             return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+         if library == 'tensorflow':
+             import tensorflow as tf
+             y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
+             y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
+             variance_residual = tf.math.reduce_variance(y_true_tensor - y_pred_tensor)
+             variance_y = tf.math.reduce_variance(y_true_tensor)
+             return 1 - variance_residual / variance_y if variance_y != 0 else 0
+
+         numerator = np.var(y_true - y_pred)
+         denominator = np.var(y_true)
+         return 1 - numerator / denominator if denominator != 0 else 0
+
+
+ if __name__ == '__main__':
+     # Example usage
+     validator = DataValidator()
+     metrics = Metrics()
+
+     # Test validation
+     arr = [[1], [2], [3]]
+     print("1D array:", validator.is_1d_array(arr))
+     print("Samples:", validator.check_samples(arr))
+
+     # Test R2 score
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     print("R2 Score:", metrics.r2_score(y_true, y_pred))
+
+     # Test MAE and MSE
+     print("Mean Absolute Error:", metrics.mean_absolute_error(y_true, y_pred))
+     print("Mean Squared Error:", metrics.mean_squared_error(y_true, y_pred))
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: Moral88
- Version: 0.4.0
+ Version: 0.5.0
  Summary: A library for regression evaluation metrics.
  Author: Morteza Alizadeh
  Author-email: alizadeh.c2m@gmail.com
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: Moral88
- Version: 0.4.0
+ Version: 0.5.0
  Summary: A library for regression evaluation metrics.
  Author: Morteza Alizadeh
  Author-email: alizadeh.c2m@gmail.com
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
      name='Moral88',
-     version='0.4.0',
+     version='0.5.0',
      description='A library for regression evaluation metrics.',
      author='Morteza Alizadeh',
      author_email='alizadeh.c2m@gmail.com',
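
As a quick post-upgrade check, the installed distribution version can be read at runtime. This is a generic sketch using only the Python standard library; it is not functionality provided by Moral88 itself.

# Confirm which Moral88 build is installed after the 0.5.0 bump (stdlib only).
from importlib.metadata import version

print(version("Moral88"))  # expected to print '0.5.0' once this release is installed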
@@ -1,266 +0,0 @@
- import numpy as np
-
- def validate_inputs(y_true, y_pred):
-     """
-     Validate the inputs for type and length.
-     """
-     if not isinstance(y_true, (list, tuple, np.ndarray)) or not isinstance(y_pred, (list, tuple, np.ndarray)):
-         raise TypeError("Both y_true and y_pred must be lists, tuples, or numpy arrays.")
-
-     y_true = np.array(y_true)
-     y_pred = np.array(y_pred)
-
-     if y_true.shape != y_pred.shape:
-         raise ValueError("Shapes of y_true and y_pred must be the same.")
-
-     if not np.issubdtype(y_true.dtype, np.number) or not np.issubdtype(y_pred.dtype, np.number):
-         raise TypeError("All elements in y_true and y_pred must be numeric.")
-
- def mean_absolute_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-     """
-     Calculate Mean Absolute Error (MAE) for single or multi-dimensional data.
-     """
-     validate_inputs(y_true, y_pred)
-
-     # y_true = np.array(y_true)
-     # y_pred = np.array(y_pred)
-
-     if flatten:
-         y_true = y_true.ravel()
-         y_pred = y_pred.ravel()
-
-     if library == 'Moral88':
-         if threshold is not None:
-             y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-         absolute_errors = np.abs(y_true - y_pred)
-
-         if method == 'mean':
-             result = np.mean(absolute_errors)
-         elif method == 'sum':
-             result = np.sum(absolute_errors)
-         elif method == 'none':
-             result = absolute_errors
-         else:
-             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-         if normalize and method != 'none':
-             range_y = np.ptp(y_true)
-             result = result / max(abs(range_y), 1)
-
-         return result
-
-     elif library == 'sklearn':
-         from sklearn.metrics import mean_absolute_error as sklearn_mae
-         return sklearn_mae(y_true, y_pred)
-
-     elif library == 'torch':
-         import torch
-         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-         return torch.mean(torch.abs(y_true_tensor - y_pred_tensor)).item()
-
-     elif library == 'tensorflow':
-         import tensorflow as tf
-         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-         return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
-
-     else:
-         raise ValueError(f"Invalid library: {library}. Choose from {'Moral88', 'sklearn', 'torch', 'tensorflow'}.")
-
- def mean_squared_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
-     """
-     Calculate Mean Squared Error (MSE) for single or multi-dimensional data.
-     """
-     validate_inputs(y_true, y_pred)
-
-     # y_true = np.array(y_true)
-     # y_pred = np.array(y_pred)
-
-     if flatten:
-         y_true = y_true.ravel()
-         y_pred = y_pred.ravel()
-
-     if library == 'Moral88':
-         if threshold is not None:
-             y_pred = np.clip(y_pred, threshold[0], threshold[1])
-
-         squared_errors = (y_true - y_pred) ** 2
-
-         if method == 'mean':
-             result = np.mean(squared_errors)
-         elif method == 'sum':
-             result = np.sum(squared_errors)
-         elif method == 'none':
-             result = squared_errors
-         else:
-             raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
-
-         if normalize and method != 'none':
-             range_y = np.ptp(y_true)
-             result = result / max(abs(range_y), 1)
-
-         return result
-
-     elif library == 'sklearn':
-         from sklearn.metrics import mean_squared_error as sklearn_mse
-         return sklearn_mse(y_true, y_pred)
-
-     elif library == 'torch':
-         import torch
-         y_true_tensor = torch.tensor(y_true, dtype=torch.float32)
-         y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32)
-         return torch.mean((y_true_tensor - y_pred_tensor) ** 2).item()
-
-     elif library == 'tensorflow':
-         import tensorflow as tf
-         y_true_tensor = tf.convert_to_tensor(y_true, dtype=tf.float32)
-         y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
-         return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
-
-     else:
-         raise ValueError(f"Invalid library: {library}. Choose from {'Moral88', 'sklearn', 'torch', 'tensorflow'}.")
-
- def r_squared(y_true, y_pred, flatten=True):
-     """
-     Compute R-Squared for single or multi-dimensional data.
-     """
-     validate_inputs(y_true, y_pred)
-
-     y_true = np.array(y_true)
-     y_pred = np.array(y_pred)
-
-     if flatten:
-         y_true = y_true.ravel()
-         y_pred = y_pred.ravel()
-
-     ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
-     ss_residual = np.sum((y_true - y_pred) ** 2)
-     return 1 - (ss_residual / ss_total)
-
- import numpy as np
- import warnings
- from typing import Union, List, Tuple
- from scipy import sparse
-
- class DataValidator:
-     def __init__(self):
-         pass
-
-     def check_device_cpu(self, device):
-         if device not in {"cpu", None}:
-             raise ValueError(f"Unsupported device: {device!r}. Only 'cpu' is supported.")
-
-     def is_1d_array(self, array: Union[np.ndarray, list], warn: bool = False) -> np.ndarray:
-         """
-         Ensures input is a 1D array. Raises an error if it's not 1D or convertible to 1D.
-         """
-         array = np.asarray(array)
-         shape = array.shape
-
-         if len(shape) == 1:
-             return array
-         elif len(shape) == 2 and shape[1] == 1:
-             if warn:
-                 warnings.warn("Input is 2D but will be converted to 1D.", UserWarning)
-             return array.ravel()
-         else:
-             raise ValueError(f"Input must be 1D. Found shape {shape}.")
-
-     def check_samples(self, array: Union[np.ndarray, list]) -> int:
-         """
-         Returns the number of samples in the array.
-         """
-         if hasattr(array, 'shape') and len(array.shape) > 0:
-             return array.shape[0]
-         else:
-             raise TypeError("Input must be an array-like object with at least one dimension.")
-
-     def check_consistent_length(self, *arrays: Union[np.ndarray, list]):
-         """
-         Ensures all input arrays have the same length.
-         """
-         lengths = [self.check_samples(arr) for arr in arrays]
-         if len(set(lengths)) > 1:
-             raise ValueError(f"Inconsistent lengths: {lengths}")
-
-     def validate_regression_targets(self, y_true, y_pred, dtype=np.float64):
-         """
-         Ensures regression target values are consistent and converted to the specified dtype.
-         """
-         y_true = np.asarray(y_true, dtype=dtype)
-         y_pred = np.asarray(y_pred, dtype=dtype)
-
-         if y_true.shape != y_pred.shape:
-             raise ValueError(f"Shapes of y_true {y_true.shape} and y_pred {y_pred.shape} do not match.")
-
-         return y_true, y_pred
-
-     def check_array(self, array, ensure_2d: bool = True, dtype=np.float64, allow_nan: bool = False):
-         """
-         Validates input array and converts it to specified dtype.
-         """
-         array = np.asarray(array, dtype=dtype)
-
-         if ensure_2d and array.ndim == 1:
-             array = array.reshape(-1, 1)
-
-         if not allow_nan and np.isnan(array).any():
-             raise ValueError("Input contains NaN values, which are not allowed.")
-
-         return array
-
-     def check_sparse(self, array, accept_sparse: Tuple[str] = ('csr', 'csc')):
-         """
-         Validates sparse matrices and converts to an acceptable format.
-         """
-         if sparse.issparse(array):
-             if array.format not in accept_sparse:
-                 return array.asformat(accept_sparse[0])
-             return array
-         else:
-             raise ValueError("Input is not a sparse matrix.")
-
-     def validate_r2_score_inputs(self, y_true, y_pred, sample_weight=None):
-         """
-         Ensures inputs for R2 score computation are valid.
-         """
-         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
-         if sample_weight is not None:
-             sample_weight = self.is_1d_array(sample_weight)
-         return y_true, y_pred, sample_weight
-
-
- class Metrics:
-     def __init__(self):
-         self.validator = DataValidator()
-
-     def r2_score(self, y_true, y_pred, sample_weight=None):
-         """
-         Computes R2 score.
-         """
-         y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
-
-         numerator = np.sum((y_true - y_pred) ** 2)
-         denominator = np.sum((y_true - np.mean(y_true)) ** 2)
-
-         if denominator == 0:
-             return 0.0
-         return 1 - (numerator / denominator)
-
-
- if __name__ == '__main__':
-     # Example usage
-     validator = DataValidator()
-     metrics = Metrics()
-
-     # Test validation
-     arr = [[1], [2], [3]]
-     print("1D array:", validator.is_1d_array(arr))
-     print("Samples:", validator.check_samples(arr))
-
-     # Test R2 score
-     y_true = [3, -0.5, 2, 7]
-     y_pred = [2.5, 0.0, 2, 8]
-     print("R2 Score:", metrics.r2_score(y_true, y_pred))