Moral88 0.9.0__tar.gz → 0.10.0__tar.gz

@@ -0,0 +1 @@
+ from .regression import mean_absolute_error, mean_squared_error, r2_score, mean_bias_deviation, adjusted_r2_score, root_mean_squared_error, mean_absolute_percentage_error, explained_variance_score
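The `__init__.py` above re-exports the regression metrics at package level. As an illustrative aside (not part of the packaged files), a minimal usage sketch, assuming Moral88 0.10.0 is installed; the expected values match the test suite added further down:

```python
# Minimal sketch: the metrics can now be imported directly from the package namespace.
from Moral88 import mean_absolute_error, mean_squared_error, r2_score

y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]

print(mean_absolute_error(y_true, y_pred))  # 0.5
print(mean_squared_error(y_true, y_pred))   # 0.375
print(r2_score(y_true, y_pred))             # ~0.948
```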
@@ -1,112 +1,13 @@
  import numpy as np
- import warnings
- from typing import Union, List, Tuple
- from scipy import sparse
-
- class DataValidator:
-     def __init__(self):
-         pass
-
-     def check_device_cpu(self, device):
-         if device not in {"cpu", None}:
-             raise ValueError(f"Unsupported device: {device!r}. Only 'cpu' is supported.")
-
-     def is_1d_array(self, array: Union[np.ndarray, list], warn: bool = False) -> np.ndarray:
-         """
-         Ensures input is a 1D array. Raises an error if it's not 1D or convertible to 1D.
-         """
-         array = np.asarray(array)
-         shape = array.shape
-
-         if len(shape) == 1:
-             return array
-         elif len(shape) == 2 and shape[1] == 1:
-             if warn:
-                 warnings.warn("Input is 2D but will be converted to 1D.", UserWarning)
-             return array.ravel()
-         else:
-             raise ValueError(f"Input must be 1D. Found shape {shape}.")
-
-     def check_samples(self, array: Union[np.ndarray, list]) -> int:
-         """
-         Returns the number of samples in the array.
-         """
-         array = np.asarray(array)
-         if hasattr(array, 'shape') and len(array.shape) > 0:
-             return array.shape[0]
-         else:
-             raise TypeError("Input must be an array-like object with at least one dimension.")
-
-     def check_consistent_length(self, *arrays: Union[np.ndarray, list]):
-         """
-         Ensures all input arrays have the same length.
-         """
-         lengths = [self.check_samples(arr) for arr in arrays]
-         if len(set(lengths)) > 1:
-             raise ValueError(f"Inconsistent lengths: {lengths}")
-
-     def validate_regression_targets(self, y_true, y_pred, dtype=np.float64):
-         """
-         Ensures regression target values are consistent and converted to the specified dtype.
-         """
-         y_true = np.asarray(y_true, dtype=dtype)
-         y_pred = np.asarray(y_pred, dtype=dtype)
-
-         if y_true.shape != y_pred.shape:
-             raise ValueError(f"Shapes of y_true {y_true.shape} and y_pred {y_pred.shape} do not match.")
-
-         return y_true, y_pred
-
-     def check_array(self, array, ensure_2d: bool = True, dtype=np.float64, allow_nan: bool = False):
-         """
-         Validates input array and converts it to specified dtype.
-         """
-         array = np.asarray(array, dtype=dtype)
-
-         if ensure_2d and array.ndim == 1:
-             array = array.reshape(-1, 1)
-
-         if not allow_nan and np.isnan(array).any():
-             raise ValueError("Input contains NaN values, which are not allowed.")
-
-         return array
-
-     def check_sparse(self, array, accept_sparse: Tuple[str] = ('csr', 'csc')):
-         """
-         Validates sparse matrices and converts to an acceptable format.
-         """
-         if sparse.issparse(array):
-             if array.format not in accept_sparse:
-                 return array.asformat(accept_sparse[0])
-             return array
-         else:
-             raise ValueError("Input is not a sparse matrix.")
-
-     def validate_r2_score_inputs(self, y_true, y_pred, sample_weight=None):
-         """
-         Ensures inputs for R2 score computation are valid.
-         """
-         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
-         if sample_weight is not None:
-             sample_weight = self.is_1d_array(sample_weight)
-         return y_true, y_pred, sample_weight
-
-     def validate_mae_mse_inputs(self, y_true, y_pred, library=None):
-         """
-         Ensures inputs for MAE and MSE computation are valid.
-         """
-         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
-         if library not in {None, 'sklearn', 'torch', 'tensorflow', 'Moral88'}:
-             raise ValueError(f"Invalid library: {library}. Choose from {{'Moral88', 'sklearn', 'torch', 'tensorflow'}}.")
-         return y_true, y_pred
-
+ from Moral88.utils import DataValidator
 
+ validator = DataValidator()
 
- def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
+ def mean_bias_deviation(y_true, y_pred, library=None, flatten=True):
      """
      Computes Mean Bias Deviation (MBD).
      """
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
      if flatten and y_true.ndim > 1:
          y_true = y_true.flatten()
@@ -132,14 +33,12 @@ def mean_bias_deviation(self, y_true, y_pred, library=None, flatten=True):
 
      # Default implementation
      return np.mean(y_pred - y_true)
- def __init__(self):
-     self.validator = DataValidator()
 
- def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=True):
+ def r2_score(y_true, y_pred, sample_weight=None, library=None, flatten=True):
      """
      Computes R2 score.
      """
-     y_true, y_pred, sample_weight = self.validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
+     y_true, y_pred, sample_weight = validator.validate_r2_score_inputs(y_true, y_pred, sample_weight)
 
      if flatten and y_true.ndim > 1:
          y_true = y_true.flatten()
@@ -161,11 +60,11 @@ def r2_score(self, y_true, y_pred, sample_weight=None, library=None, flatten=Tru
          return 0.0
      return 1 - (numerator / denominator)
 
- def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+ def mean_absolute_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
      """
      Computes Mean Absolute Error (MAE).
      """
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
      if flatten:
          y_true = y_true.ravel()
@@ -211,11 +110,11 @@ def mean_absolute_error(self, y_true, y_pred, normalize=True, threshold=None, me
          y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
          return tf.reduce_mean(tf.abs(y_true_tensor - y_pred_tensor)).numpy()
 
- def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
+ def mean_squared_error(y_true, y_pred, normalize=True, threshold=None, method='mean', library='Moral88', flatten=True):
      """
      Computes Mean Squared Error (MSE).
      """
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
      if flatten:
          y_true = y_true.ravel()
@@ -239,10 +138,6 @@ def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, met
          else:
              raise ValueError("Invalid method. Choose from {'mean', 'sum', 'none'}.")
 
-         # if normalize and method != 'none':
-         #     range_y = np.ptp(y_true)
-         #     result = result / max(abs(range_y), 1)
-
          return result
 
      elif library == 'sklearn':
@@ -261,11 +156,11 @@ def mean_squared_error(self, y_true, y_pred, normalize=True, threshold=None, met
          y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
          return tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor)).numpy()
 
- def root_mean_squared_error(self, y_true, y_pred, library=None):
+ def root_mean_squared_error(y_true, y_pred, library=None):
      """
      Computes Root Mean Squared Error (RMSE).
      """
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
      if library == 'sklearn':
          from sklearn.metrics import mean_squared_error as sklearn_mse
@@ -283,15 +178,15 @@ def root_mean_squared_error(self, y_true, y_pred, library=None):
          y_pred_tensor = tf.convert_to_tensor(y_pred, dtype=tf.float32)
          return tf.sqrt(tf.reduce_mean(tf.square(y_true_tensor - y_pred_tensor))).numpy()
 
-     mse = self.mean_squared_error(y_true, y_pred)
+     mse = mean_squared_error(y_true, y_pred)
      return np.sqrt(mse)
 
- def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
+ def mean_absolute_percentage_error(y_true, y_pred, library=None):
      """
      Computes Mean Absolute Percentage Error (MAPE).
      """
-     y_true, y_pred = self.validator.validate_regression_targets(y_true, y_pred)
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_regression_targets(y_true, y_pred)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
      y_true = np.clip(y_true, 1e-8, None)
 
      if library == 'sklearn':
@@ -312,11 +207,11 @@ def mean_absolute_percentage_error(self, y_true, y_pred, library=None):
 
      return np.mean(np.abs((y_true - y_pred) / np.clip(np.abs(y_true), 1e-8, None))) * 100
 
- def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
+ def explained_variance_score(y_true, y_pred, library=None, flatten=True):
      """
      Computes Explained Variance Score.
      """
-     y_true, y_pred = self.validator.validate_mae_mse_inputs(y_true, y_pred, library)
+     y_true, y_pred = validator.validate_mae_mse_inputs(y_true, y_pred, library)
 
      if library == 'sklearn':
          from sklearn.metrics import explained_variance_score as sklearn_evs
@@ -342,7 +237,7 @@ def explained_variance_score(self, y_true, y_pred, library=None, flatten=True):
      denominator = np.var(y_true)
      return 1 - numerator / denominator if denominator != 0 else 0
 
- def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=True):
+ def adjusted_r2_score(y_true, y_pred, n_features, library=None, flatten=True):
      """
      Computes Adjusted R-Squared Score.
 
@@ -363,7 +258,7 @@ def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=Tr
          If True, flattens multidimensional arrays before computation.
      """
      # Validate inputs
-     y_true, y_pred, _ = self.validator.validate_r2_score_inputs(y_true, y_pred)
+     y_true, y_pred, _ = validator.validate_r2_score_inputs(y_true, y_pred)
 
      # Ensure inputs are 1D arrays
      if y_true.ndim == 0 or y_pred.ndim == 0:
@@ -395,9 +290,9 @@ def adjusted_r2_score(self, y_true, y_pred, n_features, library=None, flatten=Tr
      return adjusted_r2
 
  if __name__ == '__main__':
-     # Example usage
+     # Example usage
      validator = DataValidator()
-     metrics = Metrics()
+
 
      # Test validation
      arr = [[1], [2], [3]]
@@ -408,12 +303,12 @@ if __name__ == '__main__':
      y_true = [3, -0.5, 2, 7]
      y_pred = [2.5, 0.0, 2, 8]
 
-     print("Mean Absolute Error:", metrics.mean_absolute_error(y_true, y_pred))
-     print("Mean Squared Error:", metrics.mean_squared_error(y_true, y_pred))
-     print("R2 Score:", metrics.r2_score(y_true, y_pred))
-     print("Mean Bias Deviation: ", metrics.mean_bias_deviation(y_true, y_pred))
-     print("Explained Variance Score: ", metrics.explained_variance_score(y_true, y_pred))
-     print("Mean Absolute Percentage Error: ", metrics.mean_absolute_percentage_error(y_true, y_pred))
-     print("Root Mean Squared Error: ", metrics.root_mean_squared_error(y_true, y_pred))
-     print("adjusted_r2_score: ", metrics.adjusted_r2_score(y_true, y_pred, 2))
+     print("Mean Absolute Error:", mean_absolute_error(y_true, y_pred))
+     print("Mean Squared Error:", mean_squared_error(y_true, y_pred))
+     print("R2 Score:", r2_score(y_true, y_pred))
+     print("Mean Bias Deviation: ", mean_bias_deviation(y_true, y_pred))
+     print("Explained Variance Score: ", explained_variance_score(y_true, y_pred))
+     print("Mean Absolute Percentage Error: ", mean_absolute_percentage_error(y_true, y_pred))
+     print("Root Mean Squared Error: ", root_mean_squared_error(y_true, y_pred))
+     print("adjusted_r2_score: ", adjusted_r2_score(y_true, y_pred, 2))
 
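The hunks above convert the metrics from methods of a `Metrics` class into module-level functions backed by a shared `DataValidator` instance. A hedged sketch of the resulting call pattern (the `'sklearn'` branch assumes scikit-learn is installed):

```python
# Sketch of the 0.10.0 module-level API; no Metrics instance is required anymore.
from Moral88.regression import mean_squared_error, root_mean_squared_error

y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]

print(mean_squared_error(y_true, y_pred))       # 0.375, default NumPy implementation
print(root_mean_squared_error(y_true, y_pred))  # ~0.612, square root of the MSE above

# `library` is validated against {'Moral88', 'sklearn', 'torch', 'tensorflow'};
# passing 'sklearn' delegates the computation to sklearn.metrics.
print(mean_squared_error(y_true, y_pred, library='sklearn'))
```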
@@ -0,0 +1,101 @@
+ import numpy as np
+ import warnings
+ from typing import Union, List, Tuple
+ from scipy import sparse
+
+ class DataValidator:
+     def __init__(self):
+         pass
+
+     def check_device_cpu(self, device):
+         if device not in {"cpu", None}:
+             raise ValueError(f"Unsupported device: {device!r}. Only 'cpu' is supported.")
+
+     def is_1d_array(self, array: Union[np.ndarray, list], warn: bool = False) -> np.ndarray:
+         """
+         Ensures input is a 1D array. Raises an error if it's not 1D or convertible to 1D.
+         """
+         array = np.asarray(array)
+         shape = array.shape
+
+         if len(shape) == 1:
+             return array
+         elif len(shape) == 2 and shape[1] == 1:
+             if warn:
+                 warnings.warn("Input is 2D but will be converted to 1D.", UserWarning)
+             return array.ravel()
+         else:
+             raise ValueError(f"Input must be 1D. Found shape {shape}.")
+
+     def check_samples(self, array: Union[np.ndarray, list]) -> int:
+         """
+         Returns the number of samples in the array.
+         """
+         array = np.asarray(array)
+         if hasattr(array, 'shape') and len(array.shape) > 0:
+             return array.shape[0]
+         else:
+             raise TypeError("Input must be an array-like object with at least one dimension.")
+
+     def check_consistent_length(self, *arrays: Union[np.ndarray, list]):
+         """
+         Ensures all input arrays have the same length.
+         """
+         lengths = [self.check_samples(arr) for arr in arrays]
+         if len(set(lengths)) > 1:
+             raise ValueError(f"Inconsistent lengths: {lengths}")
+
+     def validate_regression_targets(self, y_true, y_pred, dtype=np.float64):
+         """
+         Ensures regression target values are consistent and converted to the specified dtype.
+         """
+         y_true = np.asarray(y_true, dtype=dtype)
+         y_pred = np.asarray(y_pred, dtype=dtype)
+
+         if y_true.shape != y_pred.shape:
+             raise ValueError(f"Shapes of y_true {y_true.shape} and y_pred {y_pred.shape} do not match.")
+
+         return y_true, y_pred
+
+     def check_array(self, array, ensure_2d: bool = True, dtype=np.float64, allow_nan: bool = False):
+         """
+         Validates input array and converts it to specified dtype.
+         """
+         array = np.asarray(array, dtype=dtype)
+
+         if ensure_2d and array.ndim == 1:
+             array = array.reshape(-1, 1)
+
+         if not allow_nan and np.isnan(array).any():
+             raise ValueError("Input contains NaN values, which are not allowed.")
+
+         return array
+
+     def check_sparse(self, array, accept_sparse: Tuple[str] = ('csr', 'csc')):
+         """
+         Validates sparse matrices and converts to an acceptable format.
+         """
+         if sparse.issparse(array):
+             if array.format not in accept_sparse:
+                 return array.asformat(accept_sparse[0])
+             return array
+         else:
+             raise ValueError("Input is not a sparse matrix.")
+
+     def validate_r2_score_inputs(self, y_true, y_pred, sample_weight=None):
+         """
+         Ensures inputs for R2 score computation are valid.
+         """
+         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
+         if sample_weight is not None:
+             sample_weight = self.is_1d_array(sample_weight)
+         return y_true, y_pred, sample_weight
+
+     def validate_mae_mse_inputs(self, y_true, y_pred, library=None):
+         """
+         Ensures inputs for MAE and MSE computation are valid.
+         """
+         y_true, y_pred = self.validate_regression_targets(y_true, y_pred)
+         if library not in {None, 'sklearn', 'torch', 'tensorflow', 'Moral88'}:
+             raise ValueError(f"Invalid library: {library}. Choose from {{'Moral88', 'sklearn', 'torch', 'tensorflow'}}.")
+         return y_true, y_pred
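The new `Moral88/utils.py` carries the `DataValidator` that `regression.py` now imports. A short sketch of using it on its own, mirroring the checks exercised by the new tests:

```python
# Illustrative use of the relocated validator (mirrors tests/test_regression.py).
from Moral88.utils import DataValidator

validator = DataValidator()

# A (3, 1) column vector is flattened to 1D; with warn=True a UserWarning is emitted.
print(validator.is_1d_array([[1], [2], [3]]))              # [1 2 3]

# The number of samples is taken from the first dimension.
print(validator.check_samples([[1, 2], [3, 4], [5, 6]]))   # 3

# Mismatched lengths raise ValueError.
try:
    validator.check_consistent_length([1, 2, 3], [7, 8])
except ValueError as err:
    print(err)                                             # Inconsistent lengths: [3, 2]
```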
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: Moral88
- Version: 0.9.0
+ Version: 0.10.0
  Summary: A library for regression evaluation metrics.
  Author: Morteza Alizadeh
  Author-email: alizadeh.c2m@gmail.com
@@ -3,9 +3,11 @@ README.md
  setup.py
  Moral88/__init__.py
  Moral88/regression.py
- Moral88/segmentation.py
+ Moral88/utils.py
  Moral88.egg-info/PKG-INFO
  Moral88.egg-info/SOURCES.txt
  Moral88.egg-info/dependency_links.txt
  Moral88.egg-info/requires.txt
- Moral88.egg-info/top_level.txt
+ Moral88.egg-info/top_level.txt
+ tests/__init__.py
+ tests/test_regression.py
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: Moral88
- Version: 0.9.0
+ Version: 0.10.0
  Summary: A library for regression evaluation metrics.
  Author: Morteza Alizadeh
  Author-email: alizadeh.c2m@gmail.com
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
  setup(
      name='Moral88',
-     version='0.9.0',
+     version='0.10.0',
      description='A library for regression evaluation metrics.',
      author='Morteza Alizadeh',
      author_email='alizadeh.c2m@gmail.com',
@@ -0,0 +1,99 @@
+ import pytest
+ import numpy as np
+ from Moral88.regression import (
+     mean_absolute_error,
+     mean_absolute_error,
+     mean_squared_error,
+     r2_score,
+     mean_bias_deviation,
+     adjusted_r2_score,
+     root_mean_squared_error,
+     mean_absolute_percentage_error,
+     explained_variance_score
+ )
+ import warnings
+ from Moral88.utils import DataValidator
+
+ validator = DataValidator()
+
+ def test_is_1d_array():
+     validator = DataValidator()
+     array = [[1], [2], [3]]
+     with warnings.catch_warnings():
+         warnings.simplefilter("ignore", UserWarning)
+         result = validator.is_1d_array(array, warn=True)
+     assert result.ndim == 1
+     assert np.array_equal(result, np.array([1, 2, 3]))
+
+ def test_check_samples():
+     validator = DataValidator()
+     array = [[1, 2], [3, 4], [5, 6]]
+     result = validator.check_samples(array)
+     assert result == 3
+
+ def test_check_consistent_length():
+     validator = DataValidator()
+     array1 = [1, 2, 3]
+     array2 = [4, 5, 6]
+     validator.check_consistent_length(array1, array2)  # Should not raise an error
+
+     array3 = [7, 8]
+     with pytest.raises(ValueError):
+         validator.check_consistent_length(array1, array3)
+
+ def test_mean_absolute_error():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = mean_absolute_error(y_true, y_pred)
+     assert result == pytest.approx(0.5, rel=1e-2)
+
+ def test_mean_squared_error():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = mean_squared_error(y_true, y_pred)
+     assert result == pytest.approx(0.375, rel=1e-2)
+
+ def test_r2_score():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = r2_score(y_true, y_pred)
+     assert result == pytest.approx(0.948, rel=1e-2)
+
+ def test_mean_bias_deviation():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = mean_bias_deviation(y_true, y_pred)
+     assert result == pytest.approx(0.25, rel=1e-2)
+
+ def test_explained_variance_score():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = explained_variance_score(y_true, y_pred)
+     assert result == pytest.approx(0.957, rel=1e-2)
+
+ def test_mean_absolute_percentage_error():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = mean_absolute_percentage_error(y_true, y_pred)
+     assert result == pytest.approx(32.738095, rel=1e-2)
+
+ def test_root_mean_squared_error():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     result = root_mean_squared_error(y_true, y_pred)
+     assert result == pytest.approx(0.612, rel=1e-2)
+
+ def test_adjusted_r2_score():
+
+     y_true = [3, -0.5, 2, 7]
+     y_pred = [2.5, 0.0, 2, 8]
+     n_features = 2
+     result = adjusted_r2_score(y_true, y_pred, n_features)
+     assert result == pytest.approx(0.8458, rel=1e-2)
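The new `tests/test_regression.py` can be run with pytest. A minimal, assumed invocation from the project root (pytest itself is not declared in the diff, so treat it as a development dependency):

```python
# Hypothetical programmatic runner for the new test module.
import pytest

raise SystemExit(pytest.main(["tests/test_regression.py", "-v"]))
```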
@@ -1,38 +0,0 @@
- def validate_segmentation_inputs(y_true, y_pred):
-     """
-     Validate the inputs for type and shape.
-     """
-     if not isinstance(y_true, (list, tuple)) or not isinstance(y_pred, (list, tuple)):
-         raise TypeError("Both y_true and y_pred must be lists or tuples.")
-
-     if len(y_true) != len(y_pred):
-         raise ValueError("Length of y_true and y_pred must be the same.")
-
-     if not all(isinstance(x, (int, float)) for x in y_true + y_pred):
-         raise TypeError("All elements in y_true and y_pred must be numeric.")
-
- def dice_score(y_true, y_pred, threshold=0.5):
-     """
-     Compute the Dice Score.
-
-     Args:
-         y_true (list or tuple): Ground truth binary values.
-         y_pred (list or tuple): Predicted values (probabilities or binary).
-         threshold (float): Threshold to binarize y_pred if necessary.
-
-     Returns:
-         float: Dice Score.
-     """
-     validate_segmentation_inputs(y_true, y_pred)
-
-     # Binarize predictions based on threshold
-     y_pred = [1 if p >= threshold else 0 for p in y_pred]
-
-     # Calculate intersection and union
-     intersection = sum(yt * yp for yt, yp in zip(y_true, y_pred))
-     total = sum(y_true) + sum(y_pred)
-
-     if total == 0:
-         return 1.0  # Perfect match if both are completely empty
-
-     return 2 * intersection / total