tsadmetrics 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff shows the changes between package versions as published to a supported public registry. It is provided for informational purposes only.
tests/test_spm.py DELETED
@@ -1,213 +0,0 @@
- import unittest
- import random
-
- import numpy as np
- from sklearn.metrics import fbeta_score
- from tsadmetrics.metrics.spm import *
-
- class TestPointwiseFScore(unittest.TestCase):
-
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.num_tests = 100
-         self.test_cases = []
-         for _ in range(self.num_tests):
-             y_true = np.random.choice([0, 1], size=(10000,))
-             y_pred = np.random.choice([0, 1], size=(10000,))
-             self.test_cases.append((y_true, y_pred))
-
-         # Edge case: perfect prediction.
-         y_true_perfect = np.random.choice([0, 1], size=(10000,))
-         y_pred_perfect = y_true_perfect.copy()
-         self.test_cases.append((y_true_perfect, y_pred_perfect))
-
-         # Edge case: all-zero prediction.
-         y_true_all_zeros = np.random.choice([0, 1], size=(10000,))
-         y_pred_all_zeros = np.zeros(10000, dtype=int)
-         self.test_cases.append((y_true_all_zeros, y_pred_all_zeros))
-
-     def test(self):
-         for y_true, y_pred in self.test_cases:
-             with self.subTest(y_true=y_true, y_pred=y_pred):
-                 beta = random.randint(0, 1000000)
-                 metric = PointwiseFScore(beta=beta)
-                 f_score = metric.compute(y_true, y_pred)
-                 expected_f_score = fbeta_score(y_true, y_pred, beta=beta)
-                 self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
- class TestPrecisionAtK(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0, 0, 1, 1])
-
-         self.y_pred1 = np.array([0.2, 0.9, 0.3, 0.8])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([3, 4, 1, 2])
-
-         self.y_true2 = np.array([1, 1, 1, 0])
-         self.y_pred4 = np.array([3, 4, 1, 2])
-
-         self.y_pred5 = self.y_true1
-         self.y_pred6 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = PrecisionAtK()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred4), 2)
-         expected_score = round(2 / 3, 2)
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred6), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = PrecisionAtK()
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PrecisionAtK raised an exception: {e}")
-
- class TestPointwiseAucRoc(unittest.TestCase):
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true1 = np.array([0, 0, 1, 1])
-
-         self.y_pred1 = np.array([1, 3, 2, 4])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([4, 4, 4, 4])
-
-         self.y_pred4 = self.y_true1
-         self.y_pred5 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = PointwiseAucRoc()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.75
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = PointwiseAucRoc()
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PointwiseAucRoc raised an exception: {e}")
-
- class TestPointwiseAucPr(unittest.TestCase):
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true1 = np.array([0, 0, 1, 1])
-
-         self.y_pred1 = np.array([1, 3, 2, 4])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([4, 4, 4, 4])
-
-         self.y_pred4 = self.y_true1
-         self.y_pred5 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         """
-         Test for the metric.compute function.
-         """
-         metric = PointwiseAucPr()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.83
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = PointwiseAucPr()
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PointwiseAucPr raised an exception: {e}")
tests/test_tmem.py DELETED
@@ -1,198 +0,0 @@
- import unittest
- import random
-
- import numpy as np
- from tsadmetrics.metrics.tem.tmem import *
-
- class TestTemporalDistance(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0, 0, 1, 1, 0, 0])
-         self.y_pred1 = np.array([0, 0, 1, 1, 0, 0])
-
-         self.y_true2 = np.array([0, 0, 0, 1, 1, 0])
-         self.y_pred2 = np.array([0, 1, 1, 0, 0, 0])
-
-         self.y_true3 = np.array([0, 0, 1, 1, 0, 0])
-         self.y_pred3 = np.array([1, 1, 0, 0, 0, 0])
-
-         self.y_pred4 = self.y_true1
-         self.y_pred5 = np.zeros(len(self.y_true1))
-
-     def test_temporal_distance_euclidean(self):
-         metric = TemporalDistance(distance=0)
-
-         td = metric.compute(self.y_true1, self.y_pred1)
-         expected = 0
-         self.assertEqual(td, expected)
-
-         td = metric.compute(self.y_true2, self.y_pred2)
-         expected = 6
-         self.assertEqual(td, expected)
-
-         td = metric.compute(self.y_true3, self.y_pred3)
-         expected = 6
-         self.assertEqual(td, expected)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected = 0
-         self.assertAlmostEqual(score, expected, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected = 12
-         self.assertAlmostEqual(score, expected, places=4)
-
-     def test_temporal_distance_squared(self):
-         metric = TemporalDistance(distance=1)
-
-         td = metric.compute(self.y_true1, self.y_pred1)
-         expected = 0
-         self.assertEqual(td, expected)
-
-         td = metric.compute(self.y_true2, self.y_pred2)
-         expected = 18
-         self.assertEqual(td, expected)
-
-         td = metric.compute(self.y_true3, self.y_pred3)
-         expected = 18
-         self.assertEqual(td, expected)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected = 0
-         self.assertAlmostEqual(score, expected, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected = 144
-         self.assertAlmostEqual(score, expected, places=4)
-
-     def test_consistency(self):
-         try:
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.zeros(100)
-                 metric = TemporalDistance(distance=random.choice([0, 1]))
-                 metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"TemporalDistance raised an exception: {e}")
-
- class TestAbsoluteDetectionDistance(unittest.TestCase):
-
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
-         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
-         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = AbsoluteDetectionDistance()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.25
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 0.25
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
-         expected_score = 0.06
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
-         expected_score = 0.12
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.17  # The mean of the distances is never 0.
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_score = 0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = AbsoluteDetectionDistance()
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"AbsoluteDetectionDistance raised an exception: {e}")
-
- class TestEnhancedTimeseriesAwareFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = EnhancedTimeseriesAwareFScore(theta_p=0.5, theta_r=0.1)
-         f_score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_f_score = 0.72
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred21), 2)
-         expected_f_score = 0.77
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred22), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_f_score = 1.0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_f_score = 0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-     def test_consistency(self):
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric = EnhancedTimeseriesAwareFScore(theta_r=random.random(), theta_p=random.random())
-             metric.compute(y_true, y_pred)
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"EnhancedTimeseriesAwareFScore raised an exception: {e}")
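
The deleted test_tmem.py exercised the temporal metrics through the same compute(y_true, y_pred) interface, with all-binary inputs, and pinned their behavior at the extremes: 0 for TemporalDistance on a perfect prediction, 1.0 for the enhanced F-score on a perfect prediction, 0 on an all-zero one. A minimal sketch of that usage; the named imports are an assumption (the tests used a wildcard import from tsadmetrics.metrics.tem.tmem), and the parameter semantics are only what the test fixtures imply:

    import numpy as np
    from tsadmetrics.metrics.tem.tmem import TemporalDistance, EnhancedTimeseriesAwareFScore

    y_true = np.array([0, 0, 1, 1, 0, 0])
    y_pred = np.array([0, 1, 1, 0, 0, 0])

    # The deleted tests labelled distance=0 "euclidean" and distance=1
    # "squared"; lower is better, with 0 for a perfect prediction.
    td = TemporalDistance(distance=0).compute(y_true, y_pred)

    # theta_p and theta_r are the thresholds the deleted tests passed;
    # their precise semantics are not visible in this diff.
    etaf = EnhancedTimeseriesAwareFScore(theta_p=0.5, theta_r=0.1).compute(y_true, y_pred)

    print(td, etaf)
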