tsadmetrics 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_tpdm.py DELETED
@@ -1,369 +0,0 @@
- import unittest
- from tsadmetrics.metrics.tem.tpdm import *
-
- import numpy as np
- import random
-
-
- class TestCompositeFScore(unittest.TestCase):
-
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-         self.y_pred3 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred4 = np.zeros(len(self.y_true))
-
-     def test(self):
-         metric = CompositeFScore()
-         f_score = round(metric.compute(self.y_true, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred2), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred3), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred4), 2)
-         expected_f_score = 0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-     def test_consistency(self):
-         metric = CompositeFScore()
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric.compute(y_true, y_pred)
-             for _ in range(1000):
-                 y_true = np.random.choice([0, 1], size=(10,))
-                 y_pred = np.random.choice([0, 1], size=(10,))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"CompositeFScore raised an exception {e}")
-
-
- class TestPointadjustedFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true = np.array([0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred3 = np.zeros(len(self.y_true))
-
-     def test(self):
-         metric = PointadjustedFScore()
-         f_score = round(metric.compute(self.y_true, self.y_pred), 2)
-         expected_f_score = 0.93
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred2), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred3), 2)
-         expected_f_score = 0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-     def test_consistency(self):
-         metric = PointadjustedFScore()
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric.compute(y_true, y_pred)
-             for _ in range(1000):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PointadjustedFScore raised an exception {e}")
-
-
- class TestSegmentwiseFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-         self.y_pred3 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred4 = np.zeros(len(self.y_true))
-
-     def test(self):
-         metric = SegmentwiseFScore()
-         f_score = round(metric.compute(self.y_true, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred2), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred3), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred4), 2)
-         expected_f_score = 0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-     def test_consistency(self):
-         metric = SegmentwiseFScore()
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric.compute(y_true, y_pred)
-             for _ in range(1000):
-                 y_true = np.random.choice([0, 1], size=(10,))
-                 y_pred = np.random.choice([0, 1], size=(10,))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"SegmentwiseFScore raised an exception {e}")
-
-
- class TestPointadjustedAucRoc(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0,0,1,1])
-         self.y_pred1 = np.array([1, 3, 2, 4])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([4, 4, 4, 4])
-
-         self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0,
-                                  1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1,
-                                  1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
-
-         self.y_pred4 = [0.1280475, 0.12059283, 0.29936968, 0.85866402, 0.74071874, 0.22310849,
-                         0.11281839, 0.26133246, 0.33696106, 0.01442675, 0.51962876, 0.07828833,
-                         0.45337844, 0.09444483, 0.91216588, 0.18847595, 0.26828481, 0.65248919,
-                         0.46291981, 0.43730757, 0.78087553, 0.45031043, 0.88661033, 0.56209352,
-                         0.45029423, 0.17638205, 0.9261279, 0.58830652, 0.01602648, 0.73903379,
-                         0.61831379, 0.74779903, 0.42682106, 0.82583519, 0.19709012, 0.44925962,
-                         0.62752415, 0.52458327, 0.46291768, 0.33937527, 0.34868777, 0.12293847,
-                         0.84477504, 0.10225254, 0.37048167, 0.04476031, 0.36680499, 0.11346155,
-                         0.10583112, 0.09493136, 0.54878736, 0.68514489, 0.5940307, 0.14526962,
-                         0.69385728, 0.38888727, 0.61495304, 0.06795402, 0.02894603, 0.08293609,
-                         0.22865685, 0.63531487, 0.97966126, 0.31418622, 0.8943095, 0.22974177,
-                         0.94402929, 0.13140625, 0.80539267, 0.40160344, 0.38151339, 0.65011626,
-                         0.71657942, 0.93297398, 0.32043329, 0.54667941, 0.90645979, 0.98730183,
-                         0.82351336, 0.10404812, 0.6962921, 0.72890752, 0.49700666, 0.47461103,
-                         0.59696079, 0.85876179, 0.247344, 0.38187879, 0.23906861, 0.5266315,
-                         0.08171512, 0.27903375, 0.61112439, 0.20784267, 0.90652453, 0.87575255,
-                         0.26972245, 0.78780138, 0.37649185, 0.08467683]
-
-         self.y_pred5 = self.y_true1
-         self.y_pred6 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = PointadjustedAucRoc()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred4), 2)
-         expected_score = 0.75
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred6), 2)
-         expected_score = 0.5
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         y_true, y_pred = [], []
-         metric = PointadjustedAucRoc()
-         try:
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PointadjustedAucRoc raised an exception {e}")
-
-
- class TestPointadjustedAucPr(unittest.TestCase):
-
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true1 = np.array([0,0,1,1])
-         self.y_pred1 = np.array([1, 3, 2, 4])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([4, 4, 4, 4])
-
-         self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0,
-                                  1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1,
-                                  1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
-
-         self.y_pred4 = [0.1280475, 0.12059283, 0.29936968, 0.85866402, 0.74071874, 0.22310849,
-                         0.11281839, 0.26133246, 0.33696106, 0.01442675, 0.51962876, 0.07828833,
-                         0.45337844, 0.09444483, 0.91216588, 0.18847595, 0.26828481, 0.65248919,
-                         0.46291981, 0.43730757, 0.78087553, 0.45031043, 0.88661033, 0.56209352,
-                         0.45029423, 0.17638205, 0.9261279, 0.58830652, 0.01602648, 0.73903379,
-                         0.61831379, 0.74779903, 0.42682106, 0.82583519, 0.19709012, 0.44925962,
-                         0.62752415, 0.52458327, 0.46291768, 0.33937527, 0.34868777, 0.12293847,
-                         0.84477504, 0.10225254, 0.37048167, 0.04476031, 0.36680499, 0.11346155,
-                         0.10583112, 0.09493136, 0.54878736, 0.68514489, 0.5940307, 0.14526962,
-                         0.69385728, 0.38888727, 0.61495304, 0.06795402, 0.02894603, 0.08293609,
-                         0.22865685, 0.63531487, 0.97966126, 0.31418622, 0.8943095, 0.22974177,
-                         0.94402929, 0.13140625, 0.80539267, 0.40160344, 0.38151339, 0.65011626,
-                         0.71657942, 0.93297398, 0.32043329, 0.54667941, 0.90645979, 0.98730183,
-                         0.82351336, 0.10404812, 0.6962921, 0.72890752, 0.49700666, 0.47461103,
-                         0.59696079, 0.85876179, 0.247344, 0.38187879, 0.23906861, 0.5266315,
-                         0.08171512, 0.27903375, 0.61112439, 0.20784267, 0.90652453, 0.87575255,
-                         0.26972245, 0.78780138, 0.37649185, 0.08467683]
-
-         self.y_pred5 = self.y_true1
-         self.y_pred6 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = PointadjustedAucPr()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.75
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred4), 2)
-         expected_score = 0.78
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred5), 2)
-         expected_score = 1
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred6), 2)
-         expected_score = 0.75
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         y_true, y_pred = [], []
-         metric = PointadjustedAucPr()
-         try:
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PointadjustedAucPr raised an exception {e}")
-
-
- class TestRangebasedFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-
-         self.y_pred3 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = RangebasedFScore(beta=1, p_alpha=0.2, r_alpha=0.2, cardinality_mode='one',
-                                   p_bias='flat', r_bias='flat')
-         f_score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_f_score = 0.46
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred21), 2)
-         expected_f_score = 0.71
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred22), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_f_score = 1
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_f_score = 0
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-     def test_range_based_consistency(self):
-         try:
-             modes = ['flat', 'front', 'back', 'middle']
-             modes_c = ['one', 'reciprocal']
-             metric = RangebasedFScore(beta=2, p_alpha=random.random(), r_alpha=random.random(),
-                                       cardinality_mode=random.choice(modes_c),
-                                       p_bias=random.choice(modes), r_bias=random.choice(modes))
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric.compute(y_true, y_pred)
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 metric = RangebasedFScore(beta=2, p_alpha=random.random(), r_alpha=random.random(),
-                                           cardinality_mode=random.choice(modes_c),
-                                           p_bias=random.choice(modes), r_bias=random.choice(modes))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"RangeBasedFScore raised an exception {e}")
tests/test_tstm.py DELETED
@@ -1,338 +0,0 @@
- import unittest
-
- import numpy as np
- import random
-
- from tsadmetrics.metrics.tem.tstm import *
-
-
- class TestAffiliationbasedFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = AffiliationbasedFScore(beta=1.0)
-         f_score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_f_score = 0.77
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred21), 2)
-         expected_f_score = 0.77
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true2, self.y_pred22), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_metric = 1.0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_metric = 0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-     def test_consistency(self):
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric = AffiliationbasedFScore(beta=1.0)
-             metric.compute(y_true, y_pred)
-             for _ in range(100):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"AffiliationbasedFScore raised an exception {e}")
-
-
- class TestTimeTolerantFScore(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_pred3 = self.y_true
-         self.y_pred4 = np.zeros(len(self.y_true))
-
-     def test(self):
-         metric = TimeTolerantFScore(t=2)
-         f_score = round(metric.compute(self.y_true, self.y_pred1), 2)
-         expected_f_score = 0.67
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         f_score = round(metric.compute(self.y_true, self.y_pred2), 2)
-         expected_f_score = 0.55
-         self.assertAlmostEqual(f_score, expected_f_score, places=4)
-
-         score = round(metric.compute(self.y_true, self.y_pred3), 2)
-         expected_metric = 1.0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-         score = round(metric.compute(self.y_true, self.y_pred4), 2)
-         expected_metric = 0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-     def test_consistency(self):
-         try:
-             for _ in range(1000):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 t = random.randint(1, 100)
-                 metric = TimeTolerantFScore(t=t)
-                 f_score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"TimeTolerantFScore raised an exception {e}")
-
-
- class TestVusRoc(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
-         self.y_true2 = np.array([0, 1, 0, 1, 0, 0, 0, 0, 0, 0])
-
-         self.y_pred1 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-         self.y_pred2 = np.array([8, 0, 9, 1, 7, 2, 3, 4, 5, 6])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = VusRoc(window=4)
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         self.assertTrue(score <= 0.1)
-
-         score = round(metric.compute(self.y_true2, self.y_pred2), 2)
-         self.assertTrue(score > 0.4)
-         metric = VusRoc(window=0)
-         score = metric.compute(self.y_true2, self.y_pred2)
-         self.assertTrue(score < 0.4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_metric = 1.0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_metric = 0.5
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = VusRoc(window=4)
-             for _ in range(10):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"VusRoc raised an exception {e}")
-
-
- class TestVusPr(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
-         self.y_true2 = np.array([0, 1, 0, 1, 0, 0, 0, 0, 0, 0])
-
-         self.y_pred1 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-         self.y_pred2 = np.array([8, 0, 9, 1, 7, 2, 3, 4, 5, 6])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = VusPr()
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         self.assertTrue(score <= 0.2)
-
-         score = round(metric.compute(self.y_true2, self.y_pred2), 2)
-         self.assertTrue(score > 0.5)
-
-         metric = VusPr(window=0)
-         score = metric.compute(self.y_true2, self.y_pred2)
-         self.assertTrue(score < 0.5)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_metric = 1.0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_metric = 0.2
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = VusPr(window=4)
-             for _ in range(10):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"VusPr raised an exception {e}")
-
-
- class TestPateFScore(unittest.TestCase):
-
-     def setUp(self):
-         """
-         Initial setup for the tests.
-         """
-         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
-         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
-         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
-
-         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
-         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
-         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
-
-         self.y_pred3 = self.y_true1
-         self.y_pred4 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = PateFScore(early=2, delay=2)
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.67
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 0.27
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
-         expected_score = 0.71
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
-         expected_score = 0.62
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_metric = 1.0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
-         expected_metric = 0
-         self.assertAlmostEqual(score, expected_metric, places=4)
-
-     def test_consistency(self):
-         try:
-             y_true = np.random.choice([0, 1], size=(100,))
-             y_pred = np.zeros(100)
-             metric = PateFScore(early=5, delay=5)
-             for _ in range(10):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.choice([0, 1], size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"PateFScore raised an exception {e}")
-
-
- class TestPate(unittest.TestCase):
-
-     def setUp(self):
-         self.y_true1 = np.array([0,0,1,1])
-         self.y_pred1 = np.array([1, 3, 2, 4])
-         self.y_pred2 = np.array([1, 2, 3, 4])
-         self.y_pred3 = np.array([4, 4, 4, 4])
-
-         self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0,
-                                  1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1,
-                                  1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
-
-         self.y_pred4 = [0.1280475, 0.12059283, 0.29936968, 0.85866402, 0.74071874, 0.22310849,
-                         0.11281839, 0.26133246, 0.33696106, 0.01442675, 0.51962876, 0.07828833,
-                         0.45337844, 0.09444483, 0.91216588, 0.18847595, 0.26828481, 0.65248919,
-                         0.46291981, 0.43730757, 0.78087553, 0.45031043, 0.88661033, 0.56209352,
-                         0.45029423, 0.17638205, 0.9261279, 0.58830652, 0.01602648, 0.73903379,
-                         0.61831379, 0.74779903, 0.42682106, 0.82583519, 0.19709012, 0.44925962,
-                         0.62752415, 0.52458327, 0.46291768, 0.33937527, 0.34868777, 0.12293847,
-                         0.84477504, 0.10225254, 0.37048167, 0.04476031, 0.36680499, 0.11346155,
-                         0.10583112, 0.09493136, 0.54878736, 0.68514489, 0.5940307, 0.14526962,
-                         0.69385728, 0.38888727, 0.61495304, 0.06795402, 0.02894603, 0.08293609,
-                         0.22865685, 0.63531487, 0.97966126, 0.31418622, 0.8943095, 0.22974177,
-                         0.94402929, 0.13140625, 0.80539267, 0.40160344, 0.38151339, 0.65011626,
-                         0.71657942, 0.93297398, 0.32043329, 0.54667941, 0.90645979, 0.98730183,
-                         0.82351336, 0.10404812, 0.6962921, 0.72890752, 0.49700666, 0.47461103,
-                         0.59696079, 0.85876179, 0.247344, 0.38187879, 0.23906861, 0.5266315,
-                         0.08171512, 0.27903375, 0.61112439, 0.20784267, 0.90652453, 0.87575255,
-                         0.26972245, 0.78780138, 0.37649185, 0.08467683]
-
-         self.y_pred5 = self.y_true1
-         self.y_pred6 = np.zeros(len(self.y_true1))
-
-     def test(self):
-         metric = Pate(early=1, delay=1)
-         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
-         expected_score = 0.79
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
-         expected_score = 1.0
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
-         expected_score = 0.75
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-         metric = Pate(early=5, delay=5)
-         score = round(metric.compute(self.y_true2, self.y_pred4), 2)
-         expected_score = 0.67
-         self.assertAlmostEqual(score, expected_score, places=4)
-
-     def test_consistency(self):
-         try:
-             metric = Pate(early=5, delay=5)
-             for _ in range(10):
-                 y_true = np.random.choice([0, 1], size=(100,))
-                 y_pred = np.random.random(size=(100,))
-                 score = metric.compute(y_true, y_pred)
-         except Exception as e:
-             self.fail(f"Pate raised an exception {e}")