tsadmetrics 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. entorno/bin/activate_this.py +32 -0
  2. entorno/bin/rst2html.py +23 -0
  3. entorno/bin/rst2html4.py +26 -0
  4. entorno/bin/rst2html5.py +33 -0
  5. entorno/bin/rst2latex.py +26 -0
  6. entorno/bin/rst2man.py +27 -0
  7. entorno/bin/rst2odt.py +28 -0
  8. entorno/bin/rst2odt_prepstyles.py +20 -0
  9. entorno/bin/rst2pseudoxml.py +23 -0
  10. entorno/bin/rst2s5.py +24 -0
  11. entorno/bin/rst2xetex.py +27 -0
  12. entorno/bin/rst2xml.py +23 -0
  13. entorno/bin/rstpep2html.py +25 -0
  14. tests/__init__.py +0 -0
  15. tests/test_binary.py +759 -0
  16. tests/test_non_binary.py +371 -0
  17. tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
  18. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +86 -0
  19. tsadmetrics/_tsadeval/affiliation/_integral_interval.py +464 -0
  20. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +68 -0
  21. tsadmetrics/_tsadeval/affiliation/generics.py +135 -0
  22. tsadmetrics/_tsadeval/affiliation/metrics.py +114 -0
  23. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +175 -0
  24. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +50 -0
  25. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +184 -0
  26. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  27. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  28. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +386 -0
  29. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +362 -0
  30. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  31. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  32. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +165 -0
  33. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +121 -0
  34. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  35. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +61 -0
  36. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +86 -0
  37. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +21 -0
  38. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +85 -0
  39. tsadmetrics/utils.py +10 -4
  40. {tsadmetrics-0.1.4.dist-info → tsadmetrics-0.1.6.dist-info}/METADATA +1 -1
  41. tsadmetrics-0.1.6.dist-info/RECORD +58 -0
  42. tsadmetrics-0.1.6.dist-info/top_level.txt +3 -0
  43. tsadmetrics-0.1.4.dist-info/RECORD +0 -20
  44. tsadmetrics-0.1.4.dist-info/top_level.txt +0 -1
  45. {tsadmetrics-0.1.4.dist-info → tsadmetrics-0.1.6.dist-info}/WHEEL +0 -0
tests/test_binary.py ADDED
@@ -0,0 +1,759 @@
+ import unittest
+ from tsadmetrics import *
+
+ from sklearn.metrics import recall_score, precision_score, fbeta_score
+ import numpy as np
+ import random
+
+ class TestPointWiseMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.num_tests = 100  # Number of random datasets to generate for the tests
+         self.test_cases = []
+         for _ in range(self.num_tests):
+             y_true = np.random.choice([0, 1], size=(10000,))
+             y_pred = np.random.choice([0, 1], size=(10000,))
+             self.test_cases.append((y_true, y_pred))
+
+     def test_point_wise_recall(self):
+         """
+         Test for the point_wise_recall function.
+         """
+         for y_true, y_pred in self.test_cases:
+             with self.subTest(y_true=y_true, y_pred=y_pred):
+                 recall = point_wise_recall(y_true, y_pred)
+                 expected_recall = recall_score(y_true, y_pred)
+                 self.assertAlmostEqual(recall, expected_recall, places=4)
+
+     def test_point_wise_precision(self):
+         """
+         Test for the point_wise_precision function.
+         """
+         for y_true, y_pred in self.test_cases:
+             with self.subTest(y_true=y_true, y_pred=y_pred):
+                 precision = point_wise_precision(y_true, y_pred)
+                 expected_precision = precision_score(y_true, y_pred)
+                 self.assertAlmostEqual(precision, expected_precision, places=4)
+
+     def test_point_wise_f_score(self):
+         """
+         Test for the point_wise_f_score function.
+         """
+         for y_true, y_pred in self.test_cases:
+             with self.subTest(y_true=y_true, y_pred=y_pred):
+                 # Use the same randomly drawn beta for both implementations.
+                 beta = random.randint(0, 1000000)
+                 f_score = point_wise_f_score(y_true, y_pred, beta=beta)
+                 expected_f_score = fbeta_score(y_true, y_pred, beta=beta)
+                 self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+ class TestPointAdjustedMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0])
+
+     def test_point_adjusted_recall(self):
+         """
+         Test for the point_adjusted_recall function.
+         """
+         recall = point_adjusted_recall(self.y_true, self.y_pred)
+         expected_recall = 1
+         self.assertAlmostEqual(recall, expected_recall, places=4)
+
+     def test_point_adjusted_precision(self):
+         """
+         Test for the point_adjusted_precision function.
+         """
+         precision = round(point_adjusted_precision(self.y_true, self.y_pred), 2)
+         expected_precision = 0.87
+         self.assertAlmostEqual(precision, expected_precision, places=4)
+
+     def test_point_adjusted_f_score(self):
+         """
+         Test for the point_adjusted_f_score function.
+         """
+         f_score = round(point_adjusted_f_score(self.y_true, self.y_pred), 2)
+         expected_f_score = 0.93
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_point_adjusted_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             point_adjusted_f_score(y_true, y_pred)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = point_adjusted_f_score(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"point_adjusted_f_score raised an exception {e}")
+
+ class TestDelayThPointAdjustedMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_delay_th_point_adjusted_f_score(self):
+         """
+         Test for the delay_th_point_adjusted_f_score function.
+         """
+         f_score = round(delay_th_point_adjusted_f_score(self.y_true, self.y_pred1, 2), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(delay_th_point_adjusted_f_score(self.y_true, self.y_pred2, 2), 2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_delay_th_point_adjusted_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             delay_th_point_adjusted_f_score(y_true, y_pred, 7)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = delay_th_point_adjusted_f_score(y_true, y_pred, 7)
+         except Exception as e:
+             self.fail(f"delay_th_point_adjusted_f_score raised an exception {e}")
+
+ class TestPointAdjustedMetricsAtK(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_point_adjusted_at_k_f_score(self):
+         """
+         Test for the point_adjusted_at_k_f_score function.
+         """
+         f_score = round(point_adjusted_at_k_f_score(self.y_true, self.y_pred1, 0.2), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(point_adjusted_at_k_f_score(self.y_true, self.y_pred2, 0.2), 2)
+         expected_f_score = 0.22
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_point_adjusted_at_k_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             point_adjusted_at_k_f_score(y_true, y_pred, 0.3)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = point_adjusted_at_k_f_score(y_true, y_pred, 0.3)
+         except Exception as e:
+             self.fail(f"point_adjusted_at_k_f_score raised an exception {e}")
+
+ class TestLatencySparsityAwareMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_latency_sparsity_aw_f_score(self):
+         """
+         Test for the latency_sparsity_aw_f_score function.
+         """
+         f_score = round(latency_sparsity_aw_f_score(self.y_true, self.y_pred1, 2), 2)
+         expected_f_score = 0.71
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(latency_sparsity_aw_f_score(self.y_true, self.y_pred2, 2), 2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_latency_sparsity_aw_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             latency_sparsity_aw_f_score(y_true, y_pred, 3)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = latency_sparsity_aw_f_score(y_true, y_pred, 3)
+         except Exception as e:
+             self.fail(f"latency_sparsity_aw_f_score raised an exception {e}")
+
+ class TestSegmentWiseMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_segment_wise_f_score(self):
+         """
+         Test for the segment_wise_f_score function.
+         """
+         f_score = round(segment_wise_f_score(self.y_true, self.y_pred1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(segment_wise_f_score(self.y_true, self.y_pred2), 2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_segment_wise_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             segment_wise_f_score(y_true, y_pred, 7)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(10,))
+                 y_pred = np.random.choice([0, 1], size=(10,))
+                 f_score = segment_wise_f_score(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"segment_wise_f_score raised an exception {e}")
+
+ class TestCompositeMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_composite_f_score(self):
+         """
+         Test for the composite_f_score function.
+         """
+         f_score = round(composite_f_score(self.y_true, self.y_pred1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(composite_f_score(self.y_true, self.y_pred2), 2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_composite_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             composite_f_score(y_true, y_pred, 7)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(10,))
+                 y_pred = np.random.choice([0, 1], size=(10,))
+                 f_score = composite_f_score(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"composite_f_score raised an exception {e}")
+
+ class TestTimeTolerantMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_time_tolerant_recall(self):
+         """
+         Test for the time_tolerant_recall function.
+         """
+         recall = round(time_tolerant_recall(self.y_true, self.y_pred1, 2), 2)
+         expected_recall = 0.5
+         self.assertAlmostEqual(recall, expected_recall, places=4)
+
+         recall = round(time_tolerant_recall(self.y_true, self.y_pred2, 2), 3)
+         expected_recall = 0.375
+         self.assertAlmostEqual(recall, expected_recall, places=4)
+
+     def test_time_tolerant_precision(self):
+         """
+         Test for the time_tolerant_precision function.
+         """
+         precision = round(time_tolerant_precision(self.y_true, self.y_pred1, 2), 2)
+         expected_precision = 1
+         self.assertAlmostEqual(precision, expected_precision, places=4)
+
+         precision = round(time_tolerant_precision(self.y_true, self.y_pred2, 2), 2)
+         expected_precision = 1
+         self.assertAlmostEqual(precision, expected_precision, places=4)
+
+     def test_time_tolerant_f_score(self):
+         """
+         Test for the time_tolerant_f_score function.
+         """
+         f_score = round(time_tolerant_f_score(self.y_true, self.y_pred1, 2), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(time_tolerant_f_score(self.y_true, self.y_pred2, 2), 2)
+         expected_f_score = 0.55
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_time_tolerant_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             time_tolerant_f_score(y_true, y_pred, 7)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 t = random.randint(1, 100)
+                 f_score = time_tolerant_f_score(y_true, y_pred, t)
+         except Exception as e:
+             self.fail(f"time_tolerant_f_score raised an exception {e}")
+
+ class TestRangeBasedMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+
+     def test_range_based_f_score(self):
+         """
+         Test for the range_based_f_score function.
+         """
+         f_score = round(range_based_f_score(self.y_true1, self.y_pred1, beta=1, p_alpha=0.2, r_alpha=0.2, cardinality_mode='one', p_bias='flat', r_bias='flat'), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(range_based_f_score(self.y_true1, self.y_pred2, beta=1, p_alpha=0.2, r_alpha=0.2, cardinality_mode='one', p_bias='flat', r_bias='flat'), 2)
+         expected_f_score = 0.46
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(range_based_f_score(self.y_true2, self.y_pred21, beta=1, p_alpha=0.2, r_alpha=0.2, cardinality_mode='one', p_bias='flat', r_bias='flat'), 2)
+         expected_f_score = 0.71
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(range_based_f_score(self.y_true2, self.y_pred22, beta=1, p_alpha=0.2, r_alpha=0.2, cardinality_mode='one', p_bias='flat', r_bias='flat'), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_range_based_consistency(self):
+         try:
+             modes = ['flat', 'front', 'back', 'middle']
+             modes_c = ['one', 'reciprocal']
+
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             range_based_f_score(y_true, y_pred, beta=2, p_alpha=random.random(), r_alpha=random.random(), cardinality_mode=random.choice(modes_c), p_bias=random.choice(modes), r_bias=random.choice(modes))
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = range_based_f_score(y_true, y_pred, beta=2, p_alpha=random.random(), r_alpha=random.random(), cardinality_mode=random.choice(modes_c), p_bias=random.choice(modes), r_bias=random.choice(modes))
+         except Exception as e:
+             self.fail(f"range_based_f_score raised an exception {e}")
+
+ class TestTSAwareMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+
+     def test_ts_aware_f_score(self):
+         """
+         Test for the ts_aware_f_score function.
+         """
+         f_score = round(ts_aware_f_score(self.y_true1, self.y_pred1, 1, 0.5, 0, 0.5), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(ts_aware_f_score(self.y_true1, self.y_pred2, 1, 0.5, 0, 0.5), 2)
+         expected_f_score = 0.12
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(ts_aware_f_score(self.y_true2, self.y_pred21, 1, 0.5, 0, 0.5), 2)
+         expected_f_score = 0.77
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(ts_aware_f_score(self.y_true2, self.y_pred22, 1, 0.5, 0, 0.5), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_ts_aware_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             ts_aware_f_score(y_true, y_pred, 1, random.random(), 0, random.random())
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = ts_aware_f_score(y_true, y_pred, 1, random.random(), 0, random.random())
+         except Exception as e:
+             self.fail(f"ts_aware_f_score raised an exception {e}")
+
+ class TestEnhancedTSAwareMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+
+     def test_enhanced_ts_aware_f_score(self):
+         """
+         Test for the enhanced_ts_aware_f_score function.
+         """
+         f_score = round(enhanced_ts_aware_f_score(self.y_true1, self.y_pred1, 1, 0.5, 0.1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(enhanced_ts_aware_f_score(self.y_true1, self.y_pred2, 1, 0.5, 0.1), 2)
+         expected_f_score = 0.72
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(enhanced_ts_aware_f_score(self.y_true2, self.y_pred21, 1, 0.5, 0.1), 2)
+         expected_f_score = 0.77
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(enhanced_ts_aware_f_score(self.y_true2, self.y_pred22, 1, 0.5, 0.1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_enhanced_ts_aware_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             enhanced_ts_aware_f_score(y_true, y_pred, 1, random.random(), random.random())
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = enhanced_ts_aware_f_score(y_true, y_pred, 1, random.random(), random.random())
+         except Exception as e:
+             self.fail(f"enhanced_ts_aware_f_score raised an exception {e}")
+
+ class TestAffiliationBasedMetrics(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+
+     def test_affiliation_based_f_score(self):
+         """
+         Test for the affiliation_based_f_score function.
+         """
+         f_score = round(affiliation_based_f_score(self.y_true1, self.y_pred1, 1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(affiliation_based_f_score(self.y_true1, self.y_pred2, 1), 2)
+         expected_f_score = 0.77
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(affiliation_based_f_score(self.y_true2, self.y_pred21, 1), 2)
+         expected_f_score = 0.77
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(affiliation_based_f_score(self.y_true2, self.y_pred22, 1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_affiliation_based_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             affiliation_based_f_score(y_true, y_pred, 1)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = affiliation_based_f_score(y_true, y_pred, 1)
+         except Exception as e:
+             self.fail(f"affiliation_based_f_score raised an exception {e}")
+
+ class TestNABScore(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+     def test_nab_score(self):
+         """
+         Test for the nab_score function.
+         """
+         score = round(nab_score(self.y_true1, self.y_pred1), 2)
+         expected_score = 50
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(nab_score(self.y_true1, self.y_pred2), 2)
+         expected_score = 100
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(nab_score(self.y_true2, self.y_pred21), 2)
+         expected_score = 33.33
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(nab_score(self.y_true2, self.y_pred22), 2)
+         expected_score = 66.67
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+     def test_nab_score_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             nab_score(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = nab_score(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"nab_score raised an exception {e}")
+
+ class TestAverageDetectionCount(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_average_detection_count_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             average_detection_count(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = average_detection_count(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"average_detection_count raised an exception {e}")
+
+ class TestAbsoluteDetectionDistance(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_absolute_detection_distance_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             absolute_detection_distance(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = absolute_detection_distance(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"absolute_detection_distance raised an exception {e}")
+
+ class TestTotalDetectedInRange(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_total_detected_in_range_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             total_detected_in_range(y_true, y_pred, k=4)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = total_detected_in_range(y_true, y_pred, k=4)
+         except Exception as e:
+             self.fail(f"total_detected_in_range raised an exception {e}")
+
+ class TestDetectionAccuracyInRange(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_detection_accuracy_in_range_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             detection_accuracy_in_range(y_true, y_pred, k=4)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = detection_accuracy_in_range(y_true, y_pred, k=4)
+         except Exception as e:
+             self.fail(f"detection_accuracy_in_range raised an exception {e}")
+
+ class TestWeightedDetectionDifference(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_weighted_detection_difference_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             weighted_detection_difference(y_true, y_pred, k=4)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = weighted_detection_difference(y_true, y_pred, k=4)
+         except Exception as e:
+             self.fail(f"weighted_detection_difference raised an exception {e}")
+
+ class TestPATE(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_pate_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             binary_pate(y_true, y_pred, early=5, delay=5)
+             for _ in range(10):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = binary_pate(y_true, y_pred, early=5, delay=5)
+         except Exception as e:
+             self.fail(f"binary_pate raised an exception {e}")
+
+ class TestMeanTimeToDetect(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         pass
+
+     def test_mean_time_to_detect_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             mean_time_to_detect(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = mean_time_to_detect(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"mean_time_to_detect raised an exception {e}")
+
+ if __name__ == '__main__':
+     unittest.main()