tsadmetrics 0.1.16__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. docs/api_doc/conf.py +67 -0
  2. docs/{conf.py → full_doc/conf.py} +1 -1
  3. docs/manual_doc/conf.py +67 -0
  4. examples/example_direct_data.py +28 -0
  5. examples/example_direct_single_data.py +25 -0
  6. examples/example_file_reference.py +24 -0
  7. examples/example_global_config_file.py +13 -0
  8. examples/example_metric_config_file.py +19 -0
  9. examples/example_simple_metric.py +8 -0
  10. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  11. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  12. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  13. examples/specific_examples/CompositeFScore_example.py +24 -0
  14. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  15. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  16. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  17. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  18. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  19. examples/specific_examples/NabScore_example.py +24 -0
  20. examples/specific_examples/PateFScore_example.py +24 -0
  21. examples/specific_examples/Pate_example.py +24 -0
  22. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  23. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  25. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  26. examples/specific_examples/RangebasedFScore_example.py +24 -0
  27. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  28. examples/specific_examples/TemporalDistance_example.py +24 -0
  29. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  30. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  31. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  32. examples/specific_examples/VusPr_example.py +24 -0
  33. examples/specific_examples/VusRoc_example.py +24 -0
  34. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  35. tests/test_dpm.py +212 -0
  36. tests/test_ptdm.py +366 -0
  37. tests/test_registry.py +58 -0
  38. tests/test_runner.py +185 -0
  39. tests/test_spm.py +213 -0
  40. tests/test_tmem.py +198 -0
  41. tests/test_tpdm.py +369 -0
  42. tests/test_tstm.py +338 -0
  43. tsadmetrics/__init__.py +0 -21
  44. tsadmetrics/base/Metric.py +188 -0
  45. tsadmetrics/evaluation/Report.py +25 -0
  46. tsadmetrics/evaluation/Runner.py +253 -0
  47. tsadmetrics/metrics/Registry.py +141 -0
  48. tsadmetrics/metrics/__init__.py +2 -0
  49. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  50. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  51. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  52. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  53. tsadmetrics/metrics/spm/__init__.py +9 -0
  54. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  55. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  56. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  57. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  58. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  59. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  60. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  61. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  62. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  63. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  64. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  65. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  66. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  67. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  68. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  69. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  70. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  71. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  72. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  73. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  74. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  75. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  76. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  77. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  78. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  79. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  80. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  81. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  82. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  83. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  84. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  85. tsadmetrics/utils/functions_auc.py +393 -0
  86. tsadmetrics/utils/functions_conversion.py +63 -0
  87. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  88. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  89. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  90. tsadmetrics-1.0.0.dist-info/METADATA +69 -0
  91. tsadmetrics-1.0.0.dist-info/RECORD +99 -0
  92. {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/top_level.txt +1 -1
  93. entorno/bin/activate_this.py +0 -32
  94. entorno/bin/rst2html.py +0 -23
  95. entorno/bin/rst2html4.py +0 -26
  96. entorno/bin/rst2html5.py +0 -33
  97. entorno/bin/rst2latex.py +0 -26
  98. entorno/bin/rst2man.py +0 -27
  99. entorno/bin/rst2odt.py +0 -28
  100. entorno/bin/rst2odt_prepstyles.py +0 -20
  101. entorno/bin/rst2pseudoxml.py +0 -23
  102. entorno/bin/rst2s5.py +0 -24
  103. entorno/bin/rst2xetex.py +0 -27
  104. entorno/bin/rst2xml.py +0 -23
  105. entorno/bin/rstpep2html.py +0 -25
  106. tests/test_binary.py +0 -946
  107. tests/test_non_binary.py +0 -420
  108. tests/test_utils.py +0 -49
  109. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  110. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  111. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  112. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  113. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  114. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  115. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  116. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  117. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  118. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  119. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  120. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  121. tsadmetrics/_tsadeval/metrics.py +0 -698
  122. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  123. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  124. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  125. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  126. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  127. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  128. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  129. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  130. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  131. tsadmetrics/_tsadeval/tests.py +0 -376
  132. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  133. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  134. tsadmetrics/binary_metrics.py +0 -1652
  135. tsadmetrics/metric_utils.py +0 -98
  136. tsadmetrics/non_binary_metrics.py +0 -398
  137. tsadmetrics/scripts/__init__.py +0 -0
  138. tsadmetrics/scripts/compute_metrics.py +0 -42
  139. tsadmetrics/utils.py +0 -122
  140. tsadmetrics/validation.py +0 -35
  141. tsadmetrics-0.1.16.dist-info/METADATA +0 -23
  142. tsadmetrics-0.1.16.dist-info/RECORD +0 -64
  143. tsadmetrics-0.1.16.dist-info/entry_points.txt +0 -2
  144. /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
  145. /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
  146. /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
  147. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  148. {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
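
Before the per-file diffs, note what the restructure amounts to: the flat helper modules of 0.1.16 (binary_metrics.py, non_binary_metrics.py, metric_utils.py) are removed, and 1.0.0 organizes every metric as a class under tsadmetrics/metrics/, with a Metric base class added in tsadmetrics/base/Metric.py and a common compute(y_true, y_pred) interface. A minimal usage sketch, inferred from the tests shown below (the import path, class names, and constructor arguments appear verbatim in this diff; the data is illustrative):

    # Usage sketch inferred from tests/test_ptdm.py below; the import path,
    # class names, and constructor arguments appear verbatim in this diff.
    import numpy as np
    from tsadmetrics.metrics.tem.ptdm import AverageDetectionCount, PointadjustedAtKFScore

    y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1, 1, 0])
    y_pred = np.array([0, 0, 1, 1, 0, 0, 0, 0, 0, 0])

    # Parameterless metric: instantiate, then score predictions against labels.
    adc = AverageDetectionCount()
    print(adc.compute(y_true, y_pred))

    # Parameterized metric: k is the same constructor argument the tests use.
    pakf = PointadjustedAtKFScore(k=0.2)
    print(pakf.compute(y_true, y_pred))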
tests/test_ptdm.py ADDED
@@ -0,0 +1,366 @@
+ import unittest
+ from tsadmetrics.metrics.tem.ptdm import *
+
+ import numpy as np
+ import random
+
+
+ class TestAverageDetectionCount(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = AverageDetectionCount()
+         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
+         expected_metric = 0.5
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
+         expected_metric = 0.12
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
+         expected_metric = 0.33
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
+         expected_metric = 0.67
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
+         expected_metric = 1.0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
+         expected_metric = 0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+     def test_consistency(self):
+         metric = AverageDetectionCount()
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"AverageDetectionCount raised an exception {e}")
+
+
+ class TestDetectionAccuracyInRange(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = DetectionAccuracyInRange(k=3)
+         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
+         expected_score = 0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+     def test_consistency(self):
+         metric = DetectionAccuracyInRange(k=4)
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"DetectionAccuracyInRange raised an exception {e}")
+
+
+ class TestPointadjustedAtKFScore(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true
+         self.y_pred4 = np.zeros(len(self.y_true))
+
+     def test(self):
+         metric = PointadjustedAtKFScore(k=0.2)
+         f_score = round(metric.compute(self.y_true, self.y_pred1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true, self.y_pred2), 2)
+         expected_f_score = 0.22
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true, self.y_pred3), 2)
+         expected_f_score = 1.0
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true, self.y_pred4), 2)
+         expected_f_score = 0
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_consistency(self):
+         metric = PointadjustedAtKFScore(k=0.3)
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"PointadjustedAtKFScore raised an exception {e}")
+
+
+ class TestTimeseriesAwareFScore(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+         self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = TimeseriesAwareFScore(beta=1, alpha=0.5, delta=0, theta=0.5)
+         f_score = round(metric.compute(self.y_true1, self.y_pred1), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true1, self.y_pred2), 2)
+         expected_f_score = 0.12
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true2, self.y_pred21), 2)
+         expected_f_score = 0.77
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true2, self.y_pred22), 2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true1, self.y_pred3), 2)
+         expected_f_score = 1.0
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true1, self.y_pred4), 2)
+         expected_f_score = 0
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_consistency(self):
+         metric = TimeseriesAwareFScore(beta=1, alpha=0.5, delta=0, theta=0.5)
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 metric = TimeseriesAwareFScore(beta=1, alpha=random.random(), delta=0, theta=random.random())
+                 f_score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"TimeseriesAwareFScore raised an exception {e}")
+
+
+ class TestTotalDetectedInRange(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = TotalDetectedInRange(k=3)
+         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
+         expected_score = 0.5
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
+         expected_score = 0.5
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
+         expected_score = 0.56
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
+         expected_score = 0.44
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
+         expected_score = 1.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
+         expected_score = 0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+     def test_consistency(self):
+         metric = TotalDetectedInRange(k=4)
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"TotalDetectedInRange raised an exception {e}")
+
+
+ class TestWeightedDetectionDifference(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = WeightedDetectionDifference(k=3)
+         score = round(metric.compute(self.y_true1, self.y_pred1), 2)
+         expected_score = 18.89
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred2), 2)
+         expected_score = 24.89
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred21), 2)
+         expected_score = 15.73
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred22), 2)
+         expected_score = 16.73
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3), 2)
+         expected_score = 10
+         self.assertGreater(score, expected_score)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4), 2)
+         expected_score = 0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+     def test_consistency(self):
+         metric = WeightedDetectionDifference(k=4)
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"WeightedDetectionDifference raised an exception {e}")
tests/test_registry.py ADDED
@@ -0,0 +1,58 @@
+ import unittest
+ from tsadmetrics.metrics.Registry import Registry
+ from sklearn.metrics import fbeta_score
+ import numpy as np
+ import random
+
+
+ class TestRegistry(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.registry = Registry()
+         self.sample_metric = "pwf"
+
+         self.num_tests = 100
+         self.test_cases = []
+         for _ in range(self.num_tests):
+             y_true = np.random.choice([0, 1], size=(10000,))
+             y_pred = np.random.choice([0, 1], size=(10000,))
+             self.test_cases.append((y_true, y_pred))
+
+         y_true_perfect = np.random.choice([0, 1], size=(10000,))
+         y_pred_perfect = y_true_perfect.copy()
+         self.test_cases.append((y_true_perfect, y_pred_perfect))
+
+         y_true_all_zeros = np.random.choice([0, 1], size=(10000,))
+         y_pred_all_zeros = np.zeros(10000, dtype=int)
+         self.test_cases.append((y_true_all_zeros, y_pred_all_zeros))
+
+     def test(self):
+         for y_true, y_pred in self.test_cases:
+             with self.subTest(y_true=y_true, y_pred=y_pred):
+                 beta = random.randint(0, 1000000)
+                 metric = self.registry.get_metric(self.sample_metric, beta=beta)
+                 f_score = metric.compute(y_true, y_pred)
+                 expected_f_score = fbeta_score(y_true, y_pred, beta=beta)
+                 self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+     def test_load_metrics_from_file(self):
+         """
+         Test that metrics can be loaded from a file.
+         """
+         metrics_file = "tests/test_data/example_metrics_config.yaml"
+
+         loaded_metrics = self.registry.load_metrics_info_from_file(metrics_file)
+
+         expected_metrics = [
+             ("adc", {}),
+             ("dair", {}),
+             ("pakf", {"k": 0.2})
+         ]
+         self.assertEqual(len(loaded_metrics), len(expected_metrics))
+         for (m1, p1), (m2, p2) in zip(loaded_metrics, expected_metrics):
+             self.assertEqual(m1, m2)
+             self.assertEqual(p1, p2)
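
The schema of tests/test_data/example_metrics_config.yaml is not included in this diff; the test above only fixes the output of load_metrics_info_from_file as a list of (name, params) pairs. One hypothetical layout that would produce exactly that output (the keys metrics, name, and params are assumptions, not taken from the package):

    # Hypothetical: one YAML layout that would yield the (name, params) pairs
    # the test expects. The keys "metrics", "name" and "params" are assumptions;
    # only the resulting pairs come from the diff.
    import textwrap
    import yaml  # PyYAML

    config_text = textwrap.dedent("""\
        metrics:
          - name: adc
          - name: dair
          - name: pakf
            params:
              k: 0.2
        """)

    doc = yaml.safe_load(config_text)
    entries = [(m["name"], m.get("params", {})) for m in doc["metrics"]]
    print(entries)  # [('adc', {}), ('dair', {}), ('pakf', {'k': 0.2})]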
tests/test_runner.py ADDED
@@ -0,0 +1,185 @@
+ import unittest
+
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+ import os
+
+
+ class TestRunner(unittest.TestCase):
+
+     def setUp(self):
+         """
+         Initial setup for the tests.
+         """
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+     def test_direct_data(self):
+         dataset_evaluations = [
+             ("dataset1", self.y_true1, (self.y_pred1, self.y_pred1)),
+             ("dataset2", self.y_true1, (self.y_pred2, self.y_pred2))
+         ]
+         metrics = [
+             ("adc", {}),
+             ("dair", {}),
+             ("pakf", {"k": 0.2})
+         ]
+         expected_score_d1 = {
+             "adc": 0.5,
+             "dair": 1.0,
+             "pakf": 0.67
+         }
+         expected_score_d2 = {
+             "adc": 0.12,
+             "dair": 1.0,
+             "pakf": 0.22
+         }
+         runner = Runner(dataset_evaluations, metrics)
+         results = runner.run()
+         for metric, expected in expected_score_d1.items():
+             self.assertAlmostEqual(round(results.loc["dataset1", metric], 2), expected, places=4)
+         for metric, expected in expected_score_d2.items():
+             self.assertAlmostEqual(round(results.loc["dataset2", metric], 2), expected, places=4)
+
+     def test_file_reference(self):
+         dataset_evaluations = [
+             ("dataset1", "tests/test_data/results1.csv"),
+             ("dataset2", "tests/test_data/results2.csv")
+         ]
+         metrics = [
+             ("adc", {}),
+             ("dair", {}),
+             ("pakf", {"k": 0.2})
+         ]
+         expected_score_d1 = {
+             "adc": 0.5,
+             "dair": 1.0,
+             "pakf": 0.67
+         }
+         expected_score_d2 = {
+             "adc": 0.12,
+             "dair": 1.0,
+             "pakf": 0.22
+         }
+         runner = Runner(dataset_evaluations, metrics)
+         results = runner.run()
+         for metric, expected in expected_score_d1.items():
+             self.assertAlmostEqual(round(results.loc["dataset1", metric], 2), expected, places=4)
+         for metric, expected in expected_score_d2.items():
+             self.assertAlmostEqual(round(results.loc["dataset2", metric], 2), expected, places=4)
+
+     def test_metrics_from_file(self):
+         dataset_evaluations = [
+             ("dataset1", "tests/test_data/results1.csv"),
+             ("dataset2", "tests/test_data/results2.csv")
+         ]
+         expected_score_d1 = {
+             "adc": 0.5,
+             "dair": 1.0,
+             "pakf": 0.67
+         }
+         expected_score_d2 = {
+             "adc": 0.12,
+             "dair": 1.0,
+             "pakf": 0.22
+         }
+         runner = Runner(dataset_evaluations, "tests/test_data/example_metrics_config.yaml")
+         results = runner.run()
+         for metric, expected in expected_score_d1.items():
+             self.assertAlmostEqual(round(results.loc["dataset1", metric], 2), expected, places=4)
+         for metric, expected in expected_score_d2.items():
+             self.assertAlmostEqual(round(results.loc["dataset2", metric], 2), expected, places=4)
+
+     def test_evaluation_from_file(self):
+         expected_score_d1 = {
+             "adc": 0.5,
+             "dair": 1.0,
+             "pakf": 0.67
+         }
+         expected_score_d2 = {
+             "adc": 0.12,
+             "dair": 1.0,
+             "pakf": 0.22
+         }
+         runner = Runner("tests/test_data/example_evaluation_config.yaml")
+         results = runner.run()
+         for metric, expected in expected_score_d1.items():
+             self.assertAlmostEqual(round(results.loc["dataset1", metric], 2), expected, places=4)
+         for metric, expected in expected_score_d2.items():
+             self.assertAlmostEqual(round(results.loc["dataset2", metric], 2), expected, places=4)
+
+     def test_report(self):
+         dataset_evaluations = [
+             ("dataset1", "tests/test_data/results1.csv"),
+             ("dataset2", "tests/test_data/results2.csv")
+         ]
+         metrics = [
+             ("adc", {}),
+             ("dair", {}),
+             ("pakf", {"k": 0.2})
+         ]
+         runner = Runner(dataset_evaluations, metrics)
+         results = runner.run(generate_report=True, report_file="tests/evaluation_report.csv")
+
+         with open("tests/evaluation_report.csv", "r") as generated_file, open("tests/test_data/evaluation_report.csv", "r") as expected_file:
+             self.assertEqual(generated_file.read(), expected_file.read())
+
+         os.remove("tests/evaluation_report.csv")
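
Taken together, the Runner tests exercise three construction modes: in-memory arrays, per-dataset CSV result files, and a single evaluation config YAML. A minimal end-to-end sketch based only on the calls exercised above; note that the tests pass each prediction array twice in a tuple, and this diff does not document what the second slot is for, so the sketch repeats it the same way:

    # End-to-end sketch based on TestRunner above. The dataset name is made up;
    # the tuple layout, metric names, and run() arguments come from the tests.
    import numpy as np
    from tsadmetrics.evaluation.Runner import Runner

    y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1, 1, 0])
    y_pred = np.array([0, 0, 1, 1, 0, 0, 0, 0, 0, 0])

    datasets = [("my_dataset", y_true, (y_pred, y_pred))]
    metrics = [("adc", {}), ("pakf", {"k": 0.2})]

    runner = Runner(datasets, metrics)
    # run() returns a table indexed by dataset name with one column per metric
    # (the tests read it with pandas-style results.loc[dataset, metric]);
    # generate_report=True additionally writes the table to a CSV file.
    results = runner.run(generate_report=True, report_file="evaluation_report.csv")
    print(results.loc["my_dataset", "adc"])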