tsadmetrics 0.1.17__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149) hide show
  1. {docs_manual → docs/api_doc}/conf.py +3 -26
  2. docs/{conf.py → full_doc/conf.py} +1 -1
  3. {docs_api → docs/manual_doc}/conf.py +3 -26
  4. examples/example_direct_data.py +28 -0
  5. examples/example_direct_single_data.py +25 -0
  6. examples/example_file_reference.py +24 -0
  7. examples/example_global_config_file.py +13 -0
  8. examples/example_metric_config_file.py +19 -0
  9. examples/example_simple_metric.py +8 -0
  10. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  11. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  12. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  13. examples/specific_examples/CompositeFScore_example.py +24 -0
  14. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  15. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  16. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  17. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  18. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  19. examples/specific_examples/NabScore_example.py +24 -0
  20. examples/specific_examples/PateFScore_example.py +24 -0
  21. examples/specific_examples/Pate_example.py +24 -0
  22. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  23. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  25. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  26. examples/specific_examples/RangebasedFScore_example.py +24 -0
  27. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  28. examples/specific_examples/TemporalDistance_example.py +24 -0
  29. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  30. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  31. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  32. examples/specific_examples/VusPr_example.py +24 -0
  33. examples/specific_examples/VusRoc_example.py +24 -0
  34. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  35. tests/test_dpm.py +212 -0
  36. tests/test_ptdm.py +366 -0
  37. tests/test_registry.py +58 -0
  38. tests/test_runner.py +185 -0
  39. tests/test_spm.py +213 -0
  40. tests/test_tmem.py +198 -0
  41. tests/test_tpdm.py +369 -0
  42. tests/test_tstm.py +338 -0
  43. tsadmetrics/__init__.py +0 -21
  44. tsadmetrics/base/Metric.py +188 -0
  45. tsadmetrics/evaluation/Report.py +25 -0
  46. tsadmetrics/evaluation/Runner.py +253 -0
  47. tsadmetrics/metrics/Registry.py +141 -0
  48. tsadmetrics/metrics/__init__.py +2 -0
  49. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  50. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  51. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  52. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  53. tsadmetrics/metrics/spm/__init__.py +9 -0
  54. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  55. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  56. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  57. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  58. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  59. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  60. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  61. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  62. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  63. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  64. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  65. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  66. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  67. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  68. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  69. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  70. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  71. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  72. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  73. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  74. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  75. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  76. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  77. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  78. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  79. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  80. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  81. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  82. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  83. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  84. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  85. tsadmetrics/utils/functions_auc.py +393 -0
  86. tsadmetrics/utils/functions_conversion.py +63 -0
  87. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  88. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  89. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  90. tsadmetrics-1.0.0.dist-info/METADATA +69 -0
  91. tsadmetrics-1.0.0.dist-info/RECORD +99 -0
  92. tsadmetrics-1.0.0.dist-info/top_level.txt +4 -0
  93. entorno/bin/activate_this.py +0 -32
  94. entorno/bin/rst2html.py +0 -23
  95. entorno/bin/rst2html4.py +0 -26
  96. entorno/bin/rst2html5.py +0 -33
  97. entorno/bin/rst2latex.py +0 -26
  98. entorno/bin/rst2man.py +0 -27
  99. entorno/bin/rst2odt.py +0 -28
  100. entorno/bin/rst2odt_prepstyles.py +0 -20
  101. entorno/bin/rst2pseudoxml.py +0 -23
  102. entorno/bin/rst2s5.py +0 -24
  103. entorno/bin/rst2xetex.py +0 -27
  104. entorno/bin/rst2xml.py +0 -23
  105. entorno/bin/rstpep2html.py +0 -25
  106. tests/test_binary.py +0 -946
  107. tests/test_non_binary.py +0 -450
  108. tests/test_utils.py +0 -49
  109. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  110. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  111. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  112. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  113. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  114. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  115. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  116. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  117. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  118. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  119. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  120. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  121. tsadmetrics/_tsadeval/metrics.py +0 -698
  122. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  123. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  124. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  125. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  126. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  127. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  128. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  129. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  130. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  131. tsadmetrics/_tsadeval/tests.py +0 -376
  132. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  133. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  134. tsadmetrics/binary_metrics.py +0 -1652
  135. tsadmetrics/metric_utils.py +0 -98
  136. tsadmetrics/non_binary_metrics.py +0 -372
  137. tsadmetrics/scripts/__init__.py +0 -0
  138. tsadmetrics/scripts/compute_metrics.py +0 -42
  139. tsadmetrics/utils.py +0 -124
  140. tsadmetrics/validation.py +0 -35
  141. tsadmetrics-0.1.17.dist-info/METADATA +0 -54
  142. tsadmetrics-0.1.17.dist-info/RECORD +0 -66
  143. tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
  144. tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
  145. /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
  146. /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
  147. /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
  148. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  149. {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
tests/test_spm.py ADDED
@@ -0,0 +1,213 @@
1
+ import unittest
2
+ from tsadmetrics.metrics.spm import *
3
+
4
+ from sklearn.metrics import fbeta_score
5
+ import numpy as np
6
+ import random
7
+
8
class TestPointwiseFScore(unittest.TestCase):
    """Tests for PointwiseFScore against sklearn's fbeta_score reference."""

    def setUp(self):
        """Build random test cases plus perfect- and all-zero-prediction edge cases."""
        # Fix: seed both RNGs so a failing case is reproducible run-to-run
        # (the original test was non-deterministic).
        np.random.seed(42)
        random.seed(42)
        self.num_tests = 100
        self.test_cases = []
        for _ in range(self.num_tests):
            y_true = np.random.choice([0, 1], size=(10000,))
            y_pred = np.random.choice([0, 1], size=(10000,))
            self.test_cases.append((y_true, y_pred))

        # Edge case: prediction identical to the ground truth.
        y_true_perfect = np.random.choice([0, 1], size=(10000,))
        y_pred_perfect = y_true_perfect.copy()
        self.test_cases.append((y_true_perfect, y_pred_perfect))

        # Edge case: no anomaly predicted at all.
        y_true_all_zeros = np.random.choice([0, 1], size=(10000,))
        y_pred_all_zeros = np.zeros(10000, dtype=int)
        self.test_cases.append((y_true_all_zeros, y_pred_all_zeros))

    def test(self):
        """PointwiseFScore.compute must match sklearn's fbeta_score for random betas."""
        for y_true, y_pred in self.test_cases:
            with self.subTest(y_true=y_true, y_pred=y_pred):
                beta = random.randint(0, 1000000)
                metric = PointwiseFScore(beta=beta)
                f_score = metric.compute(y_true, y_pred)
                expected_f_score = fbeta_score(y_true, y_pred, beta=beta)
                self.assertAlmostEqual(f_score, expected_f_score, places=4)
41
+
42
class TestPrecisionAtK(unittest.TestCase):
    """Tests for the PrecisionAtK metric on small hand-crafted fixtures."""

    def setUp(self):
        # Ground truths plus predictions covering mixed, perfectly ranked,
        # inversely ranked, exact and all-zero scenarios.
        self.y_true1 = np.array([0, 0, 1, 1])
        self.y_pred1 = np.array([0.2, 0.9, 0.3, 0.8])
        self.y_pred2 = np.array([1, 2, 3, 4])
        self.y_pred3 = np.array([3, 4, 1, 2])
        self.y_true2 = np.array([1, 1, 1, 0])
        self.y_pred4 = np.array([3, 4, 1, 2])
        self.y_pred5 = self.y_true1
        self.y_pred6 = np.zeros(len(self.y_true1))

    def test(self):
        """Compare PrecisionAtK on each fixture with its expected value."""
        metric = PrecisionAtK()
        cases = [
            (self.y_true1, self.y_pred1, 0.5),
            (self.y_true1, self.y_pred2, 1),
            (self.y_true1, self.y_pred3, 0),
            (self.y_true2, self.y_pred4, round(2 / 3, 2)),
            (self.y_true1, self.y_pred5, 1.0),
            (self.y_true1, self.y_pred6, 0.5),
        ]
        for y_true, y_pred, expected in cases:
            score = round(metric.compute(y_true, y_pred), 2)
            self.assertAlmostEqual(score, expected, places=4)

    def test_consistency(self):
        """The metric must not raise on random binary truths and float scores."""
        try:
            metric = PrecisionAtK()
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.random.random(size=(100,))
                metric.compute(y_true, y_pred)
        except Exception as e:
            self.fail(f"PrecisionAtK raised an exception {e}")
102
+
103
+
104
+
105
class TestPointwiseAucRoc(unittest.TestCase):
    """Tests for the point-wise area under the ROC curve."""

    def setUp(self):
        """Initial configuration for the tests."""
        self.y_true1 = np.array([0, 0, 1, 1])
        self.y_pred1 = np.array([1, 3, 2, 4])
        self.y_pred2 = np.array([1, 2, 3, 4])
        self.y_pred3 = np.array([4, 4, 4, 4])        # constant scores: uninformative
        self.y_pred4 = self.y_true1                  # exact prediction
        self.y_pred5 = np.zeros(len(self.y_true1))   # nothing flagged

    def test(self):
        """Compare PointwiseAucRoc on each fixture with its expected value."""
        metric = PointwiseAucRoc()
        cases = [
            (self.y_pred1, 0.75),
            (self.y_pred2, 1),
            (self.y_pred3, 0.5),
            (self.y_pred4, 1.0),
            (self.y_pred5, 0.5),
        ]
        for y_pred, expected in cases:
            score = round(metric.compute(self.y_true1, y_pred), 2)
            self.assertAlmostEqual(score, expected, places=4)

    def test_consistency(self):
        """The metric must not raise on random binary truths and float scores."""
        try:
            metric = PointwiseAucRoc()
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.random.random(size=(100,))
                metric.compute(y_true, y_pred)
        except Exception as e:
            self.fail(f"PointwiseAucRoc raised an exception {e}")
157
+
158
+
159
class TestPointwiseAucPr(unittest.TestCase):
    """Tests for the point-wise area under the precision-recall curve."""

    def setUp(self):
        """Initial configuration for the tests."""
        self.y_true1 = np.array([0, 0, 1, 1])
        self.y_pred1 = np.array([1, 3, 2, 4])
        self.y_pred2 = np.array([1, 2, 3, 4])
        self.y_pred3 = np.array([4, 4, 4, 4])        # constant scores: uninformative
        self.y_pred4 = self.y_true1                  # exact prediction
        self.y_pred5 = np.zeros(len(self.y_true1))   # nothing flagged

    def test(self):
        """Check PointwiseAucPr against hand-computed expected values."""
        metric = PointwiseAucPr()
        score = round(metric.compute(self.y_true1, self.y_pred1), 2)
        expected_score = 0.83
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred2), 2)
        expected_score = 1
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred3), 2)
        expected_score = 0.5
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred4), 2)
        expected_metric = 1.0
        self.assertAlmostEqual(score, expected_metric, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred5), 2)
        expected_metric = 0.5
        self.assertAlmostEqual(score, expected_metric, places=4)

    def test_consistency(self):
        """The metric must not raise on random binary truths and float scores."""
        try:
            metric = PointwiseAucPr()
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.random.random(size=(100,))
                metric.compute(y_true, y_pred)
        except Exception as e:
            # Fix: the message previously said "auc_pr"; name the class under
            # test, consistent with the sibling test classes in this file.
            self.fail(f"PointwiseAucPr raised an exception {e}")
tests/test_tmem.py ADDED
@@ -0,0 +1,198 @@
1
+ import unittest
2
+
3
+ import numpy as np
4
+ import random
5
+
6
+ import unittest
7
+ import numpy as np
8
+ from tsadmetrics.metrics.tem.tmem import *
9
+
10
class TestTemporalDistance(unittest.TestCase):
    """Tests for TemporalDistance with both supported distance modes."""

    def setUp(self):
        # Identical, shifted, and disjoint prediction scenarios.
        self.y_true1 = np.array([0, 0, 1, 1, 0, 0])
        self.y_pred1 = np.array([0, 0, 1, 1, 0, 0])

        self.y_true2 = np.array([0, 0, 0, 1, 1, 0])
        self.y_pred2 = np.array([0, 1, 1, 0, 0, 0])

        self.y_true3 = np.array([0, 0, 1, 1, 0, 0])
        self.y_pred3 = np.array([1, 1, 0, 0, 0, 0])

        self.y_pred4 = self.y_true1                  # exact prediction
        self.y_pred5 = np.zeros(len(self.y_true1))   # no detection at all

    def test_temporal_distance_euclidean(self):
        """distance=0 mode: expected values computed by hand."""
        metric = TemporalDistance(distance=0)

        td = metric.compute(self.y_true1, self.y_pred1)
        expected = 0
        self.assertEqual(td, expected)

        td = metric.compute(self.y_true2, self.y_pred2)
        expected = 6
        self.assertEqual(td, expected)

        td = metric.compute(self.y_true3, self.y_pred3)
        expected = 6
        self.assertEqual(td, expected)

        score = round(metric.compute(self.y_true1, self.y_pred4), 2)
        expected_metric = 0
        self.assertAlmostEqual(score, expected_metric, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred5), 2)
        expected_metric = 12
        self.assertAlmostEqual(score, expected_metric, places=4)

    def test_temporal_distance_squared(self):
        """distance=1 mode: expected values computed by hand."""
        metric = TemporalDistance(distance=1)

        td = metric.compute(self.y_true1, self.y_pred1)
        expected = 0
        self.assertEqual(td, expected)

        td = metric.compute(self.y_true2, self.y_pred2)
        expected = 18
        self.assertEqual(td, expected)

        td = metric.compute(self.y_true3, self.y_pred3)
        expected = 18
        self.assertEqual(td, expected)

        score = round(metric.compute(self.y_true1, self.y_pred4), 2)
        expected_metric = 0
        self.assertAlmostEqual(score, expected_metric, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred5), 2)
        expected_metric = 144
        self.assertAlmostEqual(score, expected_metric, places=4)

    def test_consistency(self):
        """The metric must not raise in either mode on random inputs."""
        try:
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.zeros(100)
                metric = TemporalDistance(distance=random.choice([0, 1]))
                metric.compute(y_true, y_pred)
        except Exception as e:
            # Fix: copy-paste bug — the message named
            # "absolute_detection_distance" instead of the class under test.
            self.fail(f"TemporalDistance raised an exception {e}")
81
+
82
+
83
class TestAbsoluteDetectionDistance(unittest.TestCase):
    """Tests for the AbsoluteDetectionDistance metric."""

    def setUp(self):
        """Initial configuration for the tests."""
        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])

        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])

        self.y_pred3 = self.y_true1                  # exact prediction
        self.y_pred4 = np.zeros(len(self.y_true1))   # no detection at all

    def test(self):
        """Check AbsoluteDetectionDistance against hand-computed expected values."""
        metric = AbsoluteDetectionDistance()
        score = round(metric.compute(self.y_true1, self.y_pred1), 2)
        expected_score = 0.25
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred2), 2)
        expected_score = 0.25
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true2, self.y_pred21), 2)
        expected_score = 0.06
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true2, self.y_pred22), 2)
        expected_score = 0.12
        self.assertAlmostEqual(score, expected_score, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred3), 2)
        expected_metric = 0.17  # The mean of the distances is never 0
        self.assertAlmostEqual(score, expected_metric, places=4)

        score = round(metric.compute(self.y_true1, self.y_pred4), 2)
        expected_metric = 0
        self.assertAlmostEqual(score, expected_metric, places=4)

    def test_consistency(self):
        """The metric must not raise on random binary inputs.

        Fixes: renamed from ``testconsistency`` to the file's standard
        ``test_consistency`` naming, and dropped two unused pre-loop
        assignments that were immediately overwritten.
        """
        try:
            metric = AbsoluteDetectionDistance()
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.random.choice([0, 1], size=(100,))
                metric.compute(y_true, y_pred)
        except Exception as e:
            self.fail(f"AbsoluteDetectionDistance raised an exception {e}")
139
+
140
+
141
class TestEnhancedTimeseriesAwareFScore(unittest.TestCase):
    """Tests for the enhanced time-series-aware F-score (eTaF)."""

    def setUp(self):
        # Partial-overlap, sparse, and exact prediction scenarios.
        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])

        self.y_true2 = np.array([0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])
        self.y_pred21 = np.array([0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
        self.y_pred22 = np.array([0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0])

        self.y_pred3 = self.y_true1
        self.y_pred4 = np.zeros(len(self.y_true1))

    def test(self):
        """Compare the score on each fixture with its expected value."""
        metric = EnhancedTimeseriesAwareFScore(theta_p=0.5, theta_r=0.1)
        cases = [
            (self.y_true1, self.y_pred1, 0.67),
            (self.y_true1, self.y_pred2, 0.72),
            (self.y_true2, self.y_pred21, 0.77),
            (self.y_true2, self.y_pred22, 0.67),
            (self.y_true1, self.y_pred3, 1.0),
            (self.y_true1, self.y_pred4, 0),
        ]
        for y_true, y_pred, expected in cases:
            f_score = round(metric.compute(y_true, y_pred), 2)
            self.assertAlmostEqual(f_score, expected, places=4)

    def test_consistency(self):
        """The metric must not raise for random thetas and random inputs."""
        try:
            y_true = np.random.choice([0, 1], size=(100,))
            y_pred = np.zeros(100)
            metric = EnhancedTimeseriesAwareFScore(theta_r=random.random(), theta_p=random.random())
            metric.compute(y_true, y_pred)
            for _ in range(100):
                y_true = np.random.choice([0, 1], size=(100,))
                y_pred = np.random.choice([0, 1], size=(100,))
                metric.compute(y_true, y_pred)
        except Exception as e:
            self.fail(f"EnhancedTimeseriesAwareFScore raised an exception {e}")
197
+
198
+