tsadmetrics 0.1.17__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. {docs_manual → docs/api_doc}/conf.py +3 -26
  2. docs/{conf.py → full_doc/conf.py} +1 -1
  3. {docs_api → docs/manual_doc}/conf.py +3 -26
  4. examples/example_direct_data.py +28 -0
  5. examples/example_direct_single_data.py +25 -0
  6. examples/example_file_reference.py +24 -0
  7. examples/example_global_config_file.py +13 -0
  8. examples/example_metric_config_file.py +19 -0
  9. examples/example_simple_metric.py +8 -0
  10. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  11. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  12. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  13. examples/specific_examples/CompositeFScore_example.py +24 -0
  14. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  15. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  16. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  17. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  18. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  19. examples/specific_examples/NabScore_example.py +24 -0
  20. examples/specific_examples/PateFScore_example.py +24 -0
  21. examples/specific_examples/Pate_example.py +24 -0
  22. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  23. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  25. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  26. examples/specific_examples/RangebasedFScore_example.py +24 -0
  27. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  28. examples/specific_examples/TemporalDistance_example.py +24 -0
  29. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  30. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  31. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  32. examples/specific_examples/VusPr_example.py +24 -0
  33. examples/specific_examples/VusRoc_example.py +24 -0
  34. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  35. tests/test_dpm.py +212 -0
  36. tests/test_ptdm.py +366 -0
  37. tests/test_registry.py +58 -0
  38. tests/test_runner.py +185 -0
  39. tests/test_spm.py +213 -0
  40. tests/test_tmem.py +198 -0
  41. tests/test_tpdm.py +369 -0
  42. tests/test_tstm.py +338 -0
  43. tsadmetrics/__init__.py +0 -21
  44. tsadmetrics/base/Metric.py +188 -0
  45. tsadmetrics/evaluation/Report.py +25 -0
  46. tsadmetrics/evaluation/Runner.py +253 -0
  47. tsadmetrics/metrics/Registry.py +141 -0
  48. tsadmetrics/metrics/__init__.py +2 -0
  49. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  50. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  51. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  52. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  53. tsadmetrics/metrics/spm/__init__.py +9 -0
  54. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  55. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  56. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  57. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  58. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  59. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  60. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  61. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  62. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  63. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  64. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  65. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  66. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  67. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  68. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  69. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  70. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  71. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  72. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  73. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  74. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  75. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  76. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  77. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  78. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  79. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  80. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  81. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  82. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  83. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  84. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  85. tsadmetrics/utils/functions_auc.py +393 -0
  86. tsadmetrics/utils/functions_conversion.py +63 -0
  87. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  88. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  89. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  90. tsadmetrics-1.0.0.dist-info/METADATA +69 -0
  91. tsadmetrics-1.0.0.dist-info/RECORD +99 -0
  92. tsadmetrics-1.0.0.dist-info/top_level.txt +4 -0
  93. entorno/bin/activate_this.py +0 -32
  94. entorno/bin/rst2html.py +0 -23
  95. entorno/bin/rst2html4.py +0 -26
  96. entorno/bin/rst2html5.py +0 -33
  97. entorno/bin/rst2latex.py +0 -26
  98. entorno/bin/rst2man.py +0 -27
  99. entorno/bin/rst2odt.py +0 -28
  100. entorno/bin/rst2odt_prepstyles.py +0 -20
  101. entorno/bin/rst2pseudoxml.py +0 -23
  102. entorno/bin/rst2s5.py +0 -24
  103. entorno/bin/rst2xetex.py +0 -27
  104. entorno/bin/rst2xml.py +0 -23
  105. entorno/bin/rstpep2html.py +0 -25
  106. tests/test_binary.py +0 -946
  107. tests/test_non_binary.py +0 -450
  108. tests/test_utils.py +0 -49
  109. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  110. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  111. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  112. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  113. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  114. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  115. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  116. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  117. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  118. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  119. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  120. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  121. tsadmetrics/_tsadeval/metrics.py +0 -698
  122. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  123. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  124. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  125. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  126. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  127. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  128. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  129. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  130. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  131. tsadmetrics/_tsadeval/tests.py +0 -376
  132. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  133. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  134. tsadmetrics/binary_metrics.py +0 -1652
  135. tsadmetrics/metric_utils.py +0 -98
  136. tsadmetrics/non_binary_metrics.py +0 -372
  137. tsadmetrics/scripts/__init__.py +0 -0
  138. tsadmetrics/scripts/compute_metrics.py +0 -42
  139. tsadmetrics/utils.py +0 -124
  140. tsadmetrics/validation.py +0 -35
  141. tsadmetrics-0.1.17.dist-info/METADATA +0 -54
  142. tsadmetrics-0.1.17.dist-info/RECORD +0 -66
  143. tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
  144. tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
  145. /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
  146. /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
  147. /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
  148. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  149. {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
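Note on the new public API: the example scripts added below all follow the same two-step pattern, so the restructuring can be read from this diff alone: a metric class from tsadmetrics.metrics exposes compute(y_true, y_pred), and tsadmetrics.evaluation.Runner batches metric short names over named datasets. A minimal sketch of that pattern, condensed from the SegmentwiseFScore example below; reading the prediction tuple as "binary predictions plus continuous scores" is an assumption, since every example simply passes the same array twice:

    from tsadmetrics.metrics.tem.tpdm.SegmentwiseFScore import SegmentwiseFScore
    from tsadmetrics.evaluation.Runner import Runner

    y_true = [0, 0, 1, 1, 1, 0, 0, 1, 1, 0]   # ground-truth anomaly labels
    y_pred = [0, 0, 1, 1, 0, 0, 0, 1, 1, 0]   # detector output

    # Direct usage: instantiate the metric and call compute().
    print(SegmentwiseFScore().compute(y_true, y_pred))

    # Batch usage: (dataset name, ground truth, (predictions, predictions)) triples
    # plus (metric key, parameter dict) pairs, exactly as in the example files below.
    runner = Runner(
        [("dataset1", y_true, (y_pred, y_pred))],
        [("swf", {})],
    )
    print(runner.run())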
examples/specific_examples/SegmentwiseFScore_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.tpdm.SegmentwiseFScore import SegmentwiseFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = SegmentwiseFScore()
+ result = metric.compute(y_true, y_pred)
+ print("SegmentwiseFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("swf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/TemporalDistance_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.tmem.TemporalDistance import TemporalDistance
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = TemporalDistance()
+ result = metric.compute(y_true, y_pred)
+ print("TemporalDistance:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("td", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/TimeTolerantFScore_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.tstm.TimeTolerantFScore import TimeTolerantFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = TimeTolerantFScore()
+ result = metric.compute(y_true, y_pred)
+ print("TimeTolerantFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("ttf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/TimeseriesAwareFScore_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.ptdm.TimeseriesAwareFScore import TimeseriesAwareFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = TimeseriesAwareFScore()
+ result = metric.compute(y_true, y_pred)
+ print("TimeseriesAwareFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("taf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/TotalDetectedInRange_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.ptdm.TotalDetectedInRange import TotalDetectedInRange
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = TotalDetectedInRange()
+ result = metric.compute(y_true, y_pred)
+ print("TotalDetectedInRange:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("tdir", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/VusPr_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.tstm.VusPr import VusPr
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+ # Direct usage
+ metric = VusPr()
+ result = metric.compute(y_true, y_pred)
+ print("VusPr:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("vus_pr", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/VusRoc_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.tstm.VusRoc import VusRoc
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+ # Direct usage
+ metric = VusRoc()
+ result = metric.compute(y_true, y_pred)
+ print("VusRoc:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("vus_roc", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
examples/specific_examples/WeightedDetectionDifference_example.py ADDED
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.ptdm.WeightedDetectionDifference import WeightedDetectionDifference
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = WeightedDetectionDifference()
+ result = metric.compute(y_true, y_pred)
+ print("WeightedDetectionDifference:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("wdd", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tests/test_dpm.py ADDED
@@ -0,0 +1,212 @@
+ import unittest
+
+ import numpy as np
+ import random
+
+ import unittest
+ import numpy as np
+ from tsadmetrics.metrics.tem.dpm import *
+
+ class TestDelayThresholdedPointadjustedFScore(unittest.TestCase):
+
+     def setUp(self):
+
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+         self.y_pred3 = self.y_true
+         self.y_pred4 = np.zeros(len(self.y_true))
+
+
+
+     def test(self):
+         metric = DelayThresholdedPointadjustedFScore(k=2, beta=1.0)
+         f_score = round(metric.compute(self.y_true, self.y_pred1),2)
+         expected_f_score = 0.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true, self.y_pred2),2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         score = round(metric.compute(self.y_true, self.y_pred3),2)
+         expected_metric = 1.0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true, self.y_pred4),2)
+         expected_metric = 0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+     def test_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric = DelayThresholdedPointadjustedFScore(k=2, beta=1.0)
+             metric.compute(y_true, y_pred)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"DelayThresholdedPointadjustedFScore raised an exception {e}")
+
+
+ class TestLatencySparsityawareFScore(unittest.TestCase):
+
+     def setUp(self):
+
+         self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+         self.y_pred3 = self.y_true
+         self.y_pred4 = np.zeros(len(self.y_true))
+
+     def test(self):
+         metric = LatencySparsityawareFScore(ni=2, beta=1.0)
+         f_score = round(metric.compute(self.y_true, self.y_pred1),2)
+         expected_f_score = 0.71
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true, self.y_pred2),2)
+         expected_f_score = 1
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         score = round(metric.compute(self.y_true, self.y_pred3),2)
+         expected_metric = 1.0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true, self.y_pred4),2)
+         expected_metric = 0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+     def test_consistency(self):
+         try:
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric = LatencySparsityawareFScore(ni=2, beta=1.0)
+             metric.compute(y_true, y_pred)
+             for _ in range(1000):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+                 f_score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"LatencySparsityawareFScore raised an exception {e}")
+
+
+ class TestMeanTimeToDetect(unittest.TestCase):
+
+     def setUp(self):
+
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+     def test(self):
+         metric = MeanTimeToDetect()
+         score = round(metric.compute(self.y_true1, self.y_pred1),2)
+         expected_score = 0.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred2),2)
+         expected_score = 0.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred21),2)
+         expected_score = 8.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true2, self.y_pred22),2)
+         expected_score = 0.0
+         self.assertAlmostEqual(score, expected_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3),2)
+         expected_metric = 0.0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4),2)
+         expected_metric = 0.0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+
+
+
+
+     def test_consistency(self):
+         try:
+
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric = MeanTimeToDetect()
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"MeanTimeToDetect raised an exception {e}")
+
+
+ class TestNabScore(unittest.TestCase):
+
+     def setUp(self):
+
+         self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+         self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+         self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+         self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+         self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+         self.y_pred3 = self.y_true1
+         self.y_pred4 = np.zeros(len(self.y_true1))
+
+
+
+     def test(self):
+         metric = NabScore()
+         f_score = round(metric.compute(self.y_true1, self.y_pred1),2)
+         expected_f_score = 50
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true1, self.y_pred2),2)
+         expected_f_score = 100
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true2, self.y_pred21),2)
+         expected_f_score = 33.33
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         f_score = round(metric.compute(self.y_true2, self.y_pred22),2)
+         expected_f_score = 66.67
+         self.assertAlmostEqual(f_score, expected_f_score, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred3),2)
+         expected_metric = 100
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+         score = round(metric.compute(self.y_true1, self.y_pred4),2)
+         expected_metric = 0
+         self.assertAlmostEqual(score, expected_metric, places=4)
+
+     def test_consistency(self):
+         try:
+             metric = NabScore()
+             y_true = np.random.choice([0, 1], size=(100,))
+             y_pred = np.zeros(100)
+             metric.compute(y_true, y_pred)
+             for _ in range(100):
+                 y_true = np.random.choice([0, 1], size=(100,))
+                 y_pred = np.random.choice([0, 1], size=(100,))
+
+                 score = metric.compute(y_true, y_pred)
+         except Exception as e:
+             self.fail(f"NabScore raised an exception {e}")