tsadmetrics 0.1.17__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. {docs_api → docs/add_docs/api_doc}/conf.py +3 -26
  2. {docs_manual → docs/add_docs/full_doc}/conf.py +2 -25
  3. docs/add_docs/manual_doc/conf.py +67 -0
  4. docs/conf.py +1 -1
  5. examples/example_direct_data.py +28 -0
  6. examples/example_direct_single_data.py +25 -0
  7. examples/example_file_reference.py +24 -0
  8. examples/example_global_config_file.py +13 -0
  9. examples/example_metric_config_file.py +19 -0
  10. examples/example_simple_metric.py +8 -0
  11. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  12. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  13. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  14. examples/specific_examples/CompositeFScore_example.py +24 -0
  15. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  16. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  17. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  18. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  19. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  20. examples/specific_examples/NabScore_example.py +24 -0
  21. examples/specific_examples/PateFScore_example.py +24 -0
  22. examples/specific_examples/Pate_example.py +24 -0
  23. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  25. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  26. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  27. examples/specific_examples/RangebasedFScore_example.py +24 -0
  28. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  29. examples/specific_examples/TemporalDistance_example.py +24 -0
  30. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  31. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  32. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  33. examples/specific_examples/VusPr_example.py +24 -0
  34. examples/specific_examples/VusRoc_example.py +24 -0
  35. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  36. tsadmetrics/__init__.py +0 -21
  37. tsadmetrics/base/Metric.py +188 -0
  38. tsadmetrics/evaluation/Report.py +25 -0
  39. tsadmetrics/evaluation/Runner.py +253 -0
  40. tsadmetrics/metrics/Registry.py +141 -0
  41. tsadmetrics/metrics/__init__.py +2 -0
  42. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  43. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  44. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  45. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  46. tsadmetrics/metrics/spm/__init__.py +9 -0
  47. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  48. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  49. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  50. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  51. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  52. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  53. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  54. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  55. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  56. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  57. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  58. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  59. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  60. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  61. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  62. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  63. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  64. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  65. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  66. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  67. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  68. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  69. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  70. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  71. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  72. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  73. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  74. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  75. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  76. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  77. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  78. tsadmetrics/utils/functions_auc.py +393 -0
  79. tsadmetrics/utils/functions_conversion.py +63 -0
  80. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  81. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  82. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  83. tsadmetrics-1.0.1.dist-info/METADATA +83 -0
  84. tsadmetrics-1.0.1.dist-info/RECORD +91 -0
  85. tsadmetrics-1.0.1.dist-info/top_level.txt +3 -0
  86. entorno/bin/activate_this.py +0 -32
  87. entorno/bin/rst2html.py +0 -23
  88. entorno/bin/rst2html4.py +0 -26
  89. entorno/bin/rst2html5.py +0 -33
  90. entorno/bin/rst2latex.py +0 -26
  91. entorno/bin/rst2man.py +0 -27
  92. entorno/bin/rst2odt.py +0 -28
  93. entorno/bin/rst2odt_prepstyles.py +0 -20
  94. entorno/bin/rst2pseudoxml.py +0 -23
  95. entorno/bin/rst2s5.py +0 -24
  96. entorno/bin/rst2xetex.py +0 -27
  97. entorno/bin/rst2xml.py +0 -23
  98. entorno/bin/rstpep2html.py +0 -25
  99. tests/test_binary.py +0 -946
  100. tests/test_non_binary.py +0 -450
  101. tests/test_utils.py +0 -49
  102. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  103. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  104. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  105. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  106. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  107. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  108. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  109. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  110. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  111. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  112. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  113. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  114. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  115. tsadmetrics/_tsadeval/metrics.py +0 -698
  116. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  117. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  118. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  119. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  120. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  121. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  122. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  123. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  124. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  125. tsadmetrics/_tsadeval/tests.py +0 -376
  126. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  127. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  128. tsadmetrics/binary_metrics.py +0 -1652
  129. tsadmetrics/metric_utils.py +0 -98
  130. tsadmetrics/non_binary_metrics.py +0 -372
  131. tsadmetrics/scripts/__init__.py +0 -0
  132. tsadmetrics/scripts/compute_metrics.py +0 -42
  133. tsadmetrics/utils.py +0 -124
  134. tsadmetrics/validation.py +0 -35
  135. tsadmetrics-0.1.17.dist-info/METADATA +0 -54
  136. tsadmetrics-0.1.17.dist-info/RECORD +0 -66
  137. tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
  138. tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
  139. {tests → tsadmetrics/base}/__init__.py +0 -0
  140. /tsadmetrics/{_tsadeval → evaluation}/__init__.py +0 -0
  141. /tsadmetrics/{_tsadeval/affiliation → metrics/tem}/__init__.py +0 -0
  142. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  143. {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.1.dist-info}/WHEEL +0 -0
tsadmetrics/metrics/spm/__init__.py
@@ -0,0 +1,9 @@
+ from .PointwiseFScore import PointwiseFScore
+ from .PrecisionAtK import PrecisionAtK
+ from .PointwiseAucRoc import PointwiseAucRoc
+ from .PointwiseAucPr import PointwiseAucPr
+
+ __all__ = ['PointwiseFScore',
+            'PrecisionAtK',
+            'PointwiseAucRoc',
+            'PointwiseAucPr']
tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py
@@ -0,0 +1,83 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_conversion import full_series_to_segmentwise
+
+
+ class DelayThresholdedPointadjustedFScore(Metric):
+     """
+     Calculate delay thresholded point-adjusted F-score for anomaly detection in time series.
+
+     This metric is based on the standard F-score, but applies a temporal adjustment
+     to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
+     if at least one point within the first k time steps of the segment is predicted as anomalous,
+     all points in the segment are marked as correctly detected. The adjusted predictions are then
+     compared to the ground-truth labels using the standard point-wise F-score formulation.
+
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+     For more information, see the original paper:
+     https://doi.org/10.1145/3292500.3330680
+
+     Parameters:
+         k (int):
+             Maximum number of time steps from the start of an anomaly segment within which a prediction must occur
+             for the segment to be considered detected.
+         beta (float):
+             The beta value, which determines the weight of precision in the combined score.
+             Default is 1, which gives equal weight to precision and recall.
+     """
+     name = "dtpaf"
+     binary_prediction = True
+     param_schema = {
+         "k": {
+             "default": 1,
+             "type": int
+         },
+         "beta": {
+             "default": 1.0,
+             "type": float
+         }
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(name="dtpaf", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the delay thresholded point-adjusted F-score.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The computed delay thresholded point-adjusted F-score.
+         """
+
+         adjusted_prediction = y_pred.copy()
+         k = self.params["k"]
+
+         for start, end in full_series_to_segmentwise(y_true):
+             anomaly_adjusted = False
+             for i in range(start, min(start + k, end + 1)):
+                 if adjusted_prediction[i] == 1:
+                     adjusted_prediction[start:end + 1] = 1
+                     anomaly_adjusted = True
+                     break
+             if not anomaly_adjusted:
+                 adjusted_prediction[start:end + 1] = 0
+
+         tp = np.sum(adjusted_prediction * y_true)
+         fp = np.sum(adjusted_prediction * (1 - y_true))
+         fn = np.sum((1 - adjusted_prediction) * y_true)
+
+         precision = tp / (tp + fp) if (tp + fp) > 0 else 0
+         recall = tp / (tp + fn) if (tp + fn) > 0 else 0
+
+         if precision == 0 or recall == 0:
+             return 0
+
+         beta = self.params["beta"]
+         return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
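
To make the adjustment rule concrete, here is a minimal standalone sketch of the same idea in plain NumPy. It is not the package API: segment extraction is inlined rather than delegated to full_series_to_segmentwise, and the toy labels are invented for illustration.

import numpy as np

def delay_thresholded_pa_fscore(y_true, y_pred, k=2, beta=1.0):
    y_true = np.asarray(y_true)
    adjusted = np.asarray(y_pred).copy()
    # Ground-truth segments as (start, end) index pairs.
    edges = np.diff(np.concatenate(([0], y_true, [0])))
    starts, ends = np.where(edges == 1)[0], np.where(edges == -1)[0] - 1
    for start, end in zip(starts, ends):
        # Credit the whole segment only if a hit occurs in its first k steps.
        if adjusted[start:min(start + k, end + 1)].any():
            adjusted[start:end + 1] = 1
        else:
            adjusted[start:end + 1] = 0
    tp = np.sum(adjusted * y_true)
    fp = np.sum(adjusted * (1 - y_true))
    fn = np.sum((1 - adjusted) * y_true)
    precision = tp / (tp + fp) if tp + fp > 0 else 0
    recall = tp / (tp + fn) if tp + fn > 0 else 0
    if precision == 0 or recall == 0:
        return 0.0
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0, 1, 1])
y_pred = np.array([0, 0, 0, 1, 0, 0, 1, 0, 0, 1])
# Both segments are hit within their first 2 steps; the stray FP at t=6 lowers precision.
print(delay_thresholded_pa_fscore(y_true, y_pred, k=2))  # ~0.923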
tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py
@@ -0,0 +1,76 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_latency_sparsity_aware import calc_twseq
+
+ class LatencySparsityawareFScore(Metric):
+     """
+     Calculate latency and sparsity aware F-score for anomaly detection in time series.
+
+     This metric is based on the standard F-score, but applies a temporal adjustment
+     to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
+     all points in the segment are marked as correctly detected only after the first true positive
+     is predicted within that segment. This encourages early detection by delaying credit for correct
+     predictions until the anomaly is initially detected. Additionally, to reduce the impact of
+     scattered false positives, predictions are subsampled using a sparsity factor n, so that
+     only one prediction is considered every n time steps. The adjusted predictions are then used
+     to compute the standard point-wise F-score.
+
+     Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
+
+     For more information, see the original paper:
+     https://doi.org/10.1145/3447548.3467174
+
+     Parameters:
+         ni (int):
+             The batch size used in the implementation to handle latency and sparsity.
+         beta (float):
+             The beta value, which determines the weight of precision in the combined score.
+             Default is 1, which gives equal weight to precision and recall.
+     """
+     name = "lsaf"
+     binary_prediction = True
+     param_schema = {
+         "ni": {
+             "default": 1,
+             "type": int
+         },
+         "beta": {
+             "default": 1.0,
+             "type": float
+         }
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(name="lsaf", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the latency and sparsity aware F-score.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The latency and sparsity aware F-score, which is the harmonic mean
+             of precision and recall, adjusted by the beta value.
+         """
+
+         if np.sum(y_pred) == 0:
+             return 0
+
+         _, precision, recall, _, _, _, _, _ = calc_twseq(
+             y_pred,
+             y_true,
+             normal=0,
+             threshold=0.5,
+             tw=self.params["ni"],
+         )
+
+         if precision == 0 or recall == 0:
+             return 0
+
+         beta = self.params["beta"]
+         return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
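
The adjustment itself is delegated to calc_twseq and is not shown in this hunk. As a rough illustration only, the following sketch mimics the two ideas described in the docstring (credit within a true segment only from the first hit onward, and subsample predictions every n steps); it is a simplification and should not be read as the reference implementation.

import numpy as np

def latency_sparsity_adjust(y_true, y_pred, n=2):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    # Sparsity: keep only one prediction per block of n time steps.
    sparse = np.zeros_like(y_pred)
    sparse[::n] = y_pred[::n]
    adjusted = sparse.copy()
    # Latency: inside each true segment, credit points only from the first hit onward.
    edges = np.diff(np.concatenate(([0], y_true, [0])))
    starts, ends = np.where(edges == 1)[0], np.where(edges == -1)[0] - 1
    for s, e in zip(starts, ends):
        hits = np.where(sparse[s:e + 1] == 1)[0]
        if hits.size > 0:
            adjusted[s + hits[0]:e + 1] = 1
    return adjusted

# First hit in the true segment (t=1..4) is at t=2, so credit starts there.
print(latency_sparsity_adjust([0, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1], n=1))  # [0 0 1 1 1 1]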
tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py
@@ -0,0 +1,47 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_conversion import full_series_to_segmentwise
+
+ class MeanTimeToDetect(Metric):
+     r"""
+     Calculate mean time to detect for anomaly detection in time series.
+
+     This metric quantifies the average detection delay across all true anomaly events.
+     For each ground-truth anomaly segment, let i be the index where the segment starts,
+     and let :math:`{j \geq i}` be the first index at or after the segment start where the model predicts an anomaly.
+     The detection delay for that event is defined as:
+
+     .. math::
+         \Delta t = j - i
+
+     The MTTD is the mean of all such :math:`{\Delta t}` values, one per true anomaly segment, and expresses
+     the average number of time steps between the true onset of an anomaly and its first detection.
+     """
+     name = "mttd"
+     binary_prediction = True
+     def __init__(self, **kwargs):
+         super().__init__(name="mttd", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the mean time to detect.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The mean time to detect.
+         """
+
+         a_events = full_series_to_segmentwise(y_true)
+         t_sum = 0
+         for a, _ in a_events:
+             for i in range(a, len(y_pred)):
+                 if y_pred[i] == 1:
+                     t_sum += i - a
+                     break
+
+         return t_sum / len(a_events)
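
A quick standalone check of the delay arithmetic, independent of the class above (segment extraction inlined; both toy segments happen to be detected):

import numpy as np

y_true = np.array([0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0])

# True segments start at t=3 and t=9.
starts = np.where(np.diff(np.concatenate(([0], y_true))) == 1)[0]
delays = []
for s in starts:
    hits = np.where(y_pred[s:] == 1)[0]  # first prediction at or after the segment start
    if hits.size > 0:
        delays.append(hits[0])
print(np.mean(delays))  # (2 + 0) / 2 = 1.0 time steps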
tsadmetrics/metrics/tem/dpm/NabScore.py
@@ -0,0 +1,60 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_conversion import full_series_to_pointwise
+ from ....utils.functions_nabscore import Sweeper, calculate_scores
+
+ class NabScore(Metric):
+     """
+     Calculate NAB score for anomaly detection in time series.
+
+     This metric rewards early and accurate detections of anomalies while penalizing false positives.
+     For each ground truth anomaly segment, only the first correctly predicted anomaly point contributes
+     positively to the score, with earlier detections receiving higher rewards. In contrast, every false
+     positive prediction contributes negatively.
+
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+     For more information, see the original paper:
+     https://doi.org/10.1109/ICMLA.2015.141
+     """
+     name = "nab_score"
+     binary_prediction = True
+     def __init__(self, **kwargs):
+         super().__init__(name="nab_score", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the NAB score.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The computed NAB score.
+         """
+         sweeper = Sweeper(probationPercent=0, costMatrix={"tpWeight": 1, "fpWeight": 0.11, "fnWeight": 1})
+
+         if len(full_series_to_pointwise(y_pred)) == 0:
+             return 0
+         if len(full_series_to_pointwise(y_true)) == 0:
+             return np.nan
+
+         try:
+             sweeper, null_score, raw_score = calculate_scores(
+                 sweeper,
+                 full_series_to_pointwise(y_true),
+                 full_series_to_pointwise(y_pred),
+                 len(y_true)
+             )
+             sweeper, null_score, perfect_score = calculate_scores(
+                 sweeper,
+                 full_series_to_pointwise(y_true),
+                 full_series_to_pointwise(y_true),
+                 len(y_true)
+             )
+             return (raw_score - null_score) / (perfect_score - null_score) * 100
+         except Exception:
+             return 0
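
The final return rescales the raw sweeper score between two baselines: a detector that flags nothing (null) and one that reproduces the ground truth exactly (perfect). A small numeric check of that normalization, using made-up scores rather than real sweeper output:

# Hypothetical scores, for illustration of the normalization only.
raw_score, null_score, perfect_score = 3.5, -2.0, 8.0

nab = (raw_score - null_score) / (perfect_score - null_score) * 100
print(nab)  # 55.0: the detector closes 55% of the gap between "detect nothing" and "detect perfectly"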
tsadmetrics/metrics/tem/dpm/__init__.py
@@ -0,0 +1,11 @@
+ from .DelayThresholdedPointadjustedFScore import DelayThresholdedPointadjustedFScore
+ from .LatencySparsityawareFScore import LatencySparsityawareFScore
+ from .MeanTimeToDetect import MeanTimeToDetect
+ from .NabScore import NabScore
+
+ __all__ = [
+     "DelayThresholdedPointadjustedFScore",
+     "LatencySparsityawareFScore",
+     "MeanTimeToDetect",
+     "NabScore"
+ ]
tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py
@@ -0,0 +1,53 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_conversion import full_series_to_segmentwise, full_series_to_pointwise
+
+ class AverageDetectionCount(Metric):
+     """
+     Calculate average detection count for anomaly detection in time series.
+
+     This metric computes, for each ground-truth anomalous segment, the fraction of points within that segment
+     that are predicted as anomalous. It then averages these fractions across all true anomaly events,
+     providing an estimate of detection coverage per event.
+
+     For more information, see the original paper:
+     https://ceur-ws.org/Vol-1226/paper31.pdf
+
+     Parameters:
+         None
+     """
+
+     name = "adc"
+     binary_prediction = True
+     param_schema = {}
+
+     def __init__(self, **kwargs):
+         super().__init__(name="adc", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the average detection count.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The average detection count score.
+         """
+
+
+         azs = full_series_to_segmentwise(y_true)
+         a_points = full_series_to_pointwise(y_pred)
+
+         counts = []
+         for az in azs:
+             count = 0
+             for ap in a_points:
+                 if ap >= az[0] and ap <= az[1]:
+                     count += 1
+             counts.append(count / (az[1] - az[0] + 1))  # Normalize by segment length
+
+         return np.mean(counts)
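
A standalone numeric check of the per-event coverage averaging described above (segment extraction inlined; not the package helpers):

import numpy as np

y_true = np.array([0, 1, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 1, 0, 0, 0, 0, 0, 1])

edges = np.diff(np.concatenate(([0], y_true, [0])))
segments = list(zip(np.where(edges == 1)[0], np.where(edges == -1)[0] - 1))

# Coverage per true segment: 2/4 for t=1..4 and 0/2 for t=7..8; the FP at t=9 is ignored.
coverages = [y_pred[s:e + 1].mean() for s, e in segments]
print(np.mean(coverages))  # (0.5 + 0.0) / 2 = 0.25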
tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py
@@ -0,0 +1,66 @@
+ from ....base.Metric import Metric
+ from ....utils.functions_counting_metrics import counting_method
+ import numpy as np
+
+ class DetectionAccuracyInRange(Metric):
+     """
+     Calculate detection accuracy in range for anomaly detection in time series.
+
+     This metric measures the proportion of predicted anomaly events that correspond to true anomalies.
+     It is defined as:
+
+     .. math::
+         \\text{DAIR} = \\frac{EM + DA}{EM + DA + FA}
+
+     Where:
+
+     - EM (Exact Match):
+         Number of predicted anomaly segments that exactly match a true anomaly segment.
+     - DA (Detected Anomaly):
+         Number of true anomaly points not exactly matched where at least one prediction falls
+         within a window [i-k, i+k] around the true point index i or within the true segment range.
+     - FA (False Anomaly):
+         Number of predicted anomaly segments that do not overlap any true anomaly segment
+         even within a k-step tolerance window around true points.
+
+     For more information, see the original paper:
+     https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf
+
+     Parameters:
+         k (int):
+             Half-window size for tolerance around each true anomaly point. A prediction within k
+             time steps of a true point counts toward detection.
+     """
+     name = "dair"
+     binary_prediction = True
+     param_schema = {
+         "k": {
+             "default": 5,
+             "type": int
+         }
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(name="dair", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate detection accuracy in range for anomaly detection in time series.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The detection accuracy in range score.
+         """
+
+         if np.sum(y_pred) == 0:
+             return 0
+
+         k = self.params["k"]
+         em, da, _, fa = counting_method(y_true, y_pred, k)
+
+         return (em + da) / (em + da + fa)
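
The EM/DA/FA counting itself lives in counting_method, which is not part of this hunk; as a check of the formula alone, with made-up counts:

# Hypothetical counts, for illustration of the DAIR formula only.
em, da, fa = 2, 3, 1  # exact matches, tolerance-window detections, false events

dair = (em + da) / (em + da + fa)
print(dair)  # 5 / 6 ≈ 0.833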
tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py
@@ -0,0 +1,80 @@
+ from ....base.Metric import Metric
+ import numpy as np
+ from ....utils.functions_conversion import full_series_to_segmentwise, full_series_to_pointwise, pointwise_to_full_series
+
+ class PointadjustedAtKFScore(Metric):
+     """
+     Calculate point-adjusted at K% F-score for anomaly detection in time series.
+     This metric is based on the standard F-score, but applies a temporal adjustment
+     to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
+     if at least K% of the points within that segment are predicted as anomalous, all points in
+     the segment are marked as correctly detected. The adjusted predictions are then used
+     to compute the standard point-wise F-score.
+
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+     For more information, see the original paper:
+     https://ojs.aaai.org/index.php/AAAI/article/view/20680
+
+     Parameters:
+         k (float):
+             The minimum fraction of points in an anomalous segment that must be detected to consider the segment as detected.
+         beta (float):
+             The beta value, which determines the weight of precision in the combined score.
+             Default is 1, which gives equal weight to precision and recall.
+     """
+
+     name = "pakf"
+     binary_prediction = True
+     param_schema = {
+         "k": {
+             "default": 0.5,
+             "type": float
+         },
+         "beta": {
+             "default": 1.0,
+             "type": float
+         }
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(name="pakf", **kwargs)
+
+     def _compute(self, y_true, y_pred):
+         """
+         Calculate the point-adjusted at K% F-score.
+
+         Parameters:
+             y_true (np.array):
+                 The ground truth binary labels for the time series data.
+             y_pred (np.array):
+                 The predicted binary labels for the time series data.
+
+         Returns:
+             float: The point-adjusted at k F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
+         """
+
+         adjusted_prediction = full_series_to_pointwise(y_pred).tolist()
+         for start, end in full_series_to_segmentwise(y_true):
+             correct_points = 0
+             for i in range(start, end + 1):
+                 if i in adjusted_prediction:
+                     correct_points += 1
+                 if correct_points / (end + 1 - start) >= self.params['k']:
+                     for j in range(start, end + 1):
+                         adjusted_prediction.append(j)
+                     break
+
+         adjusted_prediction = pointwise_to_full_series(np.sort(np.unique(adjusted_prediction)), len(y_true))
+         tp = np.sum(adjusted_prediction * y_true)
+         fp = np.sum(adjusted_prediction * (1 - y_true))
+         fn = np.sum((1 - adjusted_prediction) * y_true)
+
+         precision = tp / (tp + fp) if (tp + fp) > 0 else 0
+         recall = tp / (tp + fn) if (tp + fn) > 0 else 0
+
+         if precision == 0 or recall == 0:
+             return 0
+
+         beta = self.params['beta']
+         return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
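
To make the K% rule concrete, here is a standalone sketch in plain NumPy (segment handling inlined, predictions in segments below the threshold left untouched; toy data invented for illustration, not the package API):

import numpy as np

def pa_at_k_fscore(y_true, y_pred, k=0.5, beta=1.0):
    y_true = np.asarray(y_true)
    adjusted = np.asarray(y_pred).copy()
    edges = np.diff(np.concatenate(([0], y_true, [0])))
    starts, ends = np.where(edges == 1)[0], np.where(edges == -1)[0] - 1
    for s, e in zip(starts, ends):
        # Credit the whole segment once at least a fraction k of it is predicted.
        if adjusted[s:e + 1].mean() >= k:
            adjusted[s:e + 1] = 1
    tp = np.sum(adjusted * y_true)
    fp = np.sum(adjusted * (1 - y_true))
    fn = np.sum((1 - adjusted) * y_true)
    precision = tp / (tp + fp) if tp + fp > 0 else 0
    recall = tp / (tp + fn) if tp + fn > 0 else 0
    if precision == 0 or recall == 0:
        return 0.0
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)

y_true = np.array([0, 1, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 1, 0, 0, 0, 0, 1, 0])
print(pa_at_k_fscore(y_true, y_pred, k=0.5))  # both segments reach 50% coverage, so F1 = 1.0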