tsadmetrics 0.1.17__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {docs_api → docs/add_docs/api_doc}/conf.py +3 -26
- {docs_manual → docs/add_docs/full_doc}/conf.py +2 -25
- docs/add_docs/manual_doc/conf.py +67 -0
- docs/conf.py +1 -1
- examples/example_direct_data.py +28 -0
- examples/example_direct_single_data.py +25 -0
- examples/example_file_reference.py +24 -0
- examples/example_global_config_file.py +13 -0
- examples/example_metric_config_file.py +19 -0
- examples/example_simple_metric.py +8 -0
- examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
- examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
- examples/specific_examples/AverageDetectionCount_example.py +24 -0
- examples/specific_examples/CompositeFScore_example.py +24 -0
- examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
- examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
- examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
- examples/specific_examples/MeanTimeToDetect_example.py +24 -0
- examples/specific_examples/NabScore_example.py +24 -0
- examples/specific_examples/PateFScore_example.py +24 -0
- examples/specific_examples/Pate_example.py +24 -0
- examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
- examples/specific_examples/PointadjustedAucPr_example.py +24 -0
- examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
- examples/specific_examples/PointadjustedFScore_example.py +24 -0
- examples/specific_examples/RangebasedFScore_example.py +24 -0
- examples/specific_examples/SegmentwiseFScore_example.py +24 -0
- examples/specific_examples/TemporalDistance_example.py +24 -0
- examples/specific_examples/TimeTolerantFScore_example.py +24 -0
- examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/TotalDetectedInRange_example.py +24 -0
- examples/specific_examples/VusPr_example.py +24 -0
- examples/specific_examples/VusRoc_example.py +24 -0
- examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
- tsadmetrics/__init__.py +0 -21
- tsadmetrics/base/Metric.py +188 -0
- tsadmetrics/evaluation/Report.py +25 -0
- tsadmetrics/evaluation/Runner.py +253 -0
- tsadmetrics/metrics/Registry.py +141 -0
- tsadmetrics/metrics/__init__.py +2 -0
- tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
- tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
- tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
- tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
- tsadmetrics/metrics/spm/__init__.py +9 -0
- tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
- tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
- tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
- tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
- tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
- tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
- tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
- tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
- tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
- tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
- tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
- tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
- tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
- tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
- tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
- tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
- tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
- tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
- tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
- tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
- tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
- tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
- tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
- tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
- tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
- tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
- tsadmetrics/utils/functions_auc.py +393 -0
- tsadmetrics/utils/functions_conversion.py +63 -0
- tsadmetrics/utils/functions_counting_metrics.py +26 -0
- tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
- tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
- tsadmetrics-1.0.1.dist-info/METADATA +83 -0
- tsadmetrics-1.0.1.dist-info/RECORD +91 -0
- tsadmetrics-1.0.1.dist-info/top_level.txt +3 -0
- entorno/bin/activate_this.py +0 -32
- entorno/bin/rst2html.py +0 -23
- entorno/bin/rst2html4.py +0 -26
- entorno/bin/rst2html5.py +0 -33
- entorno/bin/rst2latex.py +0 -26
- entorno/bin/rst2man.py +0 -27
- entorno/bin/rst2odt.py +0 -28
- entorno/bin/rst2odt_prepstyles.py +0 -20
- entorno/bin/rst2pseudoxml.py +0 -23
- entorno/bin/rst2s5.py +0 -24
- entorno/bin/rst2xetex.py +0 -27
- entorno/bin/rst2xml.py +0 -23
- entorno/bin/rstpep2html.py +0 -25
- tests/test_binary.py +0 -946
- tests/test_non_binary.py +0 -450
- tests/test_utils.py +0 -49
- tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
- tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
- tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
- tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
- tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
- tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
- tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
- tsadmetrics/_tsadeval/metrics.py +0 -698
- tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
- tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
- tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
- tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
- tsadmetrics/_tsadeval/tests.py +0 -376
- tsadmetrics/_tsadeval/threshold_plt.py +0 -30
- tsadmetrics/_tsadeval/time_tolerant.py +0 -33
- tsadmetrics/binary_metrics.py +0 -1652
- tsadmetrics/metric_utils.py +0 -98
- tsadmetrics/non_binary_metrics.py +0 -372
- tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics/scripts/compute_metrics.py +0 -42
- tsadmetrics/utils.py +0 -124
- tsadmetrics/validation.py +0 -35
- tsadmetrics-0.1.17.dist-info/METADATA +0 -54
- tsadmetrics-0.1.17.dist-info/RECORD +0 -66
- tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
- tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
- {tests → tsadmetrics/base}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval → evaluation}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/affiliation → metrics/tem}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
- {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.1.dist-info}/WHEEL +0 -0
tsadmetrics/binary_metrics.py
DELETED
@@ -1,1652 +0,0 @@
|
|
1
|
-
import numpy as np
|
2
|
-
from .metric_utils import *
|
3
|
-
from .validation import *
|
4
|
-
from ._tsadeval.metrics import *
|
5
|
-
from ._tsadeval.prts.basic_metrics_ts import ts_fscore
|
6
|
-
from pate.PATE_metric import PATE
|
7
|
-
def point_wise_recall(y_true: np.array, y_pred: np.array):
|
8
|
-
"""
|
9
|
-
Calculate point-wise recall for anomaly detection in time series.
|
10
|
-
Esta métrica consiste en el recall clásico, sin tener en cuenta el contexto
|
11
|
-
temporal.
|
12
|
-
|
13
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
14
|
-
|
15
|
-
Parameters:
|
16
|
-
y_true (np.array):
|
17
|
-
The ground truth binary labels for the time series data.
|
18
|
-
y_pred (np.array):
|
19
|
-
The predicted binary labels for the time series data.
|
20
|
-
|
21
|
-
Returns:
|
22
|
-
float: The point-wise recall score, which is the ratio of true positives to the sum of true positives and false negatives.
|
23
|
-
"""
|
24
|
-
validate_binary_inputs(y_true, y_pred)
|
25
|
-
|
26
|
-
m = Pointwise_metrics(len(y_true),y_true,y_pred)
|
27
|
-
m.set_confusion()
|
28
|
-
TP,FN = m.tp,m.fn
|
29
|
-
if TP == 0:
|
30
|
-
return 0
|
31
|
-
return TP / (TP + FN)
|
32
|
-
|
33
|
-
def point_wise_precision(y_true: np.array, y_pred: np.array):
|
34
|
-
"""
|
35
|
-
Calculate point-wise precision for anomaly detection in time series.
|
36
|
-
Esta métrica consiste en la precisión clásica, sin tener en cuenta el contexto
|
37
|
-
temporal.
|
38
|
-
|
39
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
40
|
-
|
41
|
-
Parameters:
|
42
|
-
y_true (np.array):
|
43
|
-
The ground truth binary labels for the time series data.
|
44
|
-
y_pred (np.array):
|
45
|
-
The predicted binary labels for the time series data.
|
46
|
-
|
47
|
-
Returns:
|
48
|
-
float: The point-wise precision score, which is the ratio of true positives to the sum of true positives and false positives.
|
49
|
-
"""
|
50
|
-
validate_binary_inputs(y_true, y_pred)
|
51
|
-
|
52
|
-
m = Pointwise_metrics(len(y_true),y_true,y_pred)
|
53
|
-
m.set_confusion()
|
54
|
-
TP,FP = m.tp,m.fp
|
55
|
-
if TP == 0:
|
56
|
-
return 0
|
57
|
-
return TP / (TP + FP)
|
58
|
-
|
59
|
-
def point_wise_f_score(y_true: np.array, y_pred: np.array, beta=1):
|
60
|
-
"""
|
61
|
-
Calculate point-wise F-score for anomaly detection in time series.
|
62
|
-
Esta métrica consiste en la F-score clásica, sin tener en cuenta el contexto
|
63
|
-
temporal.
|
64
|
-
|
65
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
66
|
-
|
67
|
-
Parameters:
|
68
|
-
y_true (np.array):
|
69
|
-
The ground truth binary labels for the time series data.
|
70
|
-
y_pred (np.array):
|
71
|
-
The predicted binary labels for the time series data.
|
72
|
-
beta (float):
|
73
|
-
The beta value, which determines the weight of precision in the combined score.
|
74
|
-
Default is 1, which gives equal weight to precision and recall.
|
75
|
-
|
76
|
-
Returns:
|
77
|
-
float: The point-wise F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
|
78
|
-
"""
|
79
|
-
validate_binary_inputs(y_true, y_pred)
|
80
|
-
|
81
|
-
precision = point_wise_precision(y_true, y_pred)
|
82
|
-
recall = point_wise_recall(y_true, y_pred)
|
83
|
-
|
84
|
-
if precision == 0 or recall == 0:
|
85
|
-
return 0
|
86
|
-
|
87
|
-
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
|
88
|
-
|
89
|
-
|
90
|
-
def point_adjusted_recall(y_true: np.array, y_pred: np.array):
|
91
|
-
"""
|
92
|
-
This metric is based on the standard recall score, but applies a temporal adjustment
|
93
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
94
|
-
if at least one point within that segment is predicted as anomalous, all points in the segment
|
95
|
-
are marked as correctly detected. The adjusted predictions are then compared to the ground-truth
|
96
|
-
labels using the standard point-wise recall formulation.
|
97
|
-
|
98
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
99
|
-
|
100
|
-
For more information, see the original paper:
|
101
|
-
https://doi.org/10.1145/3178876.3185996
|
102
|
-
|
103
|
-
Parameters:
|
104
|
-
y_true (np.array):
|
105
|
-
The ground truth binary labels for the time series data.
|
106
|
-
y_pred (np.array):
|
107
|
-
The predicted binary labels for the time series data.
|
108
|
-
|
109
|
-
Returns:
|
110
|
-
float: The point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
|
111
|
-
"""
|
112
|
-
validate_binary_inputs(y_true, y_pred)
|
113
|
-
|
114
|
-
if np.sum(y_pred) == 0:
|
115
|
-
return 0
|
116
|
-
m = PointAdjust(len(y_true),y_true,y_pred)
|
117
|
-
TP,FN = m.tp,m.fn
|
118
|
-
if TP == 0:
|
119
|
-
return 0
|
120
|
-
return TP / (TP + FN)
|
121
|
-
|
122
|
-
def point_adjusted_precision(y_true: np.array, y_pred: np.array):
|
123
|
-
"""
|
124
|
-
Calculate point-adjusted precision for anomaly detection in time series.
|
125
|
-
This metric is based on the standard precision score, but applies a temporal adjustment
|
126
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
127
|
-
if at least one point within that segment is predicted as anomalous, all points in the segment
|
128
|
-
are marked as correctly detected. The adjusted predictions are then compared to the ground-truth
|
129
|
-
labels using the standard point-wise precision formulation.
|
130
|
-
|
131
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
132
|
-
|
133
|
-
For more information, see the original paper:
|
134
|
-
https://doi.org/10.1145/3178876.3185996
|
135
|
-
|
136
|
-
Parameters:
|
137
|
-
y_true (np.array):
|
138
|
-
The ground truth binary labels for the time series data.
|
139
|
-
y_pred (np.array):
|
140
|
-
The predicted binary labels for the time series data.
|
141
|
-
|
142
|
-
Returns:
|
143
|
-
float: The point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
|
144
|
-
"""
|
145
|
-
validate_binary_inputs(y_true, y_pred)
|
146
|
-
|
147
|
-
if np.sum(y_pred) == 0:
|
148
|
-
return 0
|
149
|
-
m = PointAdjust(len(y_true),y_true,y_pred)
|
150
|
-
TP,FP = m.tp,m.fp
|
151
|
-
if TP == 0:
|
152
|
-
return 0
|
153
|
-
return TP / (TP + FP)
|
154
|
-
|
155
|
-
def point_adjusted_f_score(y_true: np.array, y_pred: np.array, beta=1):
|
156
|
-
"""
|
157
|
-
Calculate point-adjusted F-score for anomaly detection in time series.
|
158
|
-
This metric is based on the standard F-score, but applies a temporal adjustment
|
159
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
160
|
-
if at least one point within that segment is predicted as anomalous, all points in the segment
|
161
|
-
are marked as correctly detected. The adjusted predictions are then compared to the ground-truth
|
162
|
-
labels using the standard point-wise F-Score formulation.
|
163
|
-
|
164
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
165
|
-
|
166
|
-
For more information, see the original paper:
|
167
|
-
https://doi.org/10.1145/3178876.3185996
|
168
|
-
|
169
|
-
Parameters:
|
170
|
-
y_true (np.array):
|
171
|
-
The ground truth binary labels for the time series data.
|
172
|
-
y_pred (np.array):
|
173
|
-
The predicted binary labels for the time series data.
|
174
|
-
beta (float):
|
175
|
-
The beta value, which determines the weight of precision in the combined score.
|
176
|
-
Default is 1, which gives equal weight to precision and recall.
|
177
|
-
|
178
|
-
Returns:
|
179
|
-
float: The point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
|
180
|
-
"""
|
181
|
-
validate_binary_inputs(y_true, y_pred)
|
182
|
-
|
183
|
-
precision = point_adjusted_precision(y_true, y_pred)
|
184
|
-
recall = point_adjusted_recall(y_true, y_pred)
|
185
|
-
|
186
|
-
if precision == 0 or recall == 0:
|
187
|
-
return 0
|
188
|
-
|
189
|
-
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
def delay_th_point_adjusted_recall(y_true: np.array, y_pred: np.array, k: int):
|
194
|
-
"""
|
195
|
-
Calculate delay thresholded point-adjusted recall for anomaly detection in time series.
|
196
|
-
This metric is based on the standard recall score, but applies a temporal adjustment
|
197
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
198
|
-
if at least one point within the first k time steps of the segment is predicted as anomalous,
|
199
|
-
all points in the segment are marked as correctly detected. The adjusted predictions are then
|
200
|
-
used to compute the standard point-wise recall formulation.
|
201
|
-
|
202
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
203
|
-
|
204
|
-
For more information, see the original paper:
|
205
|
-
https://doi.org/10.1145/3292500.3330680
|
206
|
-
|
207
|
-
Parameters:
|
208
|
-
y_true (np.array):
|
209
|
-
The ground truth binary labels for the time series data.
|
210
|
-
y_pred (np.array):
|
211
|
-
The predicted binary labels for the time series data.
|
212
|
-
k (int):
|
213
|
-
Maximum number of time steps from the start of an anomaly segment within which a
|
214
|
-
prediction must occur for the segment to be considered detected.
|
215
|
-
|
216
|
-
Returns:
|
217
|
-
float: The delay thresholded point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
|
218
|
-
"""
|
219
|
-
validate_binary_inputs(y_true, y_pred)
|
220
|
-
|
221
|
-
if np.sum(y_pred) == 0:
|
222
|
-
return 0
|
223
|
-
m = DelayThresholdedPointAdjust(len(y_true),y_true,y_pred,k=k)
|
224
|
-
TP,FN = m.tp,m.fn
|
225
|
-
if TP == 0:
|
226
|
-
return 0
|
227
|
-
return TP / (TP + FN)
|
228
|
-
|
229
|
-
def delay_th_point_adjusted_precision(y_true: np.array, y_pred: np.array, k: int):
|
230
|
-
"""
|
231
|
-
Calculate delay thresholded point-adjusted precision for anomaly detection in time series.
|
232
|
-
This metric is based on the standard precision score, but applies a temporal adjustment
|
233
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
234
|
-
if at least one point within the first k time steps of the segment is predicted as anomalous,
|
235
|
-
all points in the segment are marked as correctly detected. The adjusted predictions are then
|
236
|
-
used to compute the standard point-wise precision fromulation.
|
237
|
-
|
238
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
239
|
-
|
240
|
-
For more information, see the original paper:
|
241
|
-
https://doi.org/10.1145/3292500.3330680
|
242
|
-
|
243
|
-
Parameters:
|
244
|
-
y_true (np.array):
|
245
|
-
The ground truth binary labels for the time series data.
|
246
|
-
y_pred (np.array):
|
247
|
-
The predicted binary labels for the time series data.
|
248
|
-
k (int):
|
249
|
-
Maximum number of time steps from the start of an anomaly segment
|
250
|
-
within which a prediction must occur for the segment to be considered detected.
|
251
|
-
|
252
|
-
Returns:
|
253
|
-
float: The delay thresholded point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
|
254
|
-
"""
|
255
|
-
validate_binary_inputs(y_true, y_pred)
|
256
|
-
|
257
|
-
if np.sum(y_pred) == 0:
|
258
|
-
return 0
|
259
|
-
m = DelayThresholdedPointAdjust(len(y_true),y_true,y_pred,k=k)
|
260
|
-
TP,FP = m.tp,m.fp
|
261
|
-
if TP == 0:
|
262
|
-
return 0
|
263
|
-
return TP / (TP + FP)
|
264
|
-
|
265
|
-
def delay_th_point_adjusted_f_score(y_true: np.array, y_pred: np.array, k: int, beta=1):
|
266
|
-
"""
|
267
|
-
Calculate delay thresholded point-adjusted F-score for anomaly detection in time series.
|
268
|
-
This metric is based on the standard F-score, but applies a temporal adjustment
|
269
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
270
|
-
if at least one point within the first k time steps of the segment is predicted as anomalous,
|
271
|
-
all points in the segment are marked as correctly detected. The adjusted predictions are then
|
272
|
-
used to compute the standard point-wise F-Score formulation.
|
273
|
-
|
274
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
275
|
-
|
276
|
-
For more information, see the original paper:
|
277
|
-
https://doi.org/10.1145/3292500.3330680
|
278
|
-
|
279
|
-
Parameters:
|
280
|
-
y_true (np.array):
|
281
|
-
The ground truth binary labels for the time series data.
|
282
|
-
y_pred (np.array):
|
283
|
-
The predicted binary labels for the time series data.
|
284
|
-
k (int):
|
285
|
-
Maximum number of time steps from the start of an anomaly segment within which a prediction must occur for the segment to be considered detected.
|
286
|
-
beta (float):
|
287
|
-
The beta value, which determines the weight of precision in the combined score.
|
288
|
-
Default is 1, which gives equal weight to precision and recall.
|
289
|
-
|
290
|
-
Returns:
|
291
|
-
float: The delay thresholded point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
|
292
|
-
"""
|
293
|
-
validate_binary_inputs(y_true, y_pred)
|
294
|
-
|
295
|
-
precision = delay_th_point_adjusted_precision(y_true, y_pred, k)
|
296
|
-
recall = delay_th_point_adjusted_recall(y_true, y_pred, k)
|
297
|
-
|
298
|
-
if precision == 0 or recall == 0:
|
299
|
-
return 0
|
300
|
-
|
301
|
-
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
|
302
|
-
|
303
|
-
|
304
|
-
def point_adjusted_at_k_recall(y_true: np.array, y_pred: np.array, k: float):
|
305
|
-
"""
|
306
|
-
Calculate k percent point-adjusted at K% recall for anomaly detection in time series.
|
307
|
-
This metric is based on the standard recall score, but applies a temporal adjustment
|
308
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
309
|
-
if at least K% of the points within that segment are predicted as anomalous, all points in
|
310
|
-
the segment are marked as correctly detected. The adjusted predictions are then used
|
311
|
-
to compute the standard point-wise recall.
|
312
|
-
|
313
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
314
|
-
|
315
|
-
For more information, see the original paper:
|
316
|
-
https://ojs.aaai.org/index.php/AAAI/article/view/20680
|
317
|
-
|
318
|
-
Parameters:
|
319
|
-
y_true (np.array):
|
320
|
-
The ground truth binary labels for the time series data.
|
321
|
-
y_pred (np.array):
|
322
|
-
The predicted binary labels for the time series data.
|
323
|
-
k (float):
|
324
|
-
The minimum percentage of the anomaly that must be detected to consider the anomaly as detected.
|
325
|
-
|
326
|
-
Returns:
|
327
|
-
float: The point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
|
328
|
-
"""
|
329
|
-
validate_binary_inputs(y_true, y_pred)
|
330
|
-
|
331
|
-
m = PointAdjustKPercent(len(y_true),y_true,y_pred,k=k)
|
332
|
-
TP,FN = m.tp,m.fn
|
333
|
-
if TP == 0:
|
334
|
-
return 0
|
335
|
-
return TP / (TP + FN)
|
336
|
-
|
337
|
-
def point_adjusted_at_k_precision(y_true: np.array, y_pred: np.array, k: float):
|
338
|
-
"""
|
339
|
-
Calculate point-adjusted at K% precision for anomaly detection in time series.
|
340
|
-
This metric is based on the standard precision score, but applies a temporal adjustment
|
341
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
342
|
-
if at least K% of the points within that segment are predicted as anomalous, all points in
|
343
|
-
the segment are marked as correctly detected. The adjusted predictions are then used
|
344
|
-
to compute the standard point-wise precision.
|
345
|
-
|
346
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
347
|
-
|
348
|
-
For more information, see the original paper:
|
349
|
-
https://ojs.aaai.org/index.php/AAAI/article/view/20680
|
350
|
-
|
351
|
-
Parameters:
|
352
|
-
y_true (np.array):
|
353
|
-
The ground truth binary labels for the time series data.
|
354
|
-
y_pred (np.array):
|
355
|
-
The predicted binary labels for the time series data.
|
356
|
-
k (float):
|
357
|
-
The minimum percentage of the anomaly that must be detected to consider the anomaly as detected.
|
358
|
-
|
359
|
-
Returns:
|
360
|
-
float: The point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
|
361
|
-
"""
|
362
|
-
validate_binary_inputs(y_true, y_pred)
|
363
|
-
|
364
|
-
m = PointAdjustKPercent(len(y_true),y_true,y_pred,k=k)
|
365
|
-
TP,FP = m.tp,m.fp
|
366
|
-
if TP == 0:
|
367
|
-
return 0
|
368
|
-
return TP / (TP + FP)
|
369
|
-
|
370
|
-
def point_adjusted_at_k_f_score(y_true: np.array, y_pred: np.array, k: float, beta=1):
|
371
|
-
"""
|
372
|
-
Calculate point-adjusted at K% F-score for anomaly detection in time series.
|
373
|
-
This metric is based on the standard F-Score, but applies a temporal adjustment
|
374
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
375
|
-
if at least K% of the points within that segment are predicted as anomalous, all points in
|
376
|
-
the segment are marked as correctly detected. The adjusted predictions are then used
|
377
|
-
to compute the standard F-Score precision.
|
378
|
-
|
379
|
-
Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
|
380
|
-
|
381
|
-
For more information, see the original paper:
|
382
|
-
https://ojs.aaai.org/index.php/AAAI/article/view/20680
|
383
|
-
|
384
|
-
Parameters:
|
385
|
-
y_true (np.array):
|
386
|
-
The ground truth binary labels for the time series data.
|
387
|
-
y_pred (np.array):
|
388
|
-
The predicted binary labels for the time series data.
|
389
|
-
k (float):
|
390
|
-
The minimum percentage of the anomaly that must be detected to consider the anomaly as detected.
|
391
|
-
beta (float):
|
392
|
-
The beta value, which determines the weight of precision in the combined score.
|
393
|
-
Default is 1, which gives equal weight to precision and recall.
|
394
|
-
|
395
|
-
Returns:
|
396
|
-
float: The point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
|
397
|
-
"""
|
398
|
-
validate_binary_inputs(y_true, y_pred)
|
399
|
-
|
400
|
-
precision = point_adjusted_at_k_precision(y_true, y_pred, k)
|
401
|
-
recall = point_adjusted_at_k_recall(y_true, y_pred, k)
|
402
|
-
|
403
|
-
if precision == 0 or recall == 0:
|
404
|
-
return 0
|
405
|
-
|
406
|
-
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
|
407
|
-
|
408
|
-
|
409
|
-
def latency_sparsity_aw_recall(y_true: np.array, y_pred: np.array, ni: int):
|
410
|
-
"""
|
411
|
-
Calculate latency and sparsity aware recall for anomaly detection in time series.
|
412
|
-
This metric is based on the standard recall, but applies a temporal adjustment
|
413
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
414
|
-
all points in the segment are marked as correctly detected only after the first true positive
|
415
|
-
is predicted within that segment. This encourages early detection by delaying credit for correct
|
416
|
-
predictions until the anomaly is initially detected. Additionally, to reduce the impact of
|
417
|
-
scattered false positives, predictions are subsampled using a sparsity factor n, so that
|
418
|
-
only one prediction is considered every n time steps. The adjusted predictions are then used
|
419
|
-
to compute the standard point-wise recall.
|
420
|
-
|
421
|
-
Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
|
422
|
-
|
423
|
-
For more information, see the original paper:
|
424
|
-
https://doi.org/10.1145/3447548.3467174
|
425
|
-
|
426
|
-
Parameters:
|
427
|
-
y_true (np.array):
|
428
|
-
The ground truth binary labels for the time series data.
|
429
|
-
y_pred (np.array):
|
430
|
-
The predicted binary labels for the time series data.
|
431
|
-
ni (int):
|
432
|
-
The batch size used in the implementation to handle latency and sparsity.
|
433
|
-
|
434
|
-
Returns:
|
435
|
-
float: The latency and sparsity aware recall score, which is the ratio of true positives to the sum of true positives and false negatives.
|
436
|
-
"""
|
437
|
-
validate_binary_inputs(y_true, y_pred)
|
438
|
-
|
439
|
-
if np.sum(y_pred) == 0:
|
440
|
-
return 0
|
441
|
-
m = LatencySparsityAware(len(y_true),y_true,y_pred,tw=ni)
|
442
|
-
TP,FN = m.tp, m.fn
|
443
|
-
if TP == 0:
|
444
|
-
return 0
|
445
|
-
return TP / (TP + FN)
|
446
|
-
|
447
|
-
def latency_sparsity_aw_precision(y_true: np.array, y_pred: np.array, ni: int):
|
448
|
-
"""
|
449
|
-
Calculate latency and sparsity aware precision for anomaly detection in time series.
|
450
|
-
This metric is based on the standard precision, but applies a temporal adjustment
|
451
|
-
to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
|
452
|
-
all points in the segment are marked as correctly detected only after the first true positive
|
453
|
-
is predicted within that segment. This encourages early detection by delaying credit for correct
|
454
|
-
predictions until the anomaly is initially detected. Additionally, to reduce the impact of
|
455
|
-
scattered false positives, predictions are subsampled using a sparsity factor n, so that
|
456
|
-
only one prediction is considered every n time steps. The adjusted predictions are then used
|
457
|
-
to compute the standard point-wise precision.
|
458
|
-
|
459
|
-
Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
|
460
|
-
|
461
|
-
For more information, see the original paper:
|
462
|
-
https://doi.org/10.1145/3447548.3467174
|
463
|
-
|
464
|
-
Parameters:
|
465
|
-
y_true (np.array):
|
466
|
-
The ground truth binary labels for the time series data.
|
467
|
-
y_pred (np.array):
|
468
|
-
The predicted binary labels for the time series data.
|
469
|
-
ni (int):
|
470
|
-
The batch size used in the implementation to handle latency and sparsity.
|
471
|
-
|
472
|
-
Returns:
|
473
|
-
float: The latency and sparsity aware precision score, which is the ratio of true positives to the sum of true positives and false positives.
|
474
|
-
"""
|
475
|
-
validate_binary_inputs(y_true, y_pred)
|
476
|
-
|
477
|
-
if np.sum(y_pred) == 0:
|
478
|
-
return 0
|
479
|
-
m = LatencySparsityAware(len(y_true),y_true,y_pred,tw=ni)
|
480
|
-
TP,FP = m.tp, m.fp
|
481
|
-
if TP == 0:
|
482
|
-
return 0
|
483
|
-
return TP / (TP + FP)
|
484
|
-
|
485
|
-
def latency_sparsity_aw_f_score(y_true: np.array, y_pred: np.array, ni: int, beta=1):
    """
    Compute latency- and sparsity-aware F-score for time series anomaly detection.

    Standard F-score applied after a temporal adjustment of the predictions:
    within each ground-truth anomalous segment, points are credited as detected
    only once the first true positive in that segment occurs (rewarding early
    detection), and predictions are subsampled with a sparsity factor so that
    only one prediction every n time steps is considered (damping scattered
    false positives).

    Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174

    For more information, see the original paper:
    https://doi.org/10.1145/3447548.3467174

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        ni (int):
            The batch size used in the implementation to handle latency and sparsity.
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).

    Returns:
        float: The latency and sparsity aware F-score — the harmonic mean of
        precision and recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    # Fast exit: no positive predictions means both components are 0.
    if not np.any(y_pred):
        return 0

    rec = latency_sparsity_aw_recall(y_true, y_pred, ni)
    prec = latency_sparsity_aw_precision(y_true, y_pred, ni)
    if prec == 0 or rec == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
|
526
|
-
|
527
|
-
|
528
|
-
def segment_wise_recall(y_true: np.array, y_pred: np.array):
    """
    Compute segment-wise recall for time series anomaly detection.

    Each contiguous ground-truth anomalous segment is treated as a single unit:
    a true positive is counted when at least one point of the segment is
    predicted anomalous, and a false negative when no point of the segment is
    detected. A false positive is recorded for each predicted anomalous segment
    with no overlap with any ground-truth anomaly. Recall is computed from these
    segment-level counts.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3219819.3219845

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The segment-wise recall score, i.e. the ratio of true positives
        to the sum of true positives and false negatives.
    """
    validate_binary_inputs(y_true, y_pred)

    counts = Segmentwise_metrics(len(y_true), y_true, y_pred)
    tp, fn = counts.tp, counts.fn
    return 0 if tp == 0 else tp / (tp + fn)
|
560
|
-
|
561
|
-
def segment_wise_precision(y_true: np.array, y_pred: np.array):
    """
    Compute segment-wise precision for time series anomaly detection.

    Each contiguous ground-truth anomalous segment is treated as a single unit:
    a true positive is counted when at least one point of the segment is
    predicted anomalous, and a false negative when no point of the segment is
    detected. A false positive is recorded for each predicted anomalous segment
    with no overlap with any ground-truth anomaly. Precision is computed from
    these segment-level counts.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3219819.3219845

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The segment-wise precision score, i.e. the ratio of true
        positives to the sum of true positives and false positives.
    """
    validate_binary_inputs(y_true, y_pred)

    counts = Segmentwise_metrics(len(y_true), y_true, y_pred)
    tp, fp = counts.tp, counts.fp
    return 0 if tp == 0 else tp / (tp + fp)
|
593
|
-
|
594
|
-
def segment_wise_f_score(y_true: np.array, y_pred: np.array, beta=1):
    """
    Compute segment-wise F-score for time series anomaly detection.

    Each contiguous ground-truth anomalous segment is treated as a single unit:
    a true positive is counted when at least one point of the segment is
    predicted anomalous, and a false negative when no point of the segment is
    detected. A false positive is recorded for each predicted anomalous segment
    with no overlap with any ground-truth anomaly. The F-score is computed from
    these segment-level counts.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3219819.3219845

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).

    Returns:
        float: The segment-wise F-score — the harmonic mean of precision and
        recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    counts = Segmentwise_metrics(len(y_true), y_true, y_pred)
    tp, fn, fp = counts.tp, counts.fn, counts.fp
    if tp == 0:
        return 0

    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    if prec == 0 or rec == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
|
636
|
-
|
637
|
-
def composite_f_score(y_true: np.array, y_pred: np.array, beta=1):
    """
    Compute the composite F-score for time series anomaly detection.

    Combines aspects of point_wise_f_score and segment_wise_f_score: it is the
    harmonic mean of point-wise precision and segment-wise recall. Using
    point-wise precision ensures false positives are properly penalized — a
    property segment-wise metrics typically lack.

    Implementation of https://ieeexplore.ieee.org/document/9525836

    For more information, see the original paper:
    https://doi.org/10.1109/TNNLS.2021.3105827

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).

    Returns:
        float: The composite F-score — the harmonic mean of point-wise
        precision and segment-wise recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    scorer = Composite_f(len(y_true), y_true, y_pred)
    prec = scorer.precision()  # point-wise precision
    rec = scorer.recall()      # segment-wise recall

    if prec == 0 or rec == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
|
676
|
-
|
677
|
-
def time_tolerant_recall(y_true: np.array, y_pred: np.array, t: int) -> float:
    """
    Compute time-tolerant recall for time series anomaly detection.

    Standard point-wise recall applied after a temporal adjustment: a predicted
    anomalous point counts as a true positive if it falls within a window of
    size :math:`{\\tau}` around any ground-truth anomalous point, so small
    temporal deviations in the predictions are tolerated.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    10.48550/arXiv.2008.05788

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        t (int):
            The time tolerance parameter.

    Returns:
        float: The time tolerant recall score, i.e. the ratio of true positives
        to the sum of true positives and false negatives.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions: nothing can be recalled.
    if not np.any(y_pred):
        return 0

    return Time_Tolerant(len(y_true), y_true, y_pred, d=t).recall()
|
709
|
-
|
710
|
-
def time_tolerant_precision(y_true: np.array, y_pred: np.array, t: int) -> float:
    """
    Compute time-tolerant precision for time series anomaly detection.

    Standard point-wise precision applied after a temporal adjustment: a
    predicted anomalous point counts as a true positive if it falls within a
    window of size :math:`{\\tau}` around any ground-truth anomalous point, so
    small temporal deviations in the predictions are tolerated.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    10.48550/arXiv.2008.05788

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        t (int):
            The time tolerance parameter.

    Returns:
        float: The time tolerant precision score, i.e. the ratio of true
        positives to the sum of true positives and false positives.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions at all: precision is defined as 0 here.
    if not np.any(y_pred):
        return 0
    return Time_Tolerant(len(y_true), y_true, y_pred, d=t).precision()
|
741
|
-
|
742
|
-
|
743
|
-
def time_tolerant_f_score(y_true: np.array, y_pred: np.array, t: int, beta=1):
    """
    Compute time-tolerant F-score for time series anomaly detection.

    Standard point-wise F-score applied after a temporal adjustment: a predicted
    anomalous point counts as a true positive if it falls within a window of
    size :math:`{\\tau}` around any ground-truth anomalous point, so small
    temporal deviations in the predictions are tolerated.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    10.48550/arXiv.2008.05788

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        t (int):
            The time tolerance parameter.
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).

    Returns:
        float: The time tolerant F-score — the harmonic mean of precision and
        recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    prec = time_tolerant_precision(y_true, y_pred, t)
    rec = time_tolerant_recall(y_true, y_pred, t)
    if prec == 0 or rec == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
|
779
|
-
|
780
|
-
|
781
|
-
def range_based_recall(y_true: np.array, y_pred: np.array, alpha: float, bias='flat', cardinality_mode='one'):
    """
    Compute range-based recall for time series anomaly detection.

    Extends standard recall to the level of anomalous ranges: each true anomaly
    range is scored by rewarding (1) detection of the range's existence, (2) the
    proportion of overlap, and (3) position/fragmentation effects of predicted
    segments. Existence vs. overlap is weighted by :math:`{\\alpha}`, and
    positional and cardinality factors are shaped by customizable bias functions.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        alpha (float):
            Relative importance of existence reward. 0 \\leq alpha \\leq 1.
        bias (str):
            Positional bias. This should be "flat", "front", "middle", or "back".
        cardinality_mode (str, optional):
            Cardinality type. This should be "one", "reciprocal" or "udf_gamma".

    Returns:
        float: The range-based recall score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions: nothing can be recalled.
    if not np.any(y_pred):
        return 0
    scorer = Range_PR(len(y_true), y_true, y_pred,
                      cardinality=cardinality_mode, alpha=alpha, bias=bias)
    return scorer.recall()
|
818
|
-
|
819
|
-
|
820
|
-
|
821
|
-
def range_based_precision(y_true: np.array, y_pred: np.array, alpha: float, bias='flat', cardinality_mode='one'):
    """
    Compute range-based precision for time series anomaly detection.

    Extends standard precision to the range level: each predicted anomaly range
    is scored on (1) overlap with any true range, (2) the size of that overlap,
    and (3) positional/fragmentation effects via bias functions. Cardinality
    penalties can apply when one true range is covered by several predicted
    ranges. Existence vs. overlap is weighted by :math:`{\\alpha}`, and
    positional and cardinality factors are shaped by customizable bias functions.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        alpha (float):
            Relative importance of existence reward. 0 \\leq alpha \\leq 1.
        bias (str):
            Positional bias. This should be "flat", "front", "middle", or "back".
        cardinality_mode (str, optional):
            Cardinality type. This should be "one", "reciprocal" or "udf_gamma".

    Returns:
        float: The range-based precision score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions at all: precision is defined as 0 here.
    if not np.any(y_pred):
        return 0
    scorer = Range_PR(len(y_true), y_true, y_pred,
                      cardinality=cardinality_mode, alpha=alpha, bias=bias)
    return scorer.precision()
|
858
|
-
|
859
|
-
|
860
|
-
|
861
|
-
|
862
|
-
|
863
|
-
|
864
|
-
def range_based_f_score(y_true: np.array, y_pred: np.array, p_alpha: float, r_alpha: float, p_bias='flat', r_bias='flat', cardinality_mode='one', beta=1) -> float:
    """
    Compute range-based F-score for time series anomaly detection.

    Harmonic mean of range-based precision and range-based recall, inheriting
    all of their customizability — existence vs. overlap weighting, positional
    bias, and cardinality factors — for fine-grained control over how missed
    detections and false alarms are penalized in a temporal context.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        p_alpha (float):
            Relative importance of existence reward for precision. 0 \\leq alpha \\leq 1.
        r_alpha (float):
            Relative importance of existence reward for recall. 0 \\leq alpha \\leq 1.
        p_bias (str):
            Positional bias for precision. This should be "flat", "front", "middle", or "back".
        r_bias (str):
            Positional bias for recall. This should be "flat", "front", "middle", or "back".
        cardinality_mode (str, optional):
            Cardinality type. This should be "one", "reciprocal" or "udf_gamma".
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).

    Returns:
        float: The range-based F-score — the harmonic mean of precision and
        recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions: score is 0 by definition.
    if not np.any(y_pred):
        return 0
    return ts_fscore(y_true, y_pred, beta=beta,
                     p_alpha=p_alpha, r_alpha=r_alpha,
                     cardinality=cardinality_mode,
                     p_bias=p_bias, r_bias=r_bias)
|
907
|
-
|
908
|
-
|
909
|
-
|
910
|
-
|
911
|
-
def ts_aware_recall(y_true: np.array, y_pred: np.array, alpha: float, delta: float, theta: float, past_range: bool = False):
    """
    Compute time series aware recall for time series anomaly detection.

    A variant of range_based_recall with two modifications: a predicted segment
    counts as a true positive only if it covers at least a fraction
    :math:`{\\theta}` of the ground-truth anomaly range, and each labeled
    anomaly is extended by a tolerance window of length :math:`{\\delta}` at its
    end, within which overlap contribution decays linearly to zero. Cardinality
    and positional bias terms of the original range-based formulation are
    omitted.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3357384.3358118

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        alpha (float):
            Relative importance of the existence reward versus overlap reward
            (0 \\leq :math:`{\\alpha}` \\leq 1).
        delta (float):
            Tolerance window length at the end of each true anomaly segment.
            - If past_range is True, :math:`{\\delta}` must be a float in (0, 1],
              the fraction of the segment's length to extend.
            - If past_range is False, :math:`{\\delta}` must be a non-negative
              integer, an absolute number of time steps.
        theta (float):
            Minimum fraction (0 \\leq :math:`{\\theta}` \\leq 1) of the true
            anomaly range that must be overlapped by predictions for the
            segment to count as detected.
        past_range (bool):
            Determines how :math:`{\\delta}` is interpreted (fractional if
            True, absolute time steps if False).

    Returns:
        float: The time series aware recall score.
    """
    validate_binary_inputs(y_true, y_pred)

    scorer = TaF(len(y_true), y_true, y_pred,
                 alpha=alpha, theta=theta, delta=delta, past_range=past_range)
    return scorer.recall()
|
955
|
-
|
956
|
-
|
957
|
-
|
958
|
-
|
959
|
-
def ts_aware_precision(y_true: np.array, y_pred: np.array,alpha: float, delta: float, theta: float, past_range: bool = False):
    """
    Compute time series aware precision for time series anomaly detection.

    A variant of range_based_precision with two modifications: a predicted
    segment counts as a true positive only if it covers at least a fraction
    :math:`{\\theta}` of the ground-truth anomaly range, and each labeled
    anomaly is extended by a tolerance window of length :math:`{\\delta}` at its
    end, within which overlap contribution decays linearly to zero. Cardinality
    and positional bias terms of the original range-based formulation are
    omitted.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3357384.3358118

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        alpha (float):
            Relative importance of the existence reward versus overlap reward
            (0 \\leq :math:`{\\alpha}` \\leq 1).
        delta (float):
            Tolerance window length at the end of each true anomaly segment.
            - If past_range is True, :math:`{\\delta}` must be a float in (0, 1],
              the fraction of the segment's length to extend.
            - If past_range is False, :math:`{\\delta}` must be a non-negative
              integer, an absolute number of time steps.
        theta (float):
            Minimum fraction (0 \\leq :math:`{\\theta}` \\leq 1) of the true
            anomaly range that must be overlapped by predictions for the
            segment to count as detected.
        past_range (bool):
            Determines how :math:`{\\delta}` is interpreted (fractional if
            True, absolute time steps if False).

    Returns:
        float: The time series aware precision score.
    """
    validate_binary_inputs(y_true, y_pred)

    scorer = TaF(len(y_true), y_true, y_pred,
                 alpha=alpha, theta=theta, delta=delta, past_range=past_range)
    return scorer.precision()
|
1003
|
-
|
1004
|
-
|
1005
|
-
|
1006
|
-
|
1007
|
-
|
1008
|
-
def ts_aware_f_score(y_true: np.array, y_pred: np.array, beta: float, alpha: float, delta: float, theta: float, past_range: bool = False):
    """
    Compute time series aware F-score for time series anomaly detection.

    A variant of range_based_f_score with two modifications: a predicted segment
    counts as a true positive only if it covers at least a fraction
    :math:`{\\theta}` of the ground-truth anomaly range, and each labeled
    anomaly is extended by a tolerance window of length :math:`{\\delta}` at its
    end, within which overlap contribution decays linearly to zero. Cardinality
    and positional bias terms of the original range-based formulation are
    omitted.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3357384.3358118

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        beta (float):
            The beta value, which determines the weight of precision in the
            combined score. Default is 1 (equal weight to precision and recall).
        alpha (float):
            Relative importance of the existence reward versus overlap reward
            (0 \\leq :math:`{\\alpha}` \\leq 1).
        delta (float):
            Tolerance window length at the end of each true anomaly segment.
            - If past_range is True, :math:`{\\delta}` must be a float in (0, 1],
              the fraction of the segment's length to extend.
            - If past_range is False, :math:`{\\delta}` must be a non-negative
              integer, an absolute number of time steps.
        theta (float):
            Minimum fraction (0 \\leq :math:`{\\theta}` \\leq 1) of the true
            anomaly range that must be overlapped by predictions for the
            segment to count as detected.
        past_range (bool):
            Determines how :math:`{\\delta}` is interpreted (fractional if
            True, absolute time steps if False).

    Returns:
        float: The time series aware F-score — the harmonic mean of precision
        and recall, weighted by beta.
    """
    validate_binary_inputs(y_true, y_pred)

    scorer = TaF(len(y_true), y_true, y_pred,
                 alpha=alpha, theta=theta, delta=delta, past_range=past_range)
    prec = scorer.precision()
    rec = scorer.recall()
    if prec == 0 or rec == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
|
1060
|
-
|
1061
|
-
|
1062
|
-
|
1063
|
-
|
1064
|
-
|
1065
|
-
def enhanced_ts_aware_recall(y_true: np.array, y_pred: np.array, theta: float):
    """
    Compute enhanced time series aware recall for time series anomaly detection.

    Similar to range-based recall in accounting for both detection existence and
    overlap proportion; additionally requires that a significant fraction
    :math:`{\\theta}` of each true anomaly segment be detected.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3477314.3507024

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        theta (float):
            Minimum fraction (0 \\leq :math:`{\\theta}` \\leq 1) of a true
            segment that must be overlapped by predictions to count as detected.

    Returns:
        float: The time series aware recall score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No positive predictions: nothing can be recalled.
    if not np.any(y_pred):
        return 0
    return eTaF(len(y_true), y_true, y_pred, theta_p=theta).recall()
|
1096
|
-
|
1097
|
-
|
1098
|
-
|
1099
|
-
|
1100
|
-
def enhanced_ts_aware_precision(y_true: np.array, y_pred: np.array, theta: float):
    """
    Calculate enhanced time series aware precision for anomaly detection in time series.

    This metric is similar to the range-based precision, but additionally requires
    that a significant fraction :math:`{\\theta}` of each predicted segment overlaps
    with the ground truth. Precision contributions from each event are weighted by
    the square root of the true segment's length, providing a compromise between
    point-wise and segment-wise approaches.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3477314.3507024

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        theta (float):
            Minimum fraction (0 \\leq :math:`{\\theta}` \\leq 1) of a predicted segment that must be overlapped
            by ground truth to count as detected.

    Returns:
        float: The time series aware precision score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No predictions at all: precision is defined as 0 here.
    if not np.any(y_pred):
        return 0
    scorer = eTaF(len(y_true), y_true, y_pred, theta_p=theta)
    return scorer.precision()
def enhanced_ts_aware_f_score(y_true: np.array, y_pred: np.array, theta_p: float, theta_r: float):
    """
    Calculate enhanced time series aware F-score for anomaly detection in time series.

    Like the range-based F-score, this metric rewards both detection existence and
    overlap proportion. Additionally, it requires that a significant fraction
    :math:`{\\theta_r}` of each true anomaly segment be detected, and that a
    significant fraction :math:`{\\theta_p}` of each predicted segment overlaps
    with the ground truth. F-score contributions from each event are weighted by
    the square root of the true segment's length, providing a compromise between
    point-wise and segment-wise approaches.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1145/3477314.3507024

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        theta_p (float):
            Minimum fraction (0 \\leq :math:`{\\theta_p}` \\leq 1) of a predicted segment that must be overlapped
            by ground truth to count as detected.
        theta_r (float):
            Minimum fraction (0 \\leq :math:`{\\theta_r}` \\leq 1) of a true segment that must be overlapped
            by predictions to count as detected.

    Returns:
        float: The enhanced time series aware F1 score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No predicted anomalies: both precision and recall are 0.
    if not np.any(y_pred):
        return 0
    scorer = eTaF(len(y_true), y_true, y_pred, theta_p=theta_p, theta_r=theta_r)
    return scorer.result['f1']
def affiliation_based_recall(y_true: np.array, y_pred: np.array):
    """
    Calculate affiliation based recall for anomaly detection in time series.

    This metric evaluates how well each labeled anomaly is affiliated with predicted points.
    It computes the average distance from each ground truth anomaly point to the nearest
    predicted anomaly point.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://dl.acm.org/doi/10.1145/3534678.3539339

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The affiliation based recall score.
    """
    validate_binary_inputs(y_true, y_pred)

    if np.sum(y_pred) == 0:
        return 0
    m = Affiliation(len(y_true), y_true, y_pred)
    # get_score() is invoked for its side effect only: it presumably computes
    # and caches the affiliation precision/recall on the instance (`m.r`) —
    # TODO confirm against the Affiliation implementation. Its return value
    # was previously bound to an unused local.
    m.get_score()
    return m.r
def affiliation_based_precision(y_true: np.array, y_pred: np.array):
    """
    Calculate affiliation based precision for anomaly detection in time series.

    This metric evaluates how well each predicted anomaly is affiliated with labeled points.
    It computes the average distance from each predicted anomaly point to the nearest
    ground truth anomaly point.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://dl.acm.org/doi/10.1145/3534678.3539339

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The affiliation based precision score.
    """
    validate_binary_inputs(y_true, y_pred)

    if np.sum(y_pred) == 0:
        return 0
    m = Affiliation(len(y_true), y_true, y_pred)
    # get_score() is invoked for its side effect only: it presumably computes
    # and caches the affiliation precision/recall on the instance (`m.p`) —
    # TODO confirm against the Affiliation implementation. Its return value
    # was previously bound to an unused local.
    m.get_score()
    return m.p
def affiliation_based_f_score(y_true: np.array, y_pred: np.array, beta=1):
    """
    Calculate affiliation based F-score for anomaly detection in time series.

    Combines affiliation-based precision and recall into a single score via the
    harmonic mean, with :math:`{\\beta}` controlling the relative importance of
    recall versus precision. Since both components are distance-based, the score
    reflects how well predicted anomalies align with true anomalies and vice versa.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://dl.acm.org/doi/10.1145/3534678.3539339

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        beta (float):
            The beta value, which determines the weight of precision in the combined score.

    Returns:
        float: The affiliation based F-score.
    """
    validate_binary_inputs(y_true, y_pred)

    # No predicted anomalies: score defaults to 0.
    if not np.any(y_pred):
        return 0
    scorer = Affiliation(len(y_true), y_true, y_pred)
    return scorer.get_score(beta)
def nab_score(y_true: np.array, y_pred: np.array):
    """
    Calculate NAB score for anomaly detection in time series.

    Rewards early and accurate detections while penalizing false positives. For
    each ground truth anomaly segment, only the first correctly predicted anomaly
    point contributes positively (earlier detections score higher); every false
    positive prediction contributes negatively.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://doi.org/10.1109/ICMLA.2015.141

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The nab score.
    """
    validate_binary_inputs(y_true, y_pred)

    # NOTE(review): unlike sibling metrics there is no empty-prediction guard
    # here; NAB_score is assumed to handle all-zero predictions itself.
    scorer = NAB_score(len(y_true), y_true, y_pred)
    return scorer.get_score()
def temporal_distance(y_true: np.array, y_pred: np.array, distance: int = 0):
    """
    Calculate temporal distance for anomaly detection in time series.

    Computes the sum of distances from each labelled anomaly point to the closest
    predicted anomaly point, plus the distances from each predicted anomaly point
    to the closest labelled anomaly point.

    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8

    For more information, see the original paper:
    https://sciendo.com/article/10.2478/ausi-2019-0008

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        distance (int):
            The distance type parameter for the temporal distance calculation.
            0: Euclidean distance
            1: Squared Euclidean distance

    Returns:
        float: The temporal distance.
    """
    validate_binary_inputs(y_true, y_pred)

    scorer = Temporal_Distance(len(y_true), y_true, y_pred, distance=distance)
    return scorer.get_score()
def average_detection_count(y_true: np.array, y_pred: np.array):
    """
    Calculate average detection count for anomaly detection in time series.

    For each ground-truth anomalous segment, computes the fraction of its points
    that are predicted as anomalous, then averages these fractions over all true
    anomaly events, estimating detection coverage per event.

    For more information, see the original paper:
    https://ceur-ws.org/Vol-1226/paper31.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The average detection count score.
    """
    validate_binary_inputs(y_true, y_pred)

    detector = Binary_detection(len(y_true), y_true, y_pred)
    segments = detector.get_gt_anomalies_segmentwise()
    pred_points = detector.get_predicted_anomalies_ptwise()

    # Per-segment coverage: predicted points inside the segment, normalized by
    # segment length (segments are inclusive [start, end] index pairs).
    coverages = [
        sum(1 for p in pred_points if start <= p <= end) / (end - start + 1)
        for start, end in segments
    ]

    # NOTE(review): if y_true contains no anomaly segments this is np.mean of
    # an empty list (nan + RuntimeWarning) — matches the original behavior.
    return np.mean(coverages)
def absolute_detection_distance(y_true: np.array, y_pred: np.array):
    """
    Calculate absolute detection distance for anomaly detection in time series.

    For each predicted anomaly point that falls inside a ground-truth anomaly
    segment, computes the relative distance from that point to the segment's
    temporal center, sums these distances, and divides by the total number of
    predicted anomaly points, yielding the mean distance to segment centers for
    correctly detected points.

    For more information, see the original paper:
    https://ceur-ws.org/Vol-1226/paper31.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The absolute detection distance, or inf when there are no
        predicted anomaly points.
    """
    validate_binary_inputs(y_true, y_pred)

    detector = Binary_detection(len(y_true), y_true, y_pred)
    segments = detector.get_gt_anomalies_segmentwise()
    pred_points = detector.get_predicted_anomalies_ptwise()

    if len(pred_points) == 0:
        # No predictions: distance is undefined, reported as infinite.
        return float('inf')

    total = 0
    for start, end in segments:
        # Segment midpoint; also used (clamped to >= 1) as the normalizer.
        center = int((start + end) / 2)
        denom = max(1, center)
        for p in pred_points:
            if start <= p <= end:
                total += abs(p - center) / denom

    # Normalized by ALL predicted points, not only the matching ones.
    return total / len(pred_points)
def total_detected_in_range(y_true: np.array, y_pred: np.array, k: int):
    """
    Calculate total detected in range for anomaly detection in time series.

    Measures the proportion of true anomaly events that are correctly detected:

    .. math::
        \\text{TDIR} = \\frac{EM + DA}{EM + DA + MA}

    Where:

    - EM (Exact Match):
        Number of predicted anomaly segments that exactly match a true anomaly segment.
    - DA (Detected Anomaly):
        Number of true anomaly points not exactly matched where at least one prediction falls
        within a window [i-k, i+k] around the true point index i or within the true segment range.
    - MA (Missed Anomaly):
        Number of true anomaly segments that do not overlap any predicted anomaly segment
        even within a k-step tolerance window around true points.

    For more information, see the original paper:
    https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        k (int):
            Half-window size for tolerance around each true anomaly point. A prediction within k
            time steps of a true point counts toward detection.

    Returns:
        float: The total detected in range score.
    """
    validate_binary_inputs(y_true, y_pred)

    # Without any predictions every event is missed.
    if not np.any(y_pred):
        return 0
    exact, detected, missed, _ = counting_method(y_true, y_pred, k)
    hits = exact + detected
    return hits / (hits + missed)
def detection_accuracy_in_range(y_true: np.array, y_pred: np.array, k: int):
    """
    Calculate detection accuracy in range for anomaly detection in time series.

    Measures the proportion of predicted anomaly events that correspond to true anomalies:

    .. math::
        \\text{DAIR} = \\frac{EM + DA}{EM + DA + FA}

    Where:

    - EM (Exact Match):
        Number of predicted anomaly segments that exactly match a true anomaly segment.
    - DA (Detected Anomaly):
        Number of true anomaly points not exactly matched where at least one prediction falls
        within a window [i-k, i+k] around the true point index i or within the true segment range.
    - FA (False Anomaly):
        Number of predicted anomaly segments that do not overlap any true anomaly segment
        even within a k-step tolerance window around true points.

    For more information, see the original paper:
    https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        k (int):
            Half-window size for tolerance around each true anomaly point. A prediction within k
            time steps of a true point counts toward detection.

    Returns:
        float: The detection accuracy in range score.
    """
    validate_binary_inputs(y_true, y_pred)

    # Without any predictions there is nothing to be accurate about.
    if not np.any(y_pred):
        return 0
    exact, detected, _, false_alarms = counting_method(y_true, y_pred, k)
    hits = exact + detected
    return hits / (hits + false_alarms)
def weighted_detection_difference(y_true: np.array, y_pred: np.array, k: int):
    """
    Calculate weighted detection difference for anomaly detection in time series.

    Each position receives a weight: positions inside true anomaly segments keep
    weight 1, while positions outside decay linearly with distance to the nearest
    true anomaly point (the paper describes this as a Gaussian-style weighting).
    Correct detections and false alarms are scored against these weights and
    combined as:

    .. math::
        \\text{WDD} = \\text{WS} - \\text{WF} \\cdot \\text{FA}

    Where:

    - WS: weighted sum over positions NOT predicted as anomalous.
    - WF: weighted sum over positions predicted as anomalous.
    - FA (False Anomaly):
        Number of predicted anomaly segments that do not overlap any true anomaly segment
        even within a k-step tolerance window around true points.

    For more information, see the original paper:
    https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        k (int):
            The maximum number of time steps within which an anomaly must be predicted to be considered detected.

    Returns:
        float: The weighted detection difference.
    """
    validate_binary_inputs(y_true, y_pred)

    if np.sum(y_pred) == 0:
        return 0

    def weight(dist, horizon):
        # Linear decay toward 0 within the horizon, -1 beyond it.
        return 1 - dist / horizon if dist < horizon else -1

    horizon = len(y_true)
    anomaly_idx = np.where(y_true == 1)[0]

    # Start from the labels (1.0 inside segments) and replace every
    # non-anomalous position with its distance-decayed weight.
    weights = y_true.astype(float).copy()
    for i in range(len(y_true)):
        if y_true[i] == 0:
            dist = np.min(np.abs(anomaly_idx - i)) if len(anomaly_idx) > 0 else horizon
            weights[i] = weight(dist, horizon)

    # NOTE(review): WS accumulates where y_pred != 1 and WF where y_pred == 1,
    # which reads inverted relative to the docstring's WS/WF description —
    # preserved as-is from the original; confirm against the paper.
    ws = sum(weights[i] for i in range(len(y_pred)) if y_pred[i] != 1)
    wf = sum(weights[i] for i in range(len(y_pred)) if y_pred[i] == 1)

    _, _, _, fa = counting_method(y_true, y_pred, k)

    return ws - wf * fa
def binary_pate(y_true: np.array, y_pred: np.array, early: int, delay: int):
    """
    Calculate PATE score for anomaly detection in time series.

    PATE assigns weighted scores based on temporal proximity to true anomaly
    intervals, using buffer zones around each interval: an early buffer of
    length `early` before it and a delay buffer of length `delay` after it.
    Detections inside the interval get full weight; detections in the buffers
    get linearly decaying weights; predictions outside these zones count as
    false positives, and missed intervals as false negatives.

    Implementation of https://arxiv.org/abs/2405.12096

    For more information, see the original paper:
    https://arxiv.org/abs/2405.12096

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.
        early (int):
            The maximum number of time steps before an anomaly must be predicted to be considered early.
        delay (int):
            The maximum number of time steps after an anomaly must be predicted to be considered delayed.

    Returns:
        float: The PATE score.
    """
    validate_binary_inputs(y_true, y_pred)

    # Delegate to the reference PATE implementation in binary-score mode.
    return PATE(y_true, y_pred, early, delay, binary_scores=True)
def mean_time_to_detect(y_true: np.array, y_pred: np.array):
    """
    Calculate mean time to detect for anomaly detection in time series.

    Quantifies the average detection delay across all true anomaly events. For a
    ground-truth segment starting at index i, with :math:`{j \\geq i}` the first
    index at or after i where the model predicts an anomaly, the delay is:

    .. math::
        \\Delta t = j - i

    The MTTD is the mean of these :math:`{\\Delta t}` values, one per true
    anomaly segment.

    Parameters:
        y_true (np.array):
            The ground truth binary labels for the time series data.
        y_pred (np.array):
            The predicted binary labels for the time series data.

    Returns:
        float: The mean time to detect.
    """
    validate_binary_inputs(y_true, y_pred)

    detector = Binary_detection(len(y_true), y_true, y_pred)
    segments = detector.get_gt_anomalies_segmentwise()

    total_delay = 0
    for start, _ in segments:
        # First predicted anomaly at or after the segment start. The search is
        # not bounded by the segment end, and an undetected segment contributes
        # 0 delay — both preserved from the original; confirm intent.
        first_hit = next(
            (i for i in range(start, len(y_pred)) if y_pred[i] == 1), None
        )
        if first_hit is not None:
            total_delay += first_hit - start

    # NOTE(review): raises ZeroDivisionError when y_true has no anomaly
    # segments — matches the original behavior.
    return total_delay / len(segments)