tsadmetrics 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs_api/conf.py +90 -0
- docs_manual/conf.py +90 -0
- tests/test_binary.py +194 -7
- tests/test_non_binary.py +138 -59
- tests/test_utils.py +49 -0
- tsadmetrics/__init__.py +1 -1
- tsadmetrics/binary_metrics.py +214 -19
- tsadmetrics/metric_utils.py +3 -238
- tsadmetrics/non_binary_metrics.py +113 -110
- tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics/scripts/compute_metrics.py +42 -0
- tsadmetrics/utils.py +71 -2
- tsadmetrics/validation.py +35 -0
- tsadmetrics-0.1.17.dist-info/METADATA +54 -0
- {tsadmetrics-0.1.15.dist-info → tsadmetrics-0.1.17.dist-info}/RECORD +18 -11
- tsadmetrics-0.1.17.dist-info/entry_points.txt +2 -0
- {tsadmetrics-0.1.15.dist-info → tsadmetrics-0.1.17.dist-info}/top_level.txt +2 -0
- tsadmetrics-0.1.15.dist-info/METADATA +0 -23
- {tsadmetrics-0.1.15.dist-info → tsadmetrics-0.1.17.dist-info}/WHEEL +0 -0
tsadmetrics/binary_metrics.py
CHANGED
@@ -1,8 +1,6 @@
 import numpy as np
 from .metric_utils import *
-from .
-
-
+from .validation import *
 from ._tsadeval.metrics import *
 from ._tsadeval.prts.basic_metrics_ts import ts_fscore
 from pate.PATE_metric import PATE
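The dropped import is replaced by the new `tsadmetrics/validation.py` module (added in this release, +35 lines), and every metric below now calls `validate_binary_inputs(y_true, y_pred)` before doing any work. The helper's body is not part of this file's diff; the following is only a minimal sketch of what such a check usually looks like, not the package's actual code:

```python
import numpy as np

def validate_binary_inputs(y_true, y_pred):
    # Hypothetical sketch only; the real helper lives in tsadmetrics/validation.py.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    if y_true.shape != y_pred.shape:
        raise ValueError("y_true and y_pred must have the same shape")
    for name, arr in (("y_true", y_true), ("y_pred", y_pred)):
        if not np.isin(arr, (0, 1)).all():
            raise ValueError(f"{name} must contain only binary values (0/1)")
```

Whatever the real checks are, the visible effect of this diff is that malformed inputs now fail fast at the top of every binary metric instead of deep inside the per-metric classes.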
@@ -23,6 +21,8 @@ def point_wise_recall(y_true: np.array, y_pred: np.array):
     Returns:
         float: The point-wise recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Pointwise_metrics(len(y_true),y_true,y_pred)
     m.set_confusion()
     TP,FN = m.tp,m.fn
@@ -47,6 +47,8 @@ def point_wise_precision(y_true: np.array, y_pred: np.array):
     Returns:
         float: The point-wise precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Pointwise_metrics(len(y_true),y_true,y_pred)
     m.set_confusion()
     TP,FP = m.tp,m.fp
@@ -74,6 +76,8 @@ def point_wise_f_score(y_true: np.array, y_pred: np.array, beta=1):
     Returns:
         float: The point-wise F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     precision = point_wise_precision(y_true, y_pred)
     recall = point_wise_recall(y_true, y_pred)
 
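The point-wise functions keep their signatures, so existing callers are unaffected; only the upfront validation is new. A minimal usage sketch (the arrays are illustrative, not taken from the package's tests):

```python
import numpy as np
from tsadmetrics.binary_metrics import (
    point_wise_f_score, point_wise_precision, point_wise_recall,
)

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0, 0])
y_pred = np.array([0, 0, 1, 0, 1, 0, 1, 1, 0, 0])

print(point_wise_precision(y_true, y_pred))   # TP=3, FP=1 -> 0.75
print(point_wise_recall(y_true, y_pred))      # TP=3, FN=1 -> 0.75
print(point_wise_f_score(y_true, y_pred, beta=1))
```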
@@ -91,7 +95,10 @@ def point_adjusted_recall(y_true: np.array, y_pred: np.array):
     are marked as correctly detected. The adjusted predictions are then compared to the ground-truth
     labels using the standard point-wise recall formulation.
 
-
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3178876.3185996
 
     Parameters:
         y_true (np.array):
@@ -102,6 +109,8 @@ def point_adjusted_recall(y_true: np.array, y_pred: np.array):
     Returns:
         float: The point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = PointAdjust(len(y_true),y_true,y_pred)
@@ -121,6 +130,9 @@ def point_adjusted_precision(y_true: np.array, y_pred: np.array):
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3178876.3185996
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -130,6 +142,8 @@ def point_adjusted_precision(y_true: np.array, y_pred: np.array):
     Returns:
         float: The point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = PointAdjust(len(y_true),y_true,y_pred)
@@ -146,8 +160,12 @@ def point_adjusted_f_score(y_true: np.array, y_pred: np.array, beta=1):
     if at least one point within that segment is predicted as anomalous, all points in the segment
     are marked as correctly detected. The adjusted predictions are then compared to the ground-truth
     labels using the standard point-wise F-Score formulation.
+
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3178876.3185996
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -160,6 +178,8 @@ def point_adjusted_f_score(y_true: np.array, y_pred: np.array, beta=1):
     Returns:
         float: The point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     precision = point_adjusted_precision(y_true, y_pred)
     recall = point_adjusted_recall(y_true, y_pred)
 
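Point adjustment, as described in these docstrings, expands any single hit inside a true segment to the whole segment before scoring. A small worked sketch of that rule (values illustrative):

```python
import numpy as np
from tsadmetrics.binary_metrics import point_adjusted_recall, point_wise_recall

y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0])  # a single hit inside the first segment

# Point-wise: 1 of 5 anomalous points caught -> 0.2.
print(point_wise_recall(y_true, y_pred))
# Point-adjusted: the whole first segment is credited, the second is missed -> 3/5 = 0.6.
print(point_adjusted_recall(y_true, y_pred))
```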
@@ -181,6 +201,9 @@ def delay_th_point_adjusted_recall(y_true: np.array, y_pred: np.array, k: int):
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3292500.3330680
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -193,6 +216,8 @@ def delay_th_point_adjusted_recall(y_true: np.array, y_pred: np.array, k: int):
     Returns:
         float: The delay thresholded point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = DelayThresholdedPointAdjust(len(y_true),y_true,y_pred,k=k)
@@ -212,6 +237,9 @@ def delay_th_point_adjusted_precision(y_true: np.array, y_pred: np.array, k: int
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3292500.3330680
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -224,6 +252,8 @@ def delay_th_point_adjusted_precision(y_true: np.array, y_pred: np.array, k: int
     Returns:
         float: The delay thresholded point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = DelayThresholdedPointAdjust(len(y_true),y_true,y_pred,k=k)
@@ -243,6 +273,9 @@ def delay_th_point_adjusted_f_score(y_true: np.array, y_pred: np.array, k: int,
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3292500.3330680
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -257,6 +290,8 @@ def delay_th_point_adjusted_f_score(y_true: np.array, y_pred: np.array, k: int,
     Returns:
         float: The delay thresholded point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     precision = delay_th_point_adjusted_precision(y_true, y_pred, k)
     recall = delay_th_point_adjusted_recall(y_true, y_pred, k)
 
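The delay-thresholded variant adds an integer k; the name and the parameter suggest a segment is only credited when it is detected within its first k time steps, but treat that reading as an assumption — the exact scoring is delegated to DelayThresholdedPointAdjust. Illustrative calls only:

```python
import numpy as np
from tsadmetrics.binary_metrics import delay_th_point_adjusted_f_score

y_true = np.array([0, 1, 1, 1, 1, 0, 0, 0])
y_pred = np.array([0, 0, 0, 0, 1, 0, 0, 0])  # detection arrives 3 steps into the segment

print(delay_th_point_adjusted_f_score(y_true, y_pred, k=4))  # generous threshold
print(delay_th_point_adjusted_f_score(y_true, y_pred, k=1))  # strict threshold
```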
@@ -277,6 +312,9 @@ def point_adjusted_at_k_recall(y_true: np.array, y_pred: np.array, k: float):
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
 
+    For more information, see the original paper:
+    https://ojs.aaai.org/index.php/AAAI/article/view/20680
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -288,6 +326,8 @@ def point_adjusted_at_k_recall(y_true: np.array, y_pred: np.array, k: float):
     Returns:
         float: The point-adjusted recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = PointAdjustKPercent(len(y_true),y_true,y_pred,k=k)
     TP,FN = m.tp,m.fn
     if TP == 0:
@@ -302,7 +342,11 @@ def point_adjusted_at_k_precision(y_true: np.array, y_pred: np.array, k: float):
     if at least K% of the points within that segment are predicted as anomalous, all points in
     the segment are marked as correctly detected. The adjusted predictions are then used
     to compute the standard point-wise precision.
+
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://ojs.aaai.org/index.php/AAAI/article/view/20680
 
     Parameters:
         y_true (np.array):
@@ -315,6 +359,8 @@ def point_adjusted_at_k_precision(y_true: np.array, y_pred: np.array, k: float):
     Returns:
         float: The point-adjusted precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = PointAdjustKPercent(len(y_true),y_true,y_pred,k=k)
     TP,FP = m.tp,m.fp
     if TP == 0:
@@ -331,6 +377,9 @@ def point_adjusted_at_k_f_score(y_true: np.array, y_pred: np.array, k: float, be
     to compute the standard F-Score precision.
 
     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://ojs.aaai.org/index.php/AAAI/article/view/20680
 
     Parameters:
         y_true (np.array):
@@ -346,6 +395,8 @@ def point_adjusted_at_k_f_score(y_true: np.array, y_pred: np.array, k: float, be
     Returns:
         float: The point-adjusted F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     precision = point_adjusted_at_k_precision(y_true, y_pred, k)
     recall = point_adjusted_at_k_recall(y_true, y_pred, k)
 
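In this family k is a float rather than a delay: a segment is only fully credited when at least K% of its points are flagged. Whether k is expressed as a fraction or a percentage is not visible in these hunks, so the threshold below is purely illustrative:

```python
import numpy as np
from tsadmetrics.binary_metrics import point_adjusted_at_k_f_score

y_true = np.array([0, 1, 1, 1, 1, 0, 0])
y_pred = np.array([0, 1, 1, 0, 0, 0, 0])  # half of the only true segment is covered

print(point_adjusted_at_k_f_score(y_true, y_pred, k=0.5))  # threshold value is illustrative
```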
@@ -369,6 +420,9 @@ def latency_sparsity_aw_recall(y_true: np.array, y_pred: np.array, ni: int):
 
     Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3447548.3467174
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -380,6 +434,8 @@ def latency_sparsity_aw_recall(y_true: np.array, y_pred: np.array, ni: int):
     Returns:
         float: The latency and sparsity aware recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = LatencySparsityAware(len(y_true),y_true,y_pred,tw=ni)
@@ -402,6 +458,9 @@ def latency_sparsity_aw_precision(y_true: np.array, y_pred: np.array, ni: int):
 
     Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3447548.3467174
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -413,6 +472,8 @@ def latency_sparsity_aw_precision(y_true: np.array, y_pred: np.array, ni: int):
     Returns:
         float: The latency and sparsity aware precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = LatencySparsityAware(len(y_true),y_true,y_pred,tw=ni)
@@ -435,6 +496,9 @@ def latency_sparsity_aw_f_score(y_true: np.array, y_pred: np.array, ni: int, bet
 
     Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
 
+    For more information, see the original paper:
+    https://doi.org/10.1145/3447548.3467174
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -449,6 +513,8 @@ def latency_sparsity_aw_f_score(y_true: np.array, y_pred: np.array, ni: int, bet
     Returns:
         float: The latency and sparsity aware F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
 
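The latency- and sparsity-aware family takes an extra ni argument, which these bodies forward to LatencySparsityAware as tw, so it presumably acts as a time-window size; that interpretation is an assumption. Illustrative call only, with an arbitrary window:

```python
import numpy as np
from tsadmetrics.binary_metrics import latency_sparsity_aw_f_score

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
y_pred = np.array([0, 0, 0, 0, 1, 1, 0, 1, 0, 0])  # late detection plus one false alarm

print(latency_sparsity_aw_f_score(y_true, y_pred, ni=2))
```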
@@ -470,7 +536,10 @@ def segment_wise_recall(y_true: np.array, y_pred: np.array):
     overlap with any ground-truth anomaly. The final recall is computed using these adjusted
     segment-level counts.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3219819.3219845
 
     Parameters:
         y_true (np.array):
@@ -481,6 +550,8 @@ def segment_wise_recall(y_true: np.array, y_pred: np.array):
     Returns:
         float: The segment-wise recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Segmentwise_metrics(len(y_true),y_true,y_pred)
     TP,FN = m.tp,m.fn
     if TP == 0:
@@ -498,7 +569,10 @@ def segment_wise_precision(y_true: np.array, y_pred: np.array):
     overlap with any ground-truth anomaly. The final precision is computed using these adjusted
     segment-level counts.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3219819.3219845
 
     Parameters:
         y_true (np.array):
@@ -509,6 +583,8 @@ def segment_wise_precision(y_true: np.array, y_pred: np.array):
     Returns:
         float: The segment-wise precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Segmentwise_metrics(len(y_true),y_true,y_pred)
     TP,FP = m.tp,m.fp
     if TP == 0:
@@ -526,7 +602,10 @@ def segment_wise_f_score(y_true: np.array, y_pred: np.array, beta=1):
     overlap with any ground-truth anomaly. The final F-score is computed using these adjusted
     segment-level counts.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3219819.3219845
 
     Parameters:
         y_true (np.array):
@@ -541,6 +620,8 @@ def segment_wise_f_score(y_true: np.array, y_pred: np.array, beta=1):
         float: The segment-wise F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
 
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Segmentwise_metrics(len(y_true),y_true,y_pred)
     TP,FN,FP = m.tp,m.fn,m.fp
     if TP==0:
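Segment-wise scoring counts whole events: a true segment that is hit anywhere is one true positive, a missed true segment is one false negative, and a predicted segment with no overlap is one false positive. A small sketch of that rule (the expected values follow from the description above, not from running the package):

```python
import numpy as np
from tsadmetrics.binary_metrics import segment_wise_precision, segment_wise_recall

y_true = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0, 0])  # two true segments
y_pred = np.array([0, 1, 0, 0, 0, 0, 0, 0, 1, 1])  # hits the first, plus one spurious segment

print(segment_wise_recall(y_true, y_pred))     # 1 of 2 true segments detected -> expected 0.5
print(segment_wise_precision(y_true, y_pred))  # one spurious segment vs. one detected event -> expected 0.5
```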
@@ -563,6 +644,9 @@ def composite_f_score(y_true: np.array, y_pred: np.array, beta=1):
 
     Implementation of https://ieeexplore.ieee.org/document/9525836
 
+    For more information, see the original paper:
+    https://doi.org/10.1109/TNNLS.2021.3105827
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -576,6 +660,8 @@ def composite_f_score(y_true: np.array, y_pred: np.array, beta=1):
         float: The composite F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
 
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = Composite_f(len(y_true),y_true,y_pred)
     #Point wise precision
     precision = m.precision()
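The composite F-score of the cited TNNLS paper mixes granularities: a point-wise precision, consistent with the "#Point wise precision" comment visible in the body, combined with an event-level recall. Illustrative call:

```python
import numpy as np
from tsadmetrics.binary_metrics import composite_f_score

y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 1, 0, 0, 0, 1, 1, 0])

print(composite_f_score(y_true, y_pred, beta=1))
```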
@@ -597,7 +683,10 @@ def time_tolerant_recall(y_true: np.array, y_pred: np.array, t: int) -> float:
     This allows for small temporal deviations in the predictions to be tolerated. The adjusted predictions are then used
     to compute the standard point-wise recall.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    10.48550/arXiv.2008.05788
 
     Parameters:
         y_true (np.array):
@@ -610,6 +699,8 @@ def time_tolerant_recall(y_true: np.array, y_pred: np.array, t: int) -> float:
     Returns:
         float: The time tolerant recall score, which is the ratio of true positives to the sum of true positives and false negatives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
 
@@ -625,7 +716,10 @@ def time_tolerant_precision(y_true: np.array, y_pred: np.array, t: int) -> float
     This allows for small temporal deviations in the predictions to be tolerated. The adjusted predictions are then used
     to compute the standard point-wise precision.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    10.48550/arXiv.2008.05788
 
     Parameters:
         y_true (np.array):
@@ -638,6 +732,8 @@ def time_tolerant_precision(y_true: np.array, y_pred: np.array, t: int) -> float
     Returns:
         float: The time tolerant precision score, which is the ratio of true positives to the sum of true positives and false positives.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Time_Tolerant(len(y_true),y_true,y_pred, d=t)
@@ -653,7 +749,10 @@ def time_tolerant_f_score(y_true: np.array, y_pred: np.array, t: int, beta=1):
     This allows for small temporal deviations in the predictions to be tolerated.The adjusted predictions are then used
     to compute the standard point-wise F-Score.
 
-    Implementation of https://
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    10.48550/arXiv.2008.05788
 
     Parameters:
         y_true (np.array):
@@ -670,6 +769,8 @@ def time_tolerant_f_score(y_true: np.array, y_pred: np.array, t: int, beta=1):
         float: The time tolerant F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
 
     """
+    validate_binary_inputs(y_true, y_pred)
+
     precision = time_tolerant_precision(y_true,y_pred,t)
     recall = time_tolerant_recall(y_true,y_pred,t)
     if precision==0 or recall==0:
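Time-tolerant scoring accepts predictions that land within t steps of a true anomaly (the body passes t to Time_Tolerant as d). Illustrative calls with arbitrary tolerances:

```python
import numpy as np
from tsadmetrics.binary_metrics import time_tolerant_f_score

y_true = np.array([0, 0, 0, 1, 1, 0, 0, 0])
y_pred = np.array([0, 1, 0, 0, 0, 0, 0, 0])  # fires two steps before the event

print(time_tolerant_f_score(y_true, y_pred, t=2))  # within tolerance
print(time_tolerant_f_score(y_true, y_pred, t=1))  # outside tolerance
```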
@@ -688,6 +789,8 @@ def range_based_recall(y_true: np.array, y_pred: np.array, alpha: float, bias='f
     weighted by :math:`{\\alpha}` (existence vs. overlap) and further shaped by customizable bias functions
     for positional and cardinality factors.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
     For more information, see the original paper:
     https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf
 
@@ -706,6 +809,8 @@ def range_based_recall(y_true: np.array, y_pred: np.array, alpha: float, bias='f
     Returns:
         float: The range-based recall score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Range_PR(len(y_true),y_true,y_pred,cardinality=cardinality_mode, alpha=alpha,bias=bias)
@@ -724,6 +829,8 @@ def range_based_precision(y_true: np.array, y_pred: np.array, alpha: float, bias
     These components are weighted by :math:`{\\alpha}` (existence vs. overlap) and further shaped by customizable bias functions
     for positional and cardinality factors.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
     For more information, see the original paper:
     https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf
 
@@ -742,6 +849,8 @@ def range_based_precision(y_true: np.array, y_pred: np.array, alpha: float, bias
     Returns:
         float: The range-based precision score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Range_PR(len(y_true),y_true,y_pred,cardinality=cardinality_mode, alpha=alpha,bias=bias)
@@ -761,6 +870,8 @@ def range_based_f_score(y_true: np.array, y_pred: np.array, p_alpha: float, r_al
     weighting, positional bias, and cardinality factors—allowing fine-grained control over how
     both missed detections and false alarms are penalized in a temporal context.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
     For more information, see the original paper:
     https://proceedings.neurips.cc/paper_files/paper/2018/file/8f468c873a32bb0619eaeb2050ba45d1-Paper.pdf
 
@@ -787,6 +898,8 @@ def range_based_f_score(y_true: np.array, y_pred: np.array, p_alpha: float, r_al
     Returns:
         float: The range-based F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     f = ts_fscore(y_true, y_pred, beta=beta, p_alpha=p_alpha, r_alpha=r_alpha, cardinality=cardinality_mode, p_bias=p_bias, r_bias=r_bias)
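The range-based functions expose the knobs from the cited NeurIPS 2018 paper directly: alpha trades off rewarding the mere existence of a detection against the amount of overlap, while the bias and cardinality arguments shape positional weighting. A sketch using only the required arguments and default biases:

```python
import numpy as np
from tsadmetrics.binary_metrics import range_based_precision, range_based_recall

y_true = np.array([0, 1, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 1, 1, 0, 0, 0, 0, 1, 1])

# alpha=1 rewards only that each true range was touched; alpha=0 scores pure overlap.
print(range_based_recall(y_true, y_pred, alpha=1.0))
print(range_based_recall(y_true, y_pred, alpha=0.0))
print(range_based_precision(y_true, y_pred, alpha=0.0))
```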
@@ -806,6 +919,11 @@ def ts_aware_recall(y_true: np.array, y_pred: np.array, alpha: float, delta: flo
     Unlike the original range-based formulation, this variant omits cardinality and positional bias terms,
     focusing solely on overlap fraction and end‑tolerance decay.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3357384.3358118
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -830,6 +948,8 @@ def ts_aware_recall(y_true: np.array, y_pred: np.array, alpha: float, delta: flo
     Returns:
         float: The time series aware recall score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = TaF(len(y_true),y_true,y_pred,alpha=alpha,theta=theta,delta=delta,past_range=past_range)
     return m.recall()
 
@@ -847,6 +967,11 @@ def ts_aware_precision(y_true: np.array, y_pred: np.array,alpha: float, delta: f
     Unlike the original range-based formulation, this variant omits cardinality and positional bias terms,
     focusing solely on overlap fraction and end‑tolerance decay.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3357384.3358118
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -871,6 +996,8 @@ def ts_aware_precision(y_true: np.array, y_pred: np.array,alpha: float, delta: f
     Returns:
         float: The time series aware precision score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     m = TaF(len(y_true),y_true,y_pred,alpha=alpha,theta=theta,delta=delta,past_range=past_range)
     return m.precision()
 
@@ -889,6 +1016,11 @@ def ts_aware_f_score(y_true: np.array, y_pred: np.array, beta: float, alpha: flo
     Unlike the original range-based formulation, this variant omits cardinality and positional bias terms,
     focusing solely on overlap fraction and end‑tolerance decay.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3357384.3358118
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -916,7 +1048,8 @@ def ts_aware_f_score(y_true: np.array, y_pred: np.array, beta: float, alpha: flo
     Returns:
         float: The time series aware F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     m = TaF(len(y_true),y_true,y_pred,alpha=alpha,theta=theta,delta=delta,past_range=past_range)
     precision = m.precision()
     recall = m.recall()
@@ -937,6 +1070,11 @@ def enhanced_ts_aware_recall(y_true: np.array, y_pred: np.array, theta: float):
     and overlap proportion. Additionally, it requires that a significant fraction :math:`{\\theta}` of each true anomaly
     segment be detected.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3477314.3507024
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -949,6 +1087,8 @@ def enhanced_ts_aware_recall(y_true: np.array, y_pred: np.array, theta: float):
     Returns:
         float: The time series aware recall score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = eTaF(len(y_true),y_true,y_pred,theta_p=theta)
@@ -965,6 +1105,11 @@ def enhanced_ts_aware_precision(y_true: np.array, y_pred: np.array, theta: float
     of each predicted segment overlaps with the ground truth. Finally, precision contributions from each event are weighted by
     the square root of the true segment’s length, providing a compromise between point-wise and segment-wise approaches.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3477314.3507024
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -977,6 +1122,8 @@ def enhanced_ts_aware_precision(y_true: np.array, y_pred: np.array, theta: float
     Returns:
         float: The time series aware precision score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = eTaF(len(y_true),y_true,y_pred,theta_p=theta)
@@ -996,6 +1143,11 @@ def enhanced_ts_aware_f_score(y_true: np.array, y_pred: np.array, theta_p: float
     ground truth. Finally, F-score contributions from each event are weighted by the square root of the
     true segment’s length, providing a compromise between point-wise and segment-wise approaches.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3477314.3507024
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -1011,6 +1163,8 @@ def enhanced_ts_aware_f_score(y_true: np.array, y_pred: np.array, theta_p: float
     Returns:
         float: The time series aware F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = eTaF(len(y_true),y_true,y_pred,theta_p=theta_p, theta_r=theta_r)
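For the enhanced time-series-aware variant, the bodies show eTaF driven by two thresholds: theta_p on the prediction side (how much of each predicted segment must overlap ground truth) and theta_r on the ground-truth side (how much of each true segment must be detected). A hedged usage sketch with arbitrary threshold values:

```python
import numpy as np
from tsadmetrics.binary_metrics import enhanced_ts_aware_f_score

y_true = np.array([0, 1, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 0])

# theta_p / theta_r values are illustrative, not recommended defaults.
print(enhanced_ts_aware_f_score(y_true, y_pred, theta_p=0.5, theta_r=0.1))
```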
@@ -1026,6 +1180,11 @@ def affiliation_based_recall(y_true: np.array, y_pred: np.array):
     It computes the average distance from each ground truth anomaly point to the nearest
     predicted anomaly point.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://dl.acm.org/doi/10.1145/3534678.3539339
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -1035,6 +1194,8 @@ def affiliation_based_recall(y_true: np.array, y_pred: np.array):
     Returns:
         float: The affiliation based recall score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Affiliation(len(y_true),y_true,y_pred)
@@ -1050,6 +1211,11 @@ def affiliation_based_precision(y_true: np.array, y_pred: np.array):
     It computes the average distance from each predicted anomaly point to the nearest
     ground truth anomaly point.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://dl.acm.org/doi/10.1145/3534678.3539339
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -1060,6 +1226,8 @@ def affiliation_based_precision(y_true: np.array, y_pred: np.array):
     Returns:
         float: The affiliation based precision score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Affiliation(len(y_true),y_true,y_pred)
@@ -1077,6 +1245,11 @@ def affiliation_based_f_score(y_true: np.array, y_pred: np.array, beta=1):
     the F-score reflects a balance between how well predicted anomalies align with true
     anomalies and vice versa.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://dl.acm.org/doi/10.1145/3534678.3539339
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -1089,6 +1262,8 @@ def affiliation_based_f_score(y_true: np.array, y_pred: np.array, beta=1):
     Returns:
         float: The affiliation based F-score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     m = Affiliation(len(y_true),y_true,y_pred)
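Affiliation-based scores are distance-based rather than count-based: recall looks from each true anomaly point to its nearest prediction, precision from each prediction to its nearest true anomaly. Illustrative calls:

```python
import numpy as np
from tsadmetrics.binary_metrics import (
    affiliation_based_f_score, affiliation_based_precision, affiliation_based_recall,
)

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
y_pred = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0])  # detection shifted by one step

print(affiliation_based_precision(y_true, y_pred))
print(affiliation_based_recall(y_true, y_pred))
print(affiliation_based_f_score(y_true, y_pred, beta=1))
```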
@@ -1104,6 +1279,11 @@ def nab_score(y_true: np.array, y_pred: np.array):
     positively to the score, with earlier detections receiving higher rewards. In contrast, every false
     positive prediction contributes negatively.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1109/ICMLA.2015.141
+
     Parameters:
         y_true (np.array):
             The ground truth binary labels for the time series data.
@@ -1114,7 +1294,8 @@ def nab_score(y_true: np.array, y_pred: np.array):
     Returns:
         float: The nab score.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     m = NAB_score(len(y_true),y_true,y_pred)
     return m.get_score()
 
@@ -1126,6 +1307,10 @@ def temporal_distance(y_true: np.array, y_pred: np.array, distance: int = 0):
     the closest predicted anomaly point, and from each predicted anomaly point to the
     closest labelled anomaly point.
 
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://sciendo.com/article/10.2478/ausi-2019-0008
 
     Parameters:
         y_true (np.array):
@@ -1141,7 +1326,8 @@ def temporal_distance(y_true: np.array, y_pred: np.array, distance: int = 0):
     Returns:
         float: The temporal distance.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     m = Temporal_Distance(len(y_true),y_true,y_pred,distance=distance)
     return m.get_score()
 
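nab_score rewards early detections and penalizes every false positive, while temporal_distance aggregates nearest-neighbour distances in both directions, so lower values are better there. Illustrative calls:

```python
import numpy as np
from tsadmetrics.binary_metrics import nab_score, temporal_distance

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
y_pred = np.array([0, 0, 1, 0, 0, 0, 0, 1, 0, 0])  # early hit plus one late false positive

print(nab_score(y_true, y_pred))
print(temporal_distance(y_true, y_pred, distance=0))
```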
@@ -1166,7 +1352,8 @@ def average_detection_count(y_true: np.array, y_pred: np.array):
     Returns:
         float: The average detection count score.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     b = Binary_detection(len(y_true),y_true,y_pred)
     azs = b.get_gt_anomalies_segmentwise()
     a_points = b.get_predicted_anomalies_ptwise()
@@ -1203,7 +1390,8 @@ def absolute_detection_distance(y_true: np.array, y_pred: np.array):
     Returns:
         float: The absolute detection distance.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     b = Binary_detection(len(y_true),y_true,y_pred)
     azs = b.get_gt_anomalies_segmentwise()
     a_points = b.get_predicted_anomalies_ptwise()
@@ -1255,6 +1443,8 @@ def total_detected_in_range(y_true: np.array, y_pred: np.array, k: int):
     Returns:
         float: The total detected in range score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     em,da,ma,_ = counting_method(y_true, y_pred, k)
@@ -1299,6 +1489,8 @@ def detection_accuracy_in_range(y_true: np.array, y_pred: np.array, k: int):
     Returns:
         float: The detection accuracy in range score.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
     em,da,_,fa = counting_method(y_true, y_pred, k)
@@ -1352,6 +1544,8 @@ def weighted_detection_difference(y_true: np.array, y_pred: np.array, k: int):
     Returns:
         float: The weighted detection difference.
     """
+    validate_binary_inputs(y_true, y_pred)
+
     if np.sum(y_pred) == 0:
         return 0
 
@@ -1398,6 +1592,8 @@ def binary_pate(y_true: np.array, y_pred: np.array, early: int, delay: int):
     treated as false positives, and missed intervals as false negatives. The final score balances these
     weighted detections into a single measure of performance.
 
+    Implementation of https://arxiv.org/abs/2405.12096
+
     For more information, see the original paper:
     https://arxiv.org/abs/2405.12096
 
@@ -1414,7 +1610,8 @@ def binary_pate(y_true: np.array, y_pred: np.array, early: int, delay: int):
     Returns:
         float: The PATE score.
     """
-
+    validate_binary_inputs(y_true, y_pred)
+
     return PATE(y_true, y_pred, early, delay, binary_scores=True)
 
 def mean_time_to_detect(y_true: np.array, y_pred: np.array):
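binary_pate forwards straight to PATE with binary_scores=True; per the cited PATE paper, early and delay set buffer zones before and after each true interval within which detections still earn (reduced) credit — treat that reading as an assumption here. Illustrative call with arbitrary buffers:

```python
import numpy as np
from tsadmetrics.binary_metrics import binary_pate

y_true = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0])
y_pred = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])  # starts one step early, ends one step short

print(binary_pate(y_true, y_pred, early=2, delay=2))
```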
@@ -1438,12 +1635,10 @@ def mean_time_to_detect(y_true: np.array, y_pred: np.array):
         y_pred (np.array):
             The predicted binary labels for the time series data.
 
-    For more information, see the original paper:
-    https://arxiv.org/pdf/2211.05244
-
     Returns:
         float: The mean time to detect.
     """
+    validate_binary_inputs(y_true, y_pred)
 
     b = Binary_detection(len(y_true),y_true,y_pred)
     a_events = b.get_gt_anomalies_segmentwise()
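mean_time_to_detect keeps its signature; this release only drops the misplaced citation lines from its docstring and adds the validation call. Illustrative call:

```python
import numpy as np
from tsadmetrics.binary_metrics import mean_time_to_detect

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0])
y_pred = np.array([0, 0, 0, 0, 1, 1, 0, 0])  # first detection two steps after the event starts

print(mean_time_to_detect(y_true, y_pred))  # expected to be on the order of 2 time steps
```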