tsadmetrics 0.1.16__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/api_doc/conf.py +67 -0
- docs/{conf.py → full_doc/conf.py} +1 -1
- docs/manual_doc/conf.py +67 -0
- examples/example_direct_data.py +28 -0
- examples/example_direct_single_data.py +25 -0
- examples/example_file_reference.py +24 -0
- examples/example_global_config_file.py +13 -0
- examples/example_metric_config_file.py +19 -0
- examples/example_simple_metric.py +8 -0
- examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
- examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
- examples/specific_examples/AverageDetectionCount_example.py +24 -0
- examples/specific_examples/CompositeFScore_example.py +24 -0
- examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
- examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
- examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
- examples/specific_examples/MeanTimeToDetect_example.py +24 -0
- examples/specific_examples/NabScore_example.py +24 -0
- examples/specific_examples/PateFScore_example.py +24 -0
- examples/specific_examples/Pate_example.py +24 -0
- examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
- examples/specific_examples/PointadjustedAucPr_example.py +24 -0
- examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
- examples/specific_examples/PointadjustedFScore_example.py +24 -0
- examples/specific_examples/RangebasedFScore_example.py +24 -0
- examples/specific_examples/SegmentwiseFScore_example.py +24 -0
- examples/specific_examples/TemporalDistance_example.py +24 -0
- examples/specific_examples/TimeTolerantFScore_example.py +24 -0
- examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/TotalDetectedInRange_example.py +24 -0
- examples/specific_examples/VusPr_example.py +24 -0
- examples/specific_examples/VusRoc_example.py +24 -0
- examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
- tests/test_dpm.py +212 -0
- tests/test_ptdm.py +366 -0
- tests/test_registry.py +58 -0
- tests/test_runner.py +185 -0
- tests/test_spm.py +213 -0
- tests/test_tmem.py +198 -0
- tests/test_tpdm.py +369 -0
- tests/test_tstm.py +338 -0
- tsadmetrics/__init__.py +0 -21
- tsadmetrics/base/Metric.py +188 -0
- tsadmetrics/evaluation/Report.py +25 -0
- tsadmetrics/evaluation/Runner.py +253 -0
- tsadmetrics/metrics/Registry.py +141 -0
- tsadmetrics/metrics/__init__.py +2 -0
- tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
- tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
- tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
- tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
- tsadmetrics/metrics/spm/__init__.py +9 -0
- tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
- tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
- tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
- tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
- tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
- tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
- tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
- tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
- tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
- tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
- tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
- tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
- tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
- tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
- tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
- tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
- tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
- tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
- tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
- tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
- tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
- tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
- tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
- tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
- tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
- tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
- tsadmetrics/utils/functions_auc.py +393 -0
- tsadmetrics/utils/functions_conversion.py +63 -0
- tsadmetrics/utils/functions_counting_metrics.py +26 -0
- tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
- tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
- tsadmetrics-1.0.0.dist-info/METADATA +69 -0
- tsadmetrics-1.0.0.dist-info/RECORD +99 -0
- {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/top_level.txt +1 -1
- entorno/bin/activate_this.py +0 -32
- entorno/bin/rst2html.py +0 -23
- entorno/bin/rst2html4.py +0 -26
- entorno/bin/rst2html5.py +0 -33
- entorno/bin/rst2latex.py +0 -26
- entorno/bin/rst2man.py +0 -27
- entorno/bin/rst2odt.py +0 -28
- entorno/bin/rst2odt_prepstyles.py +0 -20
- entorno/bin/rst2pseudoxml.py +0 -23
- entorno/bin/rst2s5.py +0 -24
- entorno/bin/rst2xetex.py +0 -27
- entorno/bin/rst2xml.py +0 -23
- entorno/bin/rstpep2html.py +0 -25
- tests/test_binary.py +0 -946
- tests/test_non_binary.py +0 -420
- tests/test_utils.py +0 -49
- tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
- tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
- tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
- tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
- tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
- tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
- tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
- tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
- tsadmetrics/_tsadeval/metrics.py +0 -698
- tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
- tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
- tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
- tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
- tsadmetrics/_tsadeval/tests.py +0 -376
- tsadmetrics/_tsadeval/threshold_plt.py +0 -30
- tsadmetrics/_tsadeval/time_tolerant.py +0 -33
- tsadmetrics/binary_metrics.py +0 -1652
- tsadmetrics/metric_utils.py +0 -98
- tsadmetrics/non_binary_metrics.py +0 -398
- tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics/scripts/compute_metrics.py +0 -42
- tsadmetrics/utils.py +0 -122
- tsadmetrics/validation.py +0 -35
- tsadmetrics-0.1.16.dist-info/METADATA +0 -23
- tsadmetrics-0.1.16.dist-info/RECORD +0 -64
- tsadmetrics-0.1.16.dist-info/entry_points.txt +0 -2
- /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
- {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
tsadmetrics/metrics/spm/PointwiseAucPr.py (added)
@@ -0,0 +1,62 @@
+from ...base.Metric import Metric
+import numpy as np
+from ...utils.functions_auc import precision_recall_curve
+
+class PointwiseAucPr(Metric):
+    """
+    Point-wise Area Under the Precision-Recall Curve (AUC-PR) for anomaly detection.
+
+    This metric computes the standard Area Under the Precision-Recall Curve (AUC-PR)
+    in a **point-wise manner**. Each time-series data point is treated independently
+    when calculating precision and recall, making this suitable for anomaly detection tasks
+    where anomalies are labeled at the individual point level.
+
+    Reference:
+        Implementation based on:
+        https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    Attributes:
+        name (str):
+            Fixed name identifier for this metric: `"pw_auc_pr"`.
+        binary_prediction (bool):
+            Indicates whether this metric expects binary predictions. Always `False`
+            since it requires continuous anomaly scores.
+
+    Raises:
+        ValueError:
+            If input arrays are invalid or improperly shaped (handled by the base class).
+        TypeError:
+            If inputs are not array-like.
+    """
+
+    name = "pw_auc_pr"
+    binary_prediction = False
+    def __init__(self, **kwargs):
+        """
+        Initialize the PointwiseAucPr metric.
+
+        Parameters:
+            **kwargs:
+                Additional keyword arguments passed to the base `Metric` class.
+                These may include configuration parameters or overrides.
+        """
+        super().__init__(name="pw_auc_pr", **kwargs)
+
+    def _compute(self, y_true, y_anomaly_scores):
+        """
+        Compute the point-wise AUC-PR score.
+
+        Parameters:
+            y_true (np.ndarray):
+                Ground-truth binary labels for the time series.
+                Values must be 0 (normal) or 1 (anomaly).
+            y_anomaly_scores (np.ndarray):
+                Continuous anomaly scores assigned to each point in the series.
+
+        Returns:
+            float:
+                The computed point-wise AUC-PR score.
+        """
+
+        precision, recall, _ = precision_recall_curve(y_true, y_anomaly_scores)
+        return -np.sum(np.diff(recall) * np.array(precision)[:-1])
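For orientation (not part of the diff): a minimal sketch of the step-sum that `_compute` performs, with scikit-learn's `precision_recall_curve` standing in for the package's own helper from `tsadmetrics.utils.functions_auc`, which is not included in this hunk.

```python
# Illustration only: reproduces the step-sum used in _compute above on toy data.
import numpy as np
from sklearn.metrics import precision_recall_curve  # stand-in for functions_auc

y_true = np.array([0, 0, 1, 1, 0, 0, 1, 0])
y_scores = np.array([0.1, 0.3, 0.8, 0.7, 0.2, 0.4, 0.9, 0.05])

precision, recall, _ = precision_recall_curve(y_true, y_scores)
# recall is non-increasing, so np.diff(recall) <= 0 and the leading minus
# turns the step-wise sum into a positive area under the PR curve.
auc_pr = -np.sum(np.diff(recall) * np.array(precision)[:-1])
print(round(auc_pr, 4))
```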
tsadmetrics/metrics/spm/PointwiseAucRoc.py (added)
@@ -0,0 +1,63 @@
+from ...base.Metric import Metric
+import numpy as np
+from ...utils.functions_auc import roc_curve, auc
+
+class PointwiseAucRoc(Metric):
+    """
+    Point-wise Area Under the Receiver Operating Characteristic Curve (AUC-ROC) for anomaly detection.
+
+    This metric computes the standard Area Under the ROC Curve (AUC-ROC)
+    in a **point-wise manner**. Each time-series data point is treated independently
+    when calculating true positives, false positives, and false negatives.
+    It is widely used to evaluate the ability of anomaly scoring functions
+    to distinguish between normal and anomalous points.
+
+    Reference:
+        Implementation based on:
+        https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    Attributes:
+        name (str):
+            Fixed name identifier for this metric: `"pw_auc_roc"`.
+        binary_prediction (bool):
+            Indicates whether this metric expects binary predictions. Always `False`
+            since it requires continuous anomaly scores.
+
+    Raises:
+        ValueError:
+            If input arrays are invalid or improperly shaped (validated in the base class).
+        TypeError:
+            If inputs are not array-like.
+    """
+
+    name = "pw_auc_roc"
+    binary_prediction = False
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the PointwiseAucRoc metric.
+
+        Parameters:
+            **kwargs:
+                Additional keyword arguments passed to the base `Metric` class.
+                These may include configuration parameters or overrides.
+        """
+        super().__init__(name="pw_auc_roc", **kwargs)
+
+    def _compute(self, y_true, y_anomaly_scores):
+        """
+        Compute the point-wise AUC-ROC score.
+
+        Parameters:
+            y_true (np.ndarray):
+                Ground-truth binary labels for the time series.
+                Values must be 0 (normal) or 1 (anomaly).
+            y_anomaly_scores (np.ndarray):
+                Continuous anomaly scores assigned to each point in the series.
+
+        Returns:
+            float:
+                The computed point-wise AUC-ROC score.
+        """
+        fpr, tpr, _ = roc_curve(y_true, y_anomaly_scores)
+        return auc(fpr, tpr)
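Same idea for the ROC variant; again scikit-learn's `roc_curve`/`auc` are used below only as stand-ins for the `functions_auc` helpers that this hunk imports but does not define.

```python
# Illustration only: point-wise AUC-ROC on toy data.
import numpy as np
from sklearn.metrics import roc_curve, auc  # stand-in for functions_auc

y_true = np.array([0, 0, 1, 1, 0, 0, 1, 0])
y_scores = np.array([0.1, 0.3, 0.8, 0.7, 0.2, 0.4, 0.9, 0.05])

fpr, tpr, _ = roc_curve(y_true, y_scores)
print(round(auc(fpr, tpr), 4))  # trapezoidal area under the ROC curve
```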
tsadmetrics/metrics/spm/PointwiseFScore.py (added)
@@ -0,0 +1,86 @@
+from ...base.Metric import Metric
+import numpy as np
+
+class PointwiseFScore(Metric):
+    """
+    Point-wise F-score for anomaly detection in time series.
+
+    This metric computes the classical F-score without considering temporal context,
+    treating each time-series point independently. It balances precision and recall
+    according to the configurable parameter `beta`.
+
+    Reference:
+        Implementation based on:
+        https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    Parameters:
+        beta (float, optional):
+            The beta value determines the relative weight of recall compared to precision.
+            A value of 1.0 gives equal weight (F1-score).
+            Default is 1.0.
+
+    Attributes:
+        name (str):
+            Fixed name identifier for this metric: `"pwf"`.
+        binary_prediction (bool):
+            Indicates whether this metric expects binary predictions. Always `True`.
+        param_schema (dict):
+            Schema for supported parameters:
+            - `beta` (float, default=1.0).
+
+    Raises:
+        ValueError:
+            If required parameters are missing (validated by the base class).
+        TypeError:
+            If parameter types do not match the schema (validated by the base class).
+    """
+
+    name = "pwf"
+    binary_prediction = True
+    param_schema = {
+        "beta": {
+            "default": 1.0,
+            "type": float
+        }
+    }
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the PointwiseFScore metric.
+
+        Parameters:
+            **kwargs:
+                Additional keyword arguments passed to the base `Metric` class.
+                These may include configuration parameters such as `beta`.
+        """
+        super().__init__(name="pwf", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Compute the point-wise F-score.
+
+        Parameters:
+            y_true (np.ndarray):
+                Ground-truth binary labels for the time series.
+                Values must be 0 (normal) or 1 (anomaly).
+            y_pred (np.ndarray):
+                Predicted binary labels for the time series.
+                Values must be 0 (normal) or 1 (anomaly).
+
+        Returns:
+            float:
+                The computed point-wise F-score.
+                Returns 0 if either precision or recall is 0.
+        """
+        tp = np.sum(y_pred * y_true)
+        fp = np.sum(y_pred * (1 - y_true))
+        fn = np.sum((1 - y_pred) * y_true)
+
+        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
+        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
+
+        if precision == 0 or recall == 0:
+            return 0
+
+        beta = self.params['beta']
+        return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
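A minimal standalone sketch of the same F-beta computation on toy labels (plain NumPy, not a call into the tsadmetrics API):

```python
# Mirrors the arithmetic of PointwiseFScore._compute above.
import numpy as np

def pointwise_fbeta(y_true, y_pred, beta=1.0):
    tp = np.sum(y_pred * y_true)
    fp = np.sum(y_pred * (1 - y_true))
    fn = np.sum((1 - y_pred) * y_true)
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    if precision == 0 or recall == 0:
        return 0.0
    return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1])
y_pred = np.array([0, 1, 1, 1, 0, 0, 0, 1])
print(pointwise_fbeta(y_true, y_pred))          # 0.75 (F1)
print(pointwise_fbeta(y_true, y_pred, beta=2))  # recall-weighted F2
```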
tsadmetrics/metrics/spm/PrecisionAtK.py (added)
@@ -0,0 +1,81 @@
+from ...base.Metric import Metric
+import numpy as np
+
+class PrecisionAtK(Metric):
+    """
+    Precision at K (P@K) for anomaly detection in time series.
+
+    This metric evaluates how many of the top-`k` points with the highest anomaly
+    scores correspond to true anomalies. It is particularly useful when focusing
+    on identifying the most anomalous points rather than setting a global threshold.
+
+    By definition, `k` is automatically set to the number of true anomalies present
+    in `y_true`.
+
+    .. math::
+        k = \sum(y\_true)
+
+    Reference:
+        Implementation based on:
+        https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    Attributes:
+        name (str):
+            Fixed name identifier for this metric: `"pak"`.
+        binary_prediction (bool):
+            Indicates whether this metric expects binary predictions. Always `False`
+            since it requires continuous anomaly scores.
+
+    Raises:
+        AssertionError:
+            - If the number of true anomalies (`k`) is zero.
+            - If the number of predicted positives is less than `k`.
+        ValueError:
+            If input arrays are invalid or improperly shaped (validated in the base class).
+        TypeError:
+            If inputs are not array-like.
+    """
+
+    name = "pak"
+    binary_prediction = False
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the PrecisionAtK metric.
+
+        Parameters:
+            **kwargs:
+                Additional keyword arguments passed to the base `Metric` class.
+                These may include configuration parameters or overrides.
+        """
+        super().__init__(name="pak", **kwargs)
+
+    def _compute(self, y_true, y_anomaly_scores):
+        """
+        Compute the Precision at K (P@K) score.
+
+        Parameters:
+            y_true (np.ndarray):
+                Ground-truth binary labels for the time series.
+                Values must be 0 (normal) or 1 (anomaly).
+            y_anomaly_scores (np.ndarray):
+                Continuous anomaly scores assigned to each point in the series.
+
+        Returns:
+            float:
+                The precision at K score, where K = number of anomalies in `y_true`.
+
+        Raises:
+            AssertionError:
+                If `k = sum(y_true)` is 0.
+                If fewer than `k` points are predicted as anomalies.
+        """
+        k = int(sum(y_true))
+        assert k > 0, "The number of true anomalies (k) must be greater than zero."
+        threshold = np.sort(y_anomaly_scores)[-k]
+
+        pred = y_anomaly_scores >= threshold
+        assert sum(pred) >= k, (
+            f"Number of predicted positives ({sum(pred)}) should be >= k ({k})."
+        )
+        return np.dot(pred, y_true) / sum(pred)
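A short sketch of the P@K logic above: `k` is set to the number of true anomalies, and the `k` highest-scoring points are treated as the predicted positives.

```python
# Toy reproduction of PrecisionAtK._compute; ties at the threshold can make
# the number of predicted positives exceed k, which is why the code above
# divides by sum(pred) rather than by k.
import numpy as np

y_true = np.array([0, 0, 1, 1, 0, 0, 1, 0])
y_scores = np.array([0.1, 0.3, 0.8, 0.7, 0.2, 0.4, 0.9, 0.05])

k = int(y_true.sum())                     # k = 3 here
threshold = np.sort(y_scores)[-k]         # k-th largest score
pred = y_scores >= threshold
print(np.dot(pred, y_true) / pred.sum())  # 1.0: the top-3 scores are all true anomalies
```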
tsadmetrics/metrics/spm/__init__.py (added)
@@ -0,0 +1,9 @@
+from .PointwiseFScore import PointwiseFScore
+from .PrecisionAtK import PrecisionAtK
+from .PointwiseAucRoc import PointwiseAucRoc
+from .PointwiseAucPr import PointwiseAucPr
+
+__all__ = ['PointwiseFScore',
+           'PrecisionAtK',
+           'PointwiseAucRoc',
+           'PointwiseAucPr']
tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py (added)
@@ -0,0 +1,83 @@
+from ....base.Metric import Metric
+import numpy as np
+from ....utils.functions_conversion import full_series_to_segmentwise
+
+
+class DelayThresholdedPointadjustedFScore(Metric):
+    """
+    Calculate delay thresholded point-adjusted F-score for anomaly detection in time series.
+
+    This metric is based on the standard F-score, but applies a temporal adjustment
+    to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
+    if at least one point within the first k time steps of the segment is predicted as anomalous,
+    all points in the segment are marked as correctly detected. The adjusted predictions are then
+    compared to the ground-truth labels using the standard point-wise F-score formulation.
+
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3292500.3330680
+
+    Parameters:
+        k (int):
+            Maximum number of time steps from the start of an anomaly segment within which a prediction must occur
+            for the segment to be considered detected.
+        beta (float):
+            The beta value, which determines the weight of precision in the combined score.
+            Default is 1, which gives equal weight to precision and recall.
+    """
+    name = "dtpaf"
+    binary_prediction = True
+    param_schema = {
+        "k": {
+            "default": 1,
+            "type": int
+        },
+        "beta": {
+            "default": 1.0,
+            "type": float
+        }
+    }
+
+    def __init__(self, **kwargs):
+        super().__init__(name="dtpaf", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Calculate the delay thresholded point-adjusted F-score.
+
+        Parameters:
+            y_true (np.array):
+                The ground truth binary labels for the time series data.
+            y_pred (np.array):
+                The predicted binary labels for the time series data.
+
+        Returns:
+            float: The computed delay thresholded point-adjusted F-score.
+        """
+
+        adjusted_prediction = y_pred.copy()
+        k = self.params["k"]
+
+        for start, end in full_series_to_segmentwise(y_true):
+            anomaly_adjusted = False
+            for i in range(start, min(start + k, end + 1)):
+                if adjusted_prediction[i] == 1:
+                    adjusted_prediction[start:end + 1] = 1
+                    anomaly_adjusted = True
+                    break
+            if not anomaly_adjusted:
+                adjusted_prediction[start:end + 1] = 0
+
+        tp = np.sum(adjusted_prediction * y_true)
+        fp = np.sum(adjusted_prediction * (1 - y_true))
+        fn = np.sum((1 - adjusted_prediction) * y_true)
+
+        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
+        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
+
+        if precision == 0 or recall == 0:
+            return 0
+
+        beta = self.params["beta"]
+        return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
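To make the adjustment step concrete, here is a small sketch of the delay-thresholded point adjustment with the ground-truth segment written out by hand (the `full_series_to_segmentwise` helper lives in `tsadmetrics.utils.functions_conversion` and is not shown in this diff).

```python
# One true segment spanning indices 2..5; detection must occur within the
# first k steps of the segment for the whole segment to be credited.
import numpy as np

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0])
y_pred = np.array([0, 0, 0, 1, 0, 0, 0, 1])
segments = [(2, 5)]
k = 2

adjusted = y_pred.copy()
for start, end in segments:
    if adjusted[start:min(start + k, end + 1)].any():  # detected early enough?
        adjusted[start:end + 1] = 1                    # credit the whole segment
    else:
        adjusted[start:end + 1] = 0                    # otherwise no credit at all
print(adjusted)  # [0 0 1 1 1 1 0 1]: segment credited; the stray FP at index 7 remains
```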
tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py (added)
@@ -0,0 +1,76 @@
+from ....base.Metric import Metric
+import numpy as np
+from ....utils.functions_latency_sparsity_aware import calc_twseq
+
+class LatencySparsityawareFScore(Metric):
+    """
+    Calculate latency and sparsity aware F-score for anomaly detection in time series.
+
+    This metric is based on the standard F-score, but applies a temporal adjustment
+    to the predictions before computing it. Specifically, for each ground-truth anomalous segment,
+    all points in the segment are marked as correctly detected only after the first true positive
+    is predicted within that segment. This encourages early detection by delaying credit for correct
+    predictions until the anomaly is initially detected. Additionally, to reduce the impact of
+    scattered false positives, predictions are subsampled using a sparsity factor n, so that
+    only one prediction is considered every n time steps. The adjusted predictions are then used
+    to _compute the standard point-wise F-score.
+
+    Implementation of https://dl.acm.org/doi/10.1145/3447548.3467174
+
+    For more information, see the original paper:
+    https://doi.org/10.1145/3447548.3467174
+
+    Parameters:
+        ni (int):
+            The batch size used in the implementation to handle latency and sparsity.
+        beta (float):
+            The beta value, which determines the weight of precision in the combined score.
+            Default is 1, which gives equal weight to precision and recall.
+    """
+    name = "lsaf"
+    binary_prediction = True
+    param_schema = {
+        "ni": {
+            "default": 1,
+            "type": int
+        },
+        "beta": {
+            "default": 1.0,
+            "type": float
+        }
+    }
+
+    def __init__(self, **kwargs):
+        super().__init__(name="lsaf", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Calculate the latency and sparsity aware F-score.
+
+        Parameters:
+            y_true (np.array):
+                The ground truth binary labels for the time series data.
+            y_pred (np.array):
+                The predicted binary labels for the time series data.
+
+        Returns:
+            float: The latency and sparsity aware F-score, which is the harmonic mean
+            of precision and recall, adjusted by the beta value.
+        """
+
+        if np.sum(y_pred) == 0:
+            return 0
+
+        _, precision, recall, _, _, _, _, _ = calc_twseq(
+            y_pred,
+            y_true,
+            normal=0,
+            threshold=0.5,
+            tw=self.params["ni"],
+        )
+
+        if precision == 0 or recall == 0:
+            return 0
+
+        beta = self.params["beta"]
+        return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
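The actual counting is delegated to `calc_twseq`, whose code is not part of this diff. The sketch below only illustrates the latency-aware adjustment described in the docstring (credit within a segment from the first detected point onward); it is not the `calc_twseq` implementation, and the `ni` subsampling of predictions is omitted.

```python
# Latency-aware credit: points before the first detection stay uncredited.
import numpy as np

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 0])
y_pred = np.array([0, 0, 0, 0, 1, 0, 0, 0])
segments = [(2, 5)]

adjusted = y_pred.copy()
for start, end in segments:
    hits = np.flatnonzero(y_pred[start:end + 1])
    if hits.size:                                # first true positive in the segment
        adjusted[start + hits[0]:end + 1] = 1    # credit from that point to the segment end
print(adjusted)  # [0 0 0 0 1 1 0 0]: the two missed leading points remain false negatives
```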
tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py (added)
@@ -0,0 +1,47 @@
+from ....base.Metric import Metric
+import numpy as np
+from ....utils.functions_conversion import full_series_to_segmentwise
+
+class MeanTimeToDetect(Metric):
+    """
+    Calculate mean time to detect for anomaly detection in time series.
+
+    This metric quantifies the average detection delay across all true anomaly events.
+    For each ground-truth anomaly segment, let i be the index where the segment starts,
+    and let :math:`{j \geq i}` be the first index within that segment where the model predicts an anomaly.
+    The detection delay for that event is defined as:
+
+    .. math::
+        \Delta t = j - i
+
+    The MTTD is the mean of all such :math:`{\Delta t}` values, one per true anomaly segment, and expresses
+    the average number of time steps between the true onset of an anomaly and its first detection.
+    """
+    name = "mttd"
+    binary_prediction = True
+    def __init__(self, **kwargs):
+        super().__init__(name="mttd", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Calculate the mean time to detect.
+
+        Parameters:
+            y_true (np.array):
+                The ground truth binary labels for the time series data.
+            y_pred (np.array):
+                The predicted binary labels for the time series data.
+
+        Returns:
+            float: The mean time to detect.
+        """
+
+        a_events = full_series_to_segmentwise(y_true)
+        t_sum = 0
+        for a, _ in a_events:
+            for i in range(a, len(y_pred)):
+                if y_pred[i] == 1:
+                    t_sum += i - a
+                    break
+
+        return t_sum / len(a_events)
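A quick worked example of the MTTD loop above, with the segment list written out by hand in place of `full_series_to_segmentwise`:

```python
# Two true segments; the first is detected 2 steps late, the second immediately.
import numpy as np

y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1])
y_pred = np.array([0, 0, 0, 1, 0, 0, 1, 0])
segments = [(1, 3), (6, 7)]

t_sum = 0
for start, _ in segments:
    hit = np.flatnonzero(y_pred[start:])   # first prediction at or after the onset
    if hit.size:                           # mirrors the break-on-first-hit loop above
        t_sum += int(hit[0])
print(t_sum / len(segments))               # (2 + 0) / 2 = 1.0 time steps on average
```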
tsadmetrics/metrics/tem/dpm/NabScore.py (added)
@@ -0,0 +1,60 @@
+from ....base.Metric import Metric
+import numpy as np
+from ....utils.functions_conversion import full_series_to_pointwise
+from ....utils.functions_nabscore import Sweeper, calculate_scores
+
+class NabScore(Metric):
+    """
+    Calculate NAB score for anomaly detection in time series.
+
+    This metric rewards early and accurate detections of anomalies while penalizing false positives.
+    For each ground truth anomaly segment, only the first correctly predicted anomaly point contributes
+    positively to the score, with earlier detections receiving higher rewards. In contrast, every false
+    positive prediction contributes negatively.
+
+    Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+    For more information, see the original paper:
+    https://doi.org/10.1109/ICMLA.2015.141
+    """
+    name = "nab_score"
+    binary_prediction = True
+    def __init__(self, **kwargs):
+        super().__init__(name="nab_score", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Calculate the NAB score.
+
+        Parameters:
+            y_true (np.array):
+                The ground truth binary labels for the time series data.
+            y_pred (np.array):
+                The predicted binary labels for the time series data.
+
+        Returns:
+            float: The computed NAB score.
+        """
+        sweeper = Sweeper(probationPercent=0, costMatrix={"tpWeight": 1, "fpWeight": 0.11, "fnWeight": 1})
+
+        if len(full_series_to_pointwise(y_pred)) == 0:
+            return 0
+        if len(full_series_to_pointwise(y_true)) == 0:
+            return np.nan
+
+        try:
+            sweeper, null_score, raw_score = calculate_scores(
+                sweeper,
+                full_series_to_pointwise(y_true),
+                full_series_to_pointwise(y_pred),
+                len(y_true)
+            )
+            sweeper, null_score, perfect_score = calculate_scores(
+                sweeper,
+                full_series_to_pointwise(y_true),
+                full_series_to_pointwise(y_true),
+                len(y_true)
+            )
+            return (raw_score - null_score) / (perfect_score - null_score) * 100
+        except Exception:
+            return 0
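The `Sweeper`/`calculate_scores` helpers come from `tsadmetrics.utils.functions_nabscore` and are not shown in this diff. The snippet below only illustrates the final normalization on the last return line of `_compute`; the numbers are made up for the example.

```python
# Hypothetical raw NAB scores, used only to show how the 0-100 scaling works.
raw_score = 3.2        # score of the evaluated detector
null_score = -5.0      # score of a detector that flags nothing
perfect_score = 8.0    # score of a detector that flags exactly y_true
nab = (raw_score - null_score) / (perfect_score - null_score) * 100
print(round(nab, 1))   # 63.1; 0 means "no better than doing nothing", 100 means perfect
```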
tsadmetrics/metrics/tem/dpm/__init__.py (added)
@@ -0,0 +1,11 @@
+from .DelayThresholdedPointadjustedFScore import DelayThresholdedPointadjustedFScore
+from .LatencySparsityawareFScore import LatencySparsityawareFScore
+from .MeanTimeToDetect import MeanTimeToDetect
+from .NabScore import NabScore
+
+__all__ = [
+    "DelayThresholdedPointadjustedFScore",
+    "LatencySparsityawareFScore",
+    "MeanTimeToDetect",
+    "NabScore"
+]
tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py (added)
@@ -0,0 +1,53 @@
+from ....base.Metric import Metric
+import numpy as np
+from ....utils.functions_conversion import full_series_to_segmentwise, full_series_to_pointwise
+
+class AverageDetectionCount(Metric):
+    """
+    Calculate average detection count for anomaly detection in time series.
+
+    This metric computes, for each ground-truth anomalous segment, the percentage of points within that segment
+    that are predicted as anomalous. It then averages these percentages across all true anomaly events,
+    providing an estimate of detection coverage per event.
+
+    For more information, see the original paper:
+    https://ceur-ws.org/Vol-1226/paper31.pdf
+
+    Parameters:
+        None
+    """
+
+    name = "adc"
+    binary_prediction = True
+    param_schema = {}
+
+    def __init__(self, **kwargs):
+        super().__init__(name="adc", **kwargs)
+
+    def _compute(self, y_true, y_pred):
+        """
+        Calculate the average detection count.
+
+        Parameters:
+            y_true (np.array):
+                The ground truth binary labels for the time series data.
+            y_pred (np.array):
+                The predicted binary labels for the time series data.
+
+        Returns:
+            float: The average detection count score.
+        """
+
+
+        azs = full_series_to_segmentwise(y_true)
+        a_points = full_series_to_pointwise(y_pred)
+
+        counts = []
+        for az in azs:
+            count = 0
+            for ap in a_points:
+                if ap >= az[0] and ap <= az[1]:
+                    count+=1
+            counts.append(count/(az[1] - az[0] + 1)) # Normalize by segment length
+
+        return np.mean(counts)
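A small sketch of the per-segment coverage that this hunk averages, with the segment/point conversion helpers replaced by hand-written values for one toy series:

```python
# Fraction of each true segment that is flagged, then averaged across segments.
import numpy as np

y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1])
y_pred = np.array([0, 0, 1, 1, 0, 1, 0, 1])
segments = [(1, 3), (6, 7)]              # true anomaly segments (start, end)
pred_points = np.flatnonzero(y_pred)     # indices predicted anomalous: [2 3 5 7]

coverage = []
for start, end in segments:
    inside = np.sum((pred_points >= start) & (pred_points <= end))
    coverage.append(inside / (end - start + 1))   # fraction of the segment detected
print(coverage)           # [0.666..., 0.5]
print(np.mean(coverage))  # ~0.583
```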