tsadmetrics 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- tsadmetrics/binary_metrics.py +404 -232
- tsadmetrics/non_binary_metrics.py +51 -24
- tsadmetrics/utils.py +13 -7
- {tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/METADATA +1 -1
- {tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/RECORD +7 -7
- {tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/WHEEL +0 -0
- {tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/top_level.txt +0 -0
tsadmetrics/binary_metrics.py CHANGED

@@ -15,8 +15,13 @@ def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
     y_true. That is, k = sum(y_true).
 
     Parameters:
-
-
+        y_true (np.array):
+            The ground truth binary labels for the time series data.
+        y_anomaly_scores (np.array):
+            The predicted anomaly scores for the time series data.
+
+    Returns:
+        float: The precision at k score.
     """
     m = PatK_pw(y_true,y_anomaly_scores)
 
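A minimal usage sketch for precision_at_k as documented above. The import path assumes the function is exposed from tsadmetrics.binary_metrics (the module this hunk modifies); the toy arrays are illustrative only.

```python
import numpy as np

# Assumed import path, based on the module path shown in this diff.
from tsadmetrics.binary_metrics import precision_at_k

y_true = np.array([0, 0, 1, 1, 0, 0, 0, 1, 0, 0])          # k = sum(y_true) = 3
y_scores = np.array([0.1, 0.2, 0.9, 0.4, 0.3, 0.2, 0.1, 0.8, 0.2, 0.1])

# Fraction of the k highest-scoring points that are true anomalies.
print(precision_at_k(y_true, y_scores))
```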
@@ -31,8 +36,10 @@ def auc_roc_pw(y_true : np.array, y_anomaly_scores: np.array):
     independently when calculating true positives, false positives, and false negatives.
 
     Parameters:
-        y_true (np.array):
-
+        y_true (np.array):
+            Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Continuous anomaly scores assigned to each point in the series.
 
     Returns:
         float: AUC-ROC score.
@@ -52,8 +59,10 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
     independently when calculating precision and recall.
 
     Parameters:
-        y_true (np.array):
-
+        y_true (np.array):
+            Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Continuous anomaly scores assigned to each point in the series.
 
     Returns:
         float: AUC-PR score.
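The two hunks above document the point-wise variants, which score every time point independently. A short usage sketch under that reading, with the import path inferred from this diff and toy data only:

```python
import numpy as np

from tsadmetrics.binary_metrics import auc_roc_pw, auc_pr_pw  # assumed import path

y_true = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0])
y_scores = np.array([0.1, 0.2, 0.1, 0.7, 0.6, 0.4, 0.3, 0.2, 0.1, 0.1])

# Point-wise evaluation: each point contributes its own TP/FP/FN,
# so these behave like standard AUC-ROC / AUC-PR over the flattened labels.
print(auc_roc_pw(y_true, y_scores))
print(auc_pr_pw(y_true, y_scores))
```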
@@ -76,8 +85,10 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
     which are used to construct the PR curve.
 
     Parameters:
-        y_true (np.array):
-
+        y_true (np.array):
+            Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Continuous anomaly scores assigned to each point in the series.
 
     Returns:
         float: AUC-PR score (with point-adjusted evaluation).
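To make "point-adjusted evaluation" concrete: under the usual PA convention, a true anomaly segment counts as fully detected if any point inside it is flagged. The helper below is a hypothetical illustration of that adjustment for one fixed threshold, not tsadmetrics' internal implementation; auc_pr_pa applies the idea across thresholds to build the PR curve.

```python
import numpy as np

def point_adjust(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
    """Mark a whole ground-truth anomaly segment as detected if any point in it is predicted."""
    adjusted = y_pred.copy()
    # Boundaries of contiguous ground-truth anomaly segments.
    padded = np.concatenate(([0], y_true, [0]))
    starts = np.where(np.diff(padded) == 1)[0]
    ends = np.where(np.diff(padded) == -1)[0]  # exclusive end indices
    for s, e in zip(starts, ends):
        if adjusted[s:e].any():
            adjusted[s:e] = 1
    return adjusted

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1, 1, 0])
y_pred = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0])  # one hit inside the first segment
print(point_adjust(y_true, y_pred))  # -> [0 0 1 1 1 0 0 0 0 0]
```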
@@ -166,8 +177,10 @@ def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
     to compute precision and recall for constructing the PR curve.
 
     Parameters:
-        y_true (np.array):
-
+        y_true (np.array):
+            Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Continuous anomaly scores assigned to each point in the series.
 
     Returns:
         float: AUC-PR score (with segment-wise evaluation).
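A usage sketch contrasting the point-adjusted and segment-wise PR-AUC variants documented above on the same toy series; the import path is assumed from this diff's module layout, and the numeric outputs depend on the library's internals.

```python
import numpy as np

from tsadmetrics.binary_metrics import auc_pr_pa, auc_pr_sw  # assumed import path

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0])
y_scores = np.array([0.1, 0.2, 0.3, 0.9, 0.4, 0.2, 0.1, 0.1, 0.2, 0.8, 0.3, 0.1])

print(auc_pr_pa(y_true, y_scores))  # point-adjusted: a hit anywhere in a segment credits the whole segment
print(auc_pr_sw(y_true, y_scores))  # segment-wise: precision/recall counted over segments rather than points
```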
@@ -271,16 +284,21 @@ def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
     the ROC-AUC over different values of the tolerance parameter, from 0 to `window`, thus producing
     a volume under the ROC surface.
 
+    For more information, see the original paper:
+    https://dl.acm.org/doi/10.14778/3551793.3551830
+
     Parameters:
-        y_true (np.array):
-
-
+        y_true (np.array):
+            Ground-truth binary labels (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Anomaly scores for each time point.
+        window (int):
+            Maximum temporal tolerance `l` used to smooth the evaluation.
 
     Returns:
         float: VUS-ROC score.
 
-
-    https://dl.acm.org/doi/10.14778/3551793.3551830
+
     """
     m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
 
@@ -296,16 +314,21 @@ def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
     anomalies that are temporally close to the true events. The final metric integrates the PR-AUC
     over several levels of temporal tolerance (from 0 to `window`), yielding a volume under the PR surface.
 
+    For more information, see the original paper:
+    https://dl.acm.org/doi/10.14778/3551793.3551830
+
     Parameters:
-        y_true (np.array):
-
-
+        y_true (np.array):
+            Ground-truth binary labels (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Anomaly scores for each time point.
+        window (int):
+            Maximum temporal tolerance `l` used to smooth the evaluation.
 
     Returns:
         float: VUS-PR score.
 
-
-    https://dl.acm.org/doi/10.14778/3551793.3551830
+
     """
     m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
 
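Both VUS metrics above take the extra `window` argument (the maximum temporal tolerance `l` over which the AUC is integrated). A minimal sketch, with the import path assumed from the diff:

```python
import numpy as np

from tsadmetrics.binary_metrics import vus_roc, vus_pr  # assumed import path

y_true = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0])
y_scores = np.array([0.1, 0.2, 0.6, 0.5, 0.7, 0.4, 0.5, 0.2, 0.1, 0.1])

# Integrates ROC-AUC / PR-AUC over tolerance values 0..window (the default is window=4).
print(vus_roc(y_true, y_scores, window=4))
print(vus_pr(y_true, y_scores, window=4))
```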
@@ -331,10 +354,14 @@ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: i
     https://arxiv.org/abs/2405.12096
 
     Parameters:
-        y_true (np.array):
-
-
-
+        y_true (np.array):
+            Ground truth binary labels (0 = normal, 1 = anomaly).
+        y_anomaly_scores (np.array):
+            Real-valued anomaly scores for each time point.
+        early (int):
+            Length of the early buffer zone before each anomaly interval.
+        delay (int):
+            Length of the delay buffer zone after each anomaly interval.
 
     Returns:
         float: The real-valued PATE score.
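A usage sketch for real_pate as documented above: `early` and `delay` set the buffer zones before and after each true anomaly interval. The import path and toy data are assumptions for illustration.

```python
import numpy as np

from tsadmetrics.binary_metrics import real_pate  # assumed import path

y_true = np.array([0] * 20 + [1] * 5 + [0] * 20)   # one anomaly interval at t = 20..24

rng = np.random.default_rng(0)
y_scores = rng.random(45) * 0.3                    # low background scores
y_scores[22:28] = 0.9                              # detection partly inside the interval, partly in the delay zone

# Scores falling in the early/delay buffers are treated more leniently than plain false positives.
print(real_pate(y_true, y_scores, early=3, delay=3))
```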
tsadmetrics/utils.py CHANGED
@@ -7,14 +7,20 @@ def compute_metrics(y_true: np.array,y_pred: np.array, metrics: list, metrics_pa
     Computes the specified metrics for the given true and predicted values.
 
     Parameters:
-
-
-
-
-
-
+        y_true (np.array):
+            True labels.
+        y_pred (np.array):
+            Predicted labels or scores.
+        metrics (list):
+            List of metric names to compute.
+        metrics_params (dict):
+            Dictionary of parameters for each metric.
+        is_anomaly_score (bool):
+            Flag indicating if y_true and y_pred are anomaly scores. Otherwise, they are treated as binary labels.
+        verbose (bool):
+            Flag to print additional information.
     Returns:
-
+        results (DataFrame): DataFrame containing the computed metrics and their values.
     """
     if not (np.array_equal(np.unique(y_true), [0, 1]) or np.array_equal(np.unique(y_true), [0]) or np.array_equal(np.unique(y_true), [1])):
         raise ValueError("y_true must be binary labels (0 or 1).")
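A minimal sketch of compute_metrics based on the docstring above. The metric name strings and the shape of metrics_params are assumptions for illustration; check the library for the identifiers it actually accepts.

```python
import numpy as np

from tsadmetrics.utils import compute_metrics  # module path taken from this diff

y_true = np.array([0, 0, 1, 1, 0, 0, 1, 0])
y_scores = np.array([0.1, 0.3, 0.8, 0.7, 0.2, 0.1, 0.6, 0.2])

results = compute_metrics(
    y_true,
    y_scores,
    metrics=["vus_roc", "vus_pr"],                                       # assumed metric identifiers
    metrics_params={"vus_roc": {"window": 4}, "vus_pr": {"window": 4}},  # assumed per-metric structure
    is_anomaly_score=True,   # scores rather than binary predictions
    verbose=False,
)
print(results)  # DataFrame of metric names and values
```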
{tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tsadmetrics
-Version: 0.1.12
+Version: 0.1.14
 Summary: =?unknown-8bit?q?Librer=C3=ADa_para_evaluaci=C3=B3n_de_detecci=C3=B3n_de_anomal=C3=ADas?= en series temporales
 Home-page: https://github.com/pathsko/TSADmetrics
 Author: Pedro Rafael Velasco Priego
{tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/RECORD CHANGED

@@ -16,11 +16,11 @@ tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/test_binary.py,sha256=dj9BsKBo5rpWw4JGiKKoVkg4rIW4YylTie2VxH2DAGo,29787
 tests/test_non_binary.py,sha256=syANlwm0DKsL6geGeq6nQI6ZVe6T_YXWTyk2-Hmck4s,11308
 tsadmetrics/__init__.py,sha256=MTWOa43fgOdkMNo5NglCReRnB8hoF0ob2PIvDziCNHw,1575
-tsadmetrics/binary_metrics.py,sha256=
+tsadmetrics/binary_metrics.py,sha256=6GxE3HSiAC9OeDOpP6QFgPwbp-Q37-F3cUdyYcpRrxE,62841
 tsadmetrics/metric_utils.py,sha256=fm8v0X37_AlqWpkcUT9r3680QsjLljrHe2YuXkRLAZ4,10873
-tsadmetrics/non_binary_metrics.py,sha256=
+tsadmetrics/non_binary_metrics.py,sha256=O6AqceHrjCVV1kJPBzXQIgtiu6afzoiJz2biNsxf3_4,13389
 tsadmetrics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tsadmetrics/utils.py,sha256=
+tsadmetrics/utils.py,sha256=BqsG4DyP3AffuMFQCJ-Qy4YaDu4IkFudZWYCvyTGvvY,2444
 tsadmetrics/_tsadeval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tsadmetrics/_tsadeval/auc_roc_pr_plot.py,sha256=PHqJUXq2qI248XV9o04D8SsUJgowetaKq0Cu5bYrIAE,12689
 tsadmetrics/_tsadeval/discontinuity_graph.py,sha256=Ci65l_DPi6HTtb8NvQJe1najgGrRuEpOMWvSyi2AeR0,4088
@@ -53,7 +53,7 @@ tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py,sha256=pJz4iuPyVGNvwsaR
 tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py,sha256=jLkcMg7UNl25SHtZUBGkP-RV8HsvaZCtjakryl7PFWU,3204
 tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py,sha256=OhUJSm_I7VZ_gX_SSg8AYUq3_NW9rMIy7lAVsnOFw4Q,417
 tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py,sha256=LL-0pPer3ymovVRlktaHo5XDzpgiDhWOVfdPOzKR6og,3152
-tsadmetrics-0.1.
-tsadmetrics-0.1.
-tsadmetrics-0.1.
-tsadmetrics-0.1.
+tsadmetrics-0.1.14.dist-info/METADATA,sha256=TCFL9Dpv6zwwM_5n2HeCxKgFP-KB4AHYrvCe3rMZMOI,831
+tsadmetrics-0.1.14.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+tsadmetrics-0.1.14.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
+tsadmetrics-0.1.14.dist-info/RECORD,,
{tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/WHEEL: File without changes
{tsadmetrics-0.1.12.dist-info → tsadmetrics-0.1.14.dist-info}/top_level.txt: File without changes