tsadmetrics 0.1.9.tar.gz → 0.1.10.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. {tsadmetrics-0.1.9/tsadmetrics.egg-info → tsadmetrics-0.1.10}/PKG-INFO +1 -1
  2. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/pyproject.toml +1 -1
  3. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/setup.py +1 -1
  4. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tests/test_non_binary.py +2 -2
  5. tsadmetrics-0.1.10/tsadmetrics/non_binary_metrics.py +216 -0
  6. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10/tsadmetrics.egg-info}/PKG-INFO +1 -1
  7. tsadmetrics-0.1.9/tsadmetrics/non_binary_metrics.py +0 -92
  8. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/MANIFEST.in +0 -0
  9. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/README.md +0 -0
  10. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/activate_this.py +0 -0
  11. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2html.py +0 -0
  12. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2html4.py +0 -0
  13. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2html5.py +0 -0
  14. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2latex.py +0 -0
  15. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2man.py +0 -0
  16. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2odt.py +0 -0
  17. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2odt_prepstyles.py +0 -0
  18. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2pseudoxml.py +0 -0
  19. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2s5.py +0 -0
  20. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2xetex.py +0 -0
  21. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rst2xml.py +0 -0
  22. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/entorno/bin/rstpep2html.py +0 -0
  23. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/setup.cfg +0 -0
  24. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tests/__init__.py +0 -0
  25. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tests/test_binary.py +0 -0
  26. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/__init__.py +0 -0
  27. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/__init__.py +0 -0
  28. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
  29. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -0
  30. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_integral_interval.py +0 -0
  31. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -0
  32. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/generics.py +0 -0
  33. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/metrics.py +0 -0
  34. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -0
  35. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/discontinuity_graph.py +0 -0
  36. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -0
  37. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -0
  38. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -0
  39. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  40. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  41. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -0
  42. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -0
  43. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/latency_sparsity_aware.py +0 -0
  44. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/metrics.py +0 -0
  45. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/nabscore.py +0 -0
  46. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  47. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  48. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -0
  49. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -0
  50. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  51. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -0
  52. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -0
  53. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -0
  54. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -0
  55. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/tests.py +0 -0
  56. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/threshold_plt.py +0 -0
  57. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/time_tolerant.py +0 -0
  58. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/vus_utils.py +0 -0
  59. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/binary_metrics.py +0 -0
  60. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/metric_utils.py +0 -0
  61. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/py.typed +0 -0
  62. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics/utils.py +0 -0
  63. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/SOURCES.txt +0 -0
  64. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/dependency_links.txt +0 -0
  65. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/requires.txt +0 -0
  66. {tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/top_level.txt +0 -0
{tsadmetrics-0.1.9/tsadmetrics.egg-info → tsadmetrics-0.1.10}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tsadmetrics
-Version: 0.1.9
+Version: 0.1.10
 Summary: Librería para evaluación de detección de anomalías en series temporales
 Home-page: https://github.com/pathsko/TSADmetrics
 Author: Pedro Rafael Velasco Priego
{tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "tsadmetrics"
-version = "0.1.9"
+version = "0.1.10"
 description = "Librería para evaluación de detección de anomalías en series temporales"
 authors = [
     { name = "Pedro Rafael Velasco Priego", email = "i12veprp@uco.es" }
{tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="tsadmetrics",
-    version="0.1.9",
+    version="0.1.10",
     author="Pedro Rafael Velasco Priego",
     author_email="i12veprp@uco.es",
     description="A library for time series anomaly detection metrics and evaluation.",
{tsadmetrics-0.1.9 → tsadmetrics-0.1.10}/tests/test_non_binary.py
@@ -202,7 +202,7 @@ class TestAUCPRPA(unittest.TestCase):
         self.assertAlmostEqual(score, expected_score, places=4)
 
         score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
-        expected_score = 0.5
+        expected_score = 0.75
         self.assertAlmostEqual(score, expected_score, places=4)
 
         if len(self.y_true2) == len(self.y_pred4):
@@ -252,7 +252,7 @@ class TestAUCPRSW(unittest.TestCase):
         self.assertAlmostEqual(score, expected_score, places=4)
 
         score = round(auc_pr_sw(self.y_true1, self.y_pred3),2)
-        expected_score = 0.5
+        expected_score = 1
         self.assertAlmostEqual(score, expected_score, places=4)
 
 
tsadmetrics-0.1.10/tsadmetrics/non_binary_metrics.py
@@ -0,0 +1,216 @@
+import numpy as np
+from ._tsadeval.metrics import *
+from .metric_utils import transform_to_full_series
+from sklearn.metrics import auc
+from .binary_metrics import point_adjusted_precision, point_adjusted_recall, segment_wise_precision, segment_wise_recall
+from pate.PATE_metric import PATE
+def precision_at_k(y_true : np.array ,y_anomaly_scores: np.array):
+
+    m = PatK_pw(y_true,y_anomaly_scores)
+
+    return m.get_score()
+
+def auc_roc_pw(y_true : np.array ,y_anomaly_scores: np.array):
+
+    m = AUC_ROC(y_true,y_anomaly_scores)
+
+    return m.get_score()
+
+
+def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
+
+    m = AUC_PR_pw(y_true,y_anomaly_scores)
+
+    return m.get_score()
+
+
+
+def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
+    precisions = [1]
+    recalls = [0]
+    tps,fps,fns = [],[],[]
+
+    p_adj = PointAdjust(len(y_true),y_true,(np.array(y_anomaly_scores) >= 0.5).astype(int))
+    segments= p_adj.get_gt_anomalies_segmentwise()
+    idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
+    y_true_sorted = np.array(y_true)[idx]
+    y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
+
+    segment_mins = []
+    for start,end in segments:
+        anoms_scores = y_anomaly_scores[start:end+1]
+        segment_mins.append([np.max(anoms_scores),end-start+1])
+
+    for i_t in range(len(y_anomaly_scores_sorted)):
+        fp,tp,fn = 0,0,0
+        if i_t > 0 and y_anomaly_scores_sorted[i_t] == y_anomaly_scores_sorted[i_t-1] :
+            tp = tps[-1]
+            fp = fps[-1]
+            fn = fns[-1]
+        else:
+            if y_true_sorted[i_t] == 0:
+                #FP
+                if len(fps)==0:
+                    aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+                    for i in range(len(aux_y_pred)):
+                        if aux_y_pred[i] == 1 and y_true[i] == 0:
+                            fp+=1
+
+
+                else:
+                    fp=fps[i_t-1]+1
+            else:
+                if len(fps)==0:
+                    aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+                    for i in range(len(aux_y_pred)):
+                        if aux_y_pred[i] == 1 and y_true[i] == 0:
+                            fp+=1
+                else:
+                    fp=fps[i_t-1]
+            for score, length in segment_mins:
+                if score >= y_anomaly_scores_sorted[i_t]:
+                    #TP
+                    tp+= length
+                else:
+                    #FN
+                    fn+= length
+        tps.append(tp)
+        fns.append(fn)
+        fps.append(fp)
+    for tp,fp,fn in zip(tps,fps,fns):
+        if tp>0:
+            precisions.append(tp/(tp+fp))
+            recalls.append(tp/(tp+fn))
+        else:
+            precisions.append(0)
+            recalls.append(0)
+
+
+    recalls.append(1)
+    precisions.append(0)
+
+    auc_value = auc(recalls, precisions)
+    return auc_value
+
+
+
+
+def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
+    precisions = [1]
+    recalls = [0]
+    tps,fps,fns = [],[],[]
+
+
+    segments = []
+    i=0
+    while i < len(y_true):
+        if y_true[i] == 1:
+            start = i
+            end = i
+            while i < len(y_true) and y_true[i] == 1:
+                end = i
+                i += 1
+            segments.append([start,end])
+        i+=1
+    idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
+    y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
+
+    segment_mins = []
+    for start,end in segments:
+        anoms_scores = y_anomaly_scores[start:end+1]
+        segment_mins.append([np.max(anoms_scores),[start,end]])
+
+    for i_t in range(len(y_anomaly_scores_sorted)):
+        fp,tp,fn = 0,0,0
+
+
+        aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+        for score,seg in segment_mins:
+            start,end = seg
+            if score >= y_anomaly_scores_sorted[i_t]:
+                #TP
+                tp+= 1
+                if aux_y_pred[start]== 1:
+                    # Extender hacia la izquierda
+                    i = start - 1
+                    while i >= 0 and aux_y_pred[i] == 1:
+                        aux_y_pred[i] = 0
+                        i -= 1
+
+                if aux_y_pred[end] == 1:
+                    # Extender hacia la derecha
+                    i = end + 1
+                    while i < len(aux_y_pred) and aux_y_pred[i] == 1:
+                        aux_y_pred[i] = 0
+                        i += 1
+                aux_y_pred[start:end+1] = 0
+
+            else:
+                #FN
+                fn+= 1
+
+        if np.sum(aux_y_pred)>0:
+            fpsegments = []
+            i=0
+            while i < len(aux_y_pred):
+                if aux_y_pred[i] == 1:
+                    start = i
+                    end = i
+                    while i < len(aux_y_pred) and aux_y_pred[i] == 1:
+                        end = i
+                        i += 1
+                    fpsegments.append([start,end])
+                i+=1
+            fp = len(fpsegments)
+        else:
+            fp = 0
+
+
+        tps.append(tp)
+        fns.append(fn)
+        fps.append(fp)
+    for tp,fp,fn in zip(tps,fps,fns):
+        if tp>0:
+            precisions.append(tp/(tp+fp))
+            recalls.append(tp/(tp+fn))
+        else:
+            precisions.append(0)
+            recalls.append(0)
+
+
+
+    auc_value = auc(recalls, precisions)
+
+    return auc_value
+
+
+def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
+
+    m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
+
+    return m.get_score()
+
+
+def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
+
+    m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
+
+    return m.get_score()
+
+
+def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: int):
+    """
+    Calculate PATE score for anomaly detection in time series.
+    The PATE score is the ratio of the number of true positives to the sum of true positives, false positives, and false negatives, within a given early and delay range.
+
+    Parameters:
+    y_true (np.array): The ground truth binary labels for the time series data.
+    y_anomaly_scores (np.array): The predicted binary labels for the time series data.
+    early (int): The maximum number of time steps before an anomaly must be predicted to be considered early.
+    delay (int): The maximum number of time steps after an anomaly must be predicted to be considered delayed.
+
+    Returns:
+    float: The PATE score.
+    """
+
+    return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)
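For orientation, here is a minimal usage sketch of the rewritten module. It is a hypothetical example, not taken from the package's tests: it assumes tsadmetrics 0.1.10 is installed together with its scikit-learn and pate dependencies, and the toy arrays below are purely illustrative.

    import numpy as np
    from tsadmetrics.non_binary_metrics import auc_pr_pa, auc_pr_sw, real_pate

    # One ground-truth anomalous segment and continuous anomaly scores.
    y_true = np.array([0, 0, 1, 1, 0, 0])
    y_scores = np.array([0.1, 0.2, 0.9, 0.4, 0.3, 0.1])

    print(auc_pr_pa(y_true, y_scores))                     # AUC of the point-adjusted PR curve
    print(auc_pr_sw(y_true, y_scores))                     # AUC of the segment-wise PR curve
    print(real_pate(y_true, y_scores, early=2, delay=2))   # PATE with early/delay tolerance windows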
{tsadmetrics-0.1.9 → tsadmetrics-0.1.10/tsadmetrics.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tsadmetrics
-Version: 0.1.9
+Version: 0.1.10
 Summary: Librería para evaluación de detección de anomalías en series temporales
 Home-page: https://github.com/pathsko/TSADmetrics
 Author: Pedro Rafael Velasco Priego
@@ -1,92 +0,0 @@
1
- import numpy as np
2
- from ._tsadeval.metrics import *
3
- from .metric_utils import transform_to_full_series
4
- from sklearn.metrics import auc
5
- from .binary_metrics import point_adjusted_precision, point_adjusted_recall, segment_wise_precision, segment_wise_recall
6
- from pate.PATE_metric import PATE
7
- def precision_at_k(y_true : np.array ,y_anomaly_scores: np.array):
8
-
9
- m = PatK_pw(y_true,y_anomaly_scores)
10
-
11
- return m.get_score()
12
-
13
- def auc_roc_pw(y_true : np.array ,y_anomaly_scores: np.array):
14
-
15
- m = AUC_ROC(y_true,y_anomaly_scores)
16
-
17
- return m.get_score()
18
-
19
-
20
- def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
21
-
22
- m = AUC_PR_pw(y_true,y_anomaly_scores)
23
-
24
- return m.get_score()
25
-
26
-
27
-
28
- def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
29
- thresholds = np.unique(y_anomaly_scores)[::-1] # Descending order
30
- precisions = [1]
31
- recalls = [0]
32
- for t in thresholds[:-1]:
33
-
34
- y_pred = (y_anomaly_scores >= t).astype(int)
35
-
36
-
37
- precisions.append(point_adjusted_precision(y_true, y_pred))
38
- recalls.append(point_adjusted_recall(y_true, y_pred))
39
-
40
- recalls.append(1)
41
- precisions.append(0)
42
- auc_value = auc(recalls, precisions)
43
- return auc_value
44
-
45
-
46
-
47
-
48
- def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
49
- thresholds = np.unique(y_anomaly_scores)[::-1] # Descending order
50
- precisions = [1]
51
- recalls = [0]
52
-
53
- for t in thresholds[:-1]:
54
- y_pred = (y_anomaly_scores >= t).astype(int)
55
- precisions.append(segment_wise_precision(y_true, y_pred))
56
- recalls.append(segment_wise_recall(y_true, y_pred))
57
- recalls.append(1)
58
- precisions.append(0)
59
- auc_value = auc(recalls, precisions)
60
- return auc_value
61
-
62
-
63
- def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
64
-
65
- m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
66
-
67
- return m.get_score()
68
-
69
-
70
- def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
71
-
72
- m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
73
-
74
- return m.get_score()
75
-
76
-
77
- def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: int):
78
- """
79
- Calculate PATE score for anomaly detection in time series.
80
- The PATE score is the ratio of the number of true positives to the sum of true positives, false positives, and false negatives, within a given early and delay range.
81
-
82
- Parameters:
83
- y_true (np.array): The ground truth binary labels for the time series data.
84
- y_anomaly_scores (np.array): The predicted binary labels for the time series data.
85
- early (int): The maximum number of time steps before an anomaly must be predicted to be considered early.
86
- delay (int): The maximum number of time steps after an anomaly must be predicted to be considered delayed.
87
-
88
- Returns:
89
- float: The PATE score.
90
- """
91
-
92
- return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)