tsadmetrics 0.1.16__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
docs_api/conf.py ADDED
@@ -0,0 +1,90 @@
+ # Configuration file for the Sphinx documentation builder.
+ #
+
+ import os
+ import sys
+ sys.path.insert(0, os.path.abspath('../'))
+
+
+ project = 'TSADmetrics'
+ copyright = '2025, Pedro Rafael Velasco Priego'
+ author = 'Pedro Rafael Velasco Priego'
+ release = 'MIT'
+
+ # -- General configuration ---------------------------------------------------
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+ extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+
+
+
+ templates_path = ['_templates']
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+ html_theme = 'furo'
+ html_static_path = ['_static']
+ html_theme_options = {
+ #"sidebar_hide_name": True,
+ "light_css_variables": {
+ "color-brand-primary": "#2e5c7d",
+ "color-brand-content": "#2e5c7d",
+ "codebgcolor": "red",
+ "codetextcolor": "red",
+ },
+ "dark_css_variables": {
+ "color-brand-primary": "#6998b4",
+ "color-brand-content": "#6998b4",
+ "codebgcolor": "green",
+ "codetextcolor": "green",
+ },
+ "navigation_with_keys": True
+
+ }
+ html_baseurl = ''
+
+ html_css_files = [
+ 'css/custom.css',
+ ]
+
+ epub_show_urls = 'footnote'
+
+ # -- Options for HTML output -------------------------------------------------
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+ ### -- LaTeX options -------------------------------------------------
+
+ # comando para compilar: make latexpdf LATEXMKOPTS="-xelatex"
+
+ latex_elements = {
+ 'maxlistdepth': '10', # Aumenta el límite de anidamiento
+ 'papersize': 'a4paper',
+ 'pointsize': '10pt',
+ 'maketitle': r'''
+ \makeatletter
+ \begin{titlepage}
+ \noindent\rule{\textwidth}{1pt}\\[3cm]
+ \begin{center}
+ {\Huge\sffamily\bfseries TSADmetrics API Reference}\\[1.5cm]
+ {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
+ \begin{minipage}{0.8\textwidth}
+ \centering
+ {\large\sffamily
+ \begin{tabular}{l@{\hspace{1cm}}l}
+ \textbf{Autor:} & Pedro Rafael Velasco Priego \\
+ \textbf{Directores:} & Dra. Amelia Zafra Gómez \\
+ & Dr. Sebastián Ventura Soto \\
+ \end{tabular}
+ }
+ \end{minipage}\\[5cm]
+ {\large\sffamily \@date}\\
+ {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
+ \end{center}
+ \noindent\rule{\textwidth}{1pt}
+ \end{titlepage}
+ \makeatother
+ ''',
+ }
docs_manual/conf.py ADDED
@@ -0,0 +1,90 @@
+ # Configuration file for the Sphinx documentation builder.
+ #
+
+ import os
+ import sys
+ sys.path.insert(0, os.path.abspath('../'))
+
+
+ project = 'TSADmetrics'
+ copyright = '2025, Pedro Rafael Velasco Priego'
+ author = 'Pedro Rafael Velasco Priego'
+ release = 'MIT'
+
+ # -- General configuration ---------------------------------------------------
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+ extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+
+
+
+ templates_path = ['_templates']
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+ html_theme = 'furo'
+ html_static_path = ['_static']
+ html_theme_options = {
+ #"sidebar_hide_name": True,
+ "light_css_variables": {
+ "color-brand-primary": "#2e5c7d",
+ "color-brand-content": "#2e5c7d",
+ "codebgcolor": "red",
+ "codetextcolor": "red",
+ },
+ "dark_css_variables": {
+ "color-brand-primary": "#6998b4",
+ "color-brand-content": "#6998b4",
+ "codebgcolor": "green",
+ "codetextcolor": "green",
+ },
+ "navigation_with_keys": True
+
+ }
+ html_baseurl = ''
+
+ html_css_files = [
+ 'css/custom.css',
+ ]
+
+ epub_show_urls = 'footnote'
+
+ # -- Options for HTML output -------------------------------------------------
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+ ### -- LaTeX options -------------------------------------------------
+
+ # comando para compilar: make latexpdf LATEXMKOPTS="-xelatex"
+
+ latex_elements = {
+ 'maxlistdepth': '10', # Aumenta el límite de anidamiento
+ 'papersize': 'a4paper',
+ 'pointsize': '10pt',
+ 'maketitle': r'''
+ \makeatletter
+ \begin{titlepage}
+ \noindent\rule{\textwidth}{1pt}\\[3cm]
+ \begin{center}
+ {\Huge\sffamily\bfseries TSADmetrics User Manual}\\[1.5cm]
+ {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
+ \begin{minipage}{0.8\textwidth}
+ \centering
+ {\large\sffamily
+ \begin{tabular}{l@{\hspace{1cm}}l}
+ \textbf{Autor:} & Pedro Rafael Velasco Priego \\
+ \textbf{Directores:} & Dra. Amelia Zafra Gómez \\
+ & Dr. Sebastián Ventura Soto \\
+ \end{tabular}
+ }
+ \end{minipage}\\[5cm]
+ {\large\sffamily \@date}\\
+ {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
+ \end{center}
+ \noindent\rule{\textwidth}{1pt}
+ \end{titlepage}
+ \makeatother
+ ''',
+ }
tests/test_non_binary.py CHANGED
@@ -148,8 +148,7 @@ class TestAUCPRPW(unittest.TestCase):
 
 
 
-
- class TestAUCPRPA(unittest.TestCase):
+ class TestAUCROCPA(unittest.TestCase):
 
  def setUp(self):
  """
@@ -189,40 +188,40 @@ class TestAUCPRPA(unittest.TestCase):
 ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
 
 
- def test_auc_pr_pa(self):
+ def test_auc_roc_pa(self):
  """
  Prueba para la función auc_pr_pa.
  """
- score = round(auc_pr_pa(self.y_true1, self.y_pred1),2)
- expected_score = 1.0
+ score = round(auc_roc_pa(self.y_true1, self.y_pred1),2)
+ expected_score = 0.5
  self.assertAlmostEqual(score, expected_score, places=4)
 
- score = round(auc_pr_pa(self.y_true1, self.y_pred2),2)
- expected_score = 1.0
+ score = round(auc_roc_pa(self.y_true1, self.y_pred2),2)
+ expected_score = 0.5
  self.assertAlmostEqual(score, expected_score, places=4)
 
- score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
- expected_score = 0.75
+ score = round(auc_roc_pa(self.y_true1, self.y_pred3),2)
+ expected_score = 0.25
  self.assertAlmostEqual(score, expected_score, places=4)
 
 
- score = round(auc_pr_pa(self.y_true2, self.y_pred4),2)
- expected_score = 0.78
+ score = round(auc_roc_pa(self.y_true2, self.y_pred4),2)
+ expected_score = 0.33
  self.assertAlmostEqual(score, expected_score, places=4)
 
 
- def test_auc_pr_pa_consistency(self):
+ def test_auc_roc_pa_consistency(self):
  y_true, y_pred = [],[]
  try:
  for _ in range(100):
  y_true = np.random.choice([0, 1], size=(100,))
  y_pred = np.random.random( size=(100,))
- score = auc_pr_pa(y_true, y_pred)
+ score = auc_roc_pa(y_true, y_pred)
  except Exception as e:
- self.fail(f"auc_roc_pr_pa raised an exception {e}")
-
+ self.fail(f"auc_roc_pa raised an exception {e}")
 
- class TestAUCPRSW(unittest.TestCase):
+
+ class TestAUCPRPA(unittest.TestCase):
 
  def setUp(self):
  """
@@ -237,34 +236,65 @@ class TestAUCPRSW(unittest.TestCase):
  self.y_pred2 = np.array([1, 2, 3, 4])
 
  self.y_pred3 = np.array([4, 4, 4, 4])
-
 
- def test_auc_pr_sw(self):
+ self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+ ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+ ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
+
+
+ self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
+ ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+ ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+ ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+ ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+ ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+ ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+ ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+ ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+ ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+ ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+ ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+ ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+ ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+ ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+ ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+ ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
+
+
+ def test_auc_pr_pa(self):
  """
- Prueba para la función auc_pr_sw.
+ Prueba para la función auc_pr_pa.
  """
- score = round(auc_pr_sw(self.y_true1, self.y_pred1),2)
+ score = round(auc_pr_pa(self.y_true1, self.y_pred1),2)
  expected_score = 1.0
  self.assertAlmostEqual(score, expected_score, places=4)
 
- score = round(auc_pr_sw(self.y_true1, self.y_pred2),2)
- expected_score = 1
+ score = round(auc_pr_pa(self.y_true1, self.y_pred2),2)
+ expected_score = 1.0
  self.assertAlmostEqual(score, expected_score, places=4)
 
- score = round(auc_pr_sw(self.y_true1, self.y_pred3),2)
- expected_score = 1
+ score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
+ expected_score = 0.75
  self.assertAlmostEqual(score, expected_score, places=4)
 
 
- # def test_auc_pr_sw_consistency(self):
- # try:
- # for _ in range(100):
- # y_true = np.random.choice([0, 1], size=(100,))
- # y_pred = np.random.random( size=(100,))
-
- # score = auc_pr_sw(y_true, y_pred)
- # except Exception as e:
- # self.fail(f"auc_pr_sw raised an exception {e}")
+ score = round(auc_pr_pa(self.y_true2, self.y_pred4),2)
+ expected_score = 0.78
+ self.assertAlmostEqual(score, expected_score, places=4)
+
+
+ def test_auc_pr_pa_consistency(self):
+ y_true, y_pred = [],[]
+ try:
+ for _ in range(100):
+ y_true = np.random.choice([0, 1], size=(100,))
+ y_pred = np.random.random( size=(100,))
+ score = auc_pr_pa(y_true, y_pred)
+ except Exception as e:
+ self.fail(f"auc_roc_pr_pa raised an exception {e}")
+
+
+
 
 
  class TestVUSROC(unittest.TestCase):
tsadmetrics/__init__.py CHANGED
@@ -18,4 +18,4 @@ __all__ = ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score','po
  'affiliation_based_recall','affiliation_based_precision','affiliation_based_f_score','nab_score','temporal_distance',
  'average_detection_count','absolute_detection_distance','total_detected_in_range','detection_accuracy_in_range','weighted_detection_difference',
  'binary_pate','real_pate','mean_time_to_detect',
- 'precision_at_k','auc_roc_pw','auc_pr_pw','auc_pr_pa','auc_pr_sw','vus_roc','vus_pr', 'compute_metrics', 'compute_metrics_from_file']
+ 'precision_at_k','auc_roc_pw','auc_pr_pw','auc_roc_pa','auc_pr_pa','vus_roc','vus_pr', 'compute_metrics', 'compute_metrics_from_file']
tsadmetrics/non_binary_metrics.py CHANGED
@@ -84,6 +84,91 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
  return m.get_score()
 
 
+ def auc_roc_pa(y_true: np.array, y_anomaly_scores: np.array):
+ """
+ Calculate the AUC-ROC score using point-adjusted evaluation for anomaly detection in time series.
+
+ This is the standard Area Under the Receiver Operating Characteristic Curve (AUC-ROC), but instead
+ of computing true positive rate (TPR) and false positive rate (FPR) point-wise, it uses a point-adjusted
+ approach. Specifically, for each ground-truth anomalous segment, if at least one point within that
+ segment is predicted as anomalous, the entire segment is considered correctly detected. The adjusted
+ predictions are then compared to the ground-truth labels to compute true positives, false positives,
+ and false negatives, which are used to construct the ROC curve.
+
+ Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+ Parameters:
+ y_true (np.array):
+ Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+ y_anomaly_scores (np.array):
+ Continuous anomaly scores assigned to each point in the series.
+
+ Returns:
+ float: AUC-ROC score (with point-adjusted evaluation).
+ """
+ validate_non_binary_inputs(y_true, y_anomaly_scores)
+
+ tprs = [0]
+ fprs = [0]
+ tps, fps, fns = [], [], []
+
+ p_adj = PointAdjust(len(y_true), y_true, (np.array(y_anomaly_scores) >= 0.5).astype(int))
+ segments = p_adj.get_gt_anomalies_segmentwise()
+ idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
+ y_true_sorted = np.array(y_true)[idx]
+ y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
+
+ segment_mins = []
+ for start, end in segments:
+ anoms_scores = y_anomaly_scores[start:end+1]
+ segment_mins.append([np.max(anoms_scores), end-start+1])
+
+ for i_t in range(len(y_anomaly_scores_sorted)):
+ fp, tp, fn = 0, 0, 0
+ if i_t > 0 and y_anomaly_scores_sorted[i_t] == y_anomaly_scores_sorted[i_t-1]:
+ tp = tps[-1]
+ fp = fps[-1]
+ fn = fns[-1]
+ else:
+ if y_true_sorted[i_t] == 0:
+ # FP
+ if len(fps) == 0:
+ aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+ for i in range(len(aux_y_pred)):
+ if aux_y_pred[i] == 1 and y_true[i] == 0:
+ fp += 1
+ else:
+ fp = fps[i_t-1] + 1
+ else:
+ if len(fps) == 0:
+ aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+ for i in range(len(aux_y_pred)):
+ if aux_y_pred[i] == 1 and y_true[i] == 0:
+ fp += 1
+ else:
+ fp = fps[i_t-1]
+ for score, length in segment_mins:
+ if score >= y_anomaly_scores_sorted[i_t]:
+ # TP
+ tp += length
+ else:
+ # FN
+ fn += length
+ tps.append(tp)
+ fns.append(fn)
+ fps.append(fp)
+ for tp, fp, fn in zip(tps, fps, fns):
+ if tp + fn > 0:
+ tprs.append(tp / (tp + fn))
+ else:
+ tprs.append(0)
+ if fp + (len(y_true) - np.sum(y_true)) > 0:
+ fprs.append(fp / (fp + (len(y_true) - np.sum(y_true))))
+ else:
+ fprs.append(0)
+
+ auc_value = auc(fprs, tprs)
+ return auc_value
 
  def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
  """
@@ -178,117 +263,6 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
 
 
 
- def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
- """
- Calculate the AUC-PR score using segment-wise evaluation for anomaly detection in time series.
-
- This is the standard Area Under the Precision-Recall Curve (AUC-PR), but it uses a segment-wise
- adjustment when computing precision and recall. In this evaluation, each contiguous segment of
- anomalous ground-truth points is treated as a single unit. A true positive is counted if at least
- one predicted anomaly overlaps with the segment. A false negative occurs when a segment is
- completely missed, and a false positive is recorded for each predicted anomalous segment
- that does not overlap with any ground-truth anomaly. These adjusted counts are then used
- to compute precision and recall for constructing the PR curve.
-
- Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
-
- Parameters:
- y_true (np.array):
- Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
- y_anomaly_scores (np.array):
- Continuous anomaly scores assigned to each point in the series.
-
- Returns:
- float: AUC-PR score (with segment-wise evaluation).
- """
- validate_non_binary_inputs(y_true, y_anomaly_scores)
-
- precisions = [1]
- recalls = [0]
- tps,fps,fns = [],[],[]
-
-
- segments = []
- i=0
- while i < len(y_true):
- if y_true[i] == 1:
- start = i
- end = i
- while i < len(y_true) and y_true[i] == 1:
- end = i
- i += 1
- segments.append([start,end])
- i+=1
- idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
- y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
-
- segment_mins = []
- for start,end in segments:
- anoms_scores = y_anomaly_scores[start:end+1]
- segment_mins.append([np.max(anoms_scores),[start,end]])
-
- for i_t in range(len(y_anomaly_scores_sorted)):
- fp,tp,fn = 0,0,0
-
-
- aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
- for score,seg in segment_mins:
- start,end = seg
- if score >= y_anomaly_scores_sorted[i_t]:
- #TP
- tp+= 1
- if aux_y_pred[start]== 1:
- # Extender hacia la izquierda
- i = start - 1
- while i >= 0 and aux_y_pred[i] == 1:
- aux_y_pred[i] = 0
- i -= 1
-
- if aux_y_pred[end] == 1:
- # Extender hacia la derecha
- i = end + 1
- while i < len(aux_y_pred) and aux_y_pred[i] == 1:
- aux_y_pred[i] = 0
- i += 1
- aux_y_pred[start:end+1] = 0
-
- else:
- #FN
- fn+= 1
-
- if np.sum(aux_y_pred)>0:
- fpsegments = []
- i=0
- while i < len(aux_y_pred):
- if aux_y_pred[i] == 1:
- start = i
- end = i
- while i < len(aux_y_pred) and aux_y_pred[i] == 1:
- end = i
- i += 1
- fpsegments.append([start,end])
- i+=1
- fp = len(fpsegments)
- else:
- fp = 0
-
-
- tps.append(tp)
- fns.append(fn)
- fps.append(fp)
- for tp,fp,fn in zip(tps,fps,fns):
- if tp>0:
- precisions.append(tp/(tp+fp))
- recalls.append(tp/(tp+fn))
- else:
- precisions.append(0)
- recalls.append(0)
-
-
-
- auc_value = auc(recalls, precisions)
-
- return auc_value
 
 
  def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
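For orientation, the `auc_roc_pa` function added above takes the same inputs as the existing `auc_pr_pa`: binary ground-truth labels plus continuous anomaly scores, returning a float. A minimal sketch, assuming the top-level imports listed in `tsadmetrics/__init__.py`'s `__all__` (the arrays below are illustrative, not the package's test fixtures):

```python
# Minimal usage sketch for the point-adjusted metrics touched by this release.
import numpy as np
from tsadmetrics import auc_roc_pa, auc_pr_pa

y_true = np.array([0, 0, 1, 1, 1, 0, 0, 0, 1, 0])   # binary ground truth (1 = anomaly)
y_scores = np.array([0.1, 0.2, 0.9, 0.4, 0.3,
                     0.2, 0.1, 0.6, 0.8, 0.1])      # continuous anomaly scores

print(auc_roc_pa(y_true, y_scores))  # AUC-ROC with point-adjusted evaluation
print(auc_pr_pa(y_true, y_scores))   # AUC-PR with point-adjusted evaluation
```

Per the implementation above, the 0.5 threshold inside `auc_roc_pa` is only used to instantiate `PointAdjust` and recover the ground-truth anomaly segments; the ROC curve itself is swept over the sorted anomaly scores.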
tsadmetrics/utils.py CHANGED
@@ -61,8 +61,10 @@ def compute_metrics_from_file(results_file: str, conf_file: str, output_dir: str
  Computes metrics based on prediction results from a CSV file and configuration from a JSON file.
 
  Parameters:
- results_file (str): Path to CSV file containing y_true and y_pred columns.
- conf_file (str): Path to JSON configuration file with metrics and parameters.
+ results_file (str):
+ Path to CSV file containing y_true and y_pred columns.
+ conf_file (str):
+ Path to JSON configuration file with metrics and parameters.
 
  Returns:
  pd.DataFrame: DataFrame with computed metrics.
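A hedged sketch of how the parameters in the reformatted docstring map onto a call. The file paths are placeholders, the CSV columns (`y_true`, `y_pred`) come from the docstring, and the JSON configuration schema is not visible in this diff:

```python
# Hypothetical invocation of compute_metrics_from_file (exported in __all__).
# "results.csv" and "conf.json" are placeholder paths for illustration only.
from tsadmetrics import compute_metrics_from_file

# results.csv must contain y_true and y_pred columns (per the docstring above);
# conf.json lists the metrics and parameters to evaluate.
df = compute_metrics_from_file(
    results_file="results.csv",
    conf_file="conf.json",
    output_dir="./metrics_output",
)
print(df)  # pd.DataFrame with the computed metrics
```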
tsadmetrics-0.1.17.dist-info/METADATA ADDED
@@ -0,0 +1,54 @@
+ Metadata-Version: 2.1
+ Name: tsadmetrics
+ Version: 0.1.17
+ Summary: =?unknown-8bit?q?Librer=C3=ADa_para_evaluaci=C3=B3n_de_detecci=C3=B3n_de_anomal=C3=ADas?= en series temporales
+ Home-page: https://github.com/pathsko/TSADmetrics
+ Author: Pedro Rafael Velasco Priego
+ Author-email: Pedro Rafael Velasco Priego <i12veprp@uco.es>
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: joblib (==1.4.2)
+ Requires-Dist: numpy (==1.24.4)
+ Requires-Dist: pandas (==2.0.3)
+ Requires-Dist: PATE (==0.1.1)
+ Requires-Dist: patsy (==0.5.6)
+ Requires-Dist: python-dateutil (==2.9.0.post0)
+ Requires-Dist: pytz (==2024.1)
+ Requires-Dist: scikit-learn (==1.3.2)
+ Requires-Dist: scipy (==1.10.1)
+ Requires-Dist: six (==1.16.0)
+ Requires-Dist: statsmodels (==0.14.1)
+ Requires-Dist: threadpoolctl (==3.5.0)
+ Requires-Dist: tzdata (==2024.1)
+
+ # TSADmetrics - Time Series Anomaly Detection Metrics
+
+ **TSADmetrics** is a Python library for evaluating anomaly detection algorithms in time series data. It provides a comprehensive set of binary and non-binary metrics designed specifically for the challenges of anomaly detection in temporal contexts.
+
+ ## Features
+
+ - **Binary Metrics**: Evaluate discrete anomaly predictions (0/1 labels)
+
+ - **Non-Binary Metrics**: Assess continuous anomaly scores
+
+ - **Efficient Computation**: Compute multiple metrics at once
+
+ - **CLI Tool**: Evaluate metrics directly from CSV/JSON files
+
+ ## Installation
+
+ Install TSADmetrics via pip:
+
+ ```bash
+ pip install tsadmetrics
+ ```
+
+ ## Documentation
+
+ The complete documentation for TSADmetrics is available at:
+ [https://tsadmetrics.readthedocs.io/](https://tsadmetrics.readthedocs.io/)
+
+ ## Acknowledgements
+
+ This library is based on the concepts and implementations from:
+ Sørbø, S., & Ruocco, M. (2023). *Navigating the metric maze: a taxonomy of evaluation metrics for anomaly detection in time series*. https://doi.org/10.1007/s10618-023-00988-8
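A quick-start sketch to accompany the README's installation step, assuming the non-binary metric signatures visible elsewhere in this diff (`auc_pr_pw(y_true, y_anomaly_scores)` and `vus_roc(y_true, y_anomaly_scores, window=4)`); the data here is random and purely illustrative:

```python
# Quick-start sketch matching the README above.
import numpy as np
from tsadmetrics import auc_pr_pw, vus_roc

y_true = np.random.choice([0, 1], size=100)   # binary ground-truth labels
y_scores = np.random.random(size=100)         # continuous anomaly scores

print(auc_pr_pw(y_true, y_scores))            # point-wise AUC-PR
print(vus_roc(y_true, y_scores, window=4))    # VUS-ROC over the given window
```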
tsadmetrics-0.1.16.dist-info/RECORD → tsadmetrics-0.1.17.dist-info/RECORD RENAMED
@@ -1,4 +1,6 @@
  docs/conf.py,sha256=UvAyr0jPk75vQyREMEG3TIs96Pk-hslOgLQUpySp2tw,1645
+ docs_api/conf.py,sha256=Ba8bUV-53VLgJ93hOesXr1xzLMpXlATnyZYlmyoM85g,2333
+ docs_manual/conf.py,sha256=uUY57MH63QsFLhn_W7LIbwMMQO3rcdUxhCAdzx_d0Z0,2331
  entorno/bin/activate_this.py,sha256=45dnJsdtOWIt5LtVSBmBfB8E7AlKcnhnZe9e3WGclak,1199
  entorno/bin/rst2html.py,sha256=h4RydG-iAectsUra0lNFGwB4_1mngxrtPPgQrxUWQ3A,643
  entorno/bin/rst2html4.py,sha256=Xiv3Zb1gk4jT7DYFVlf5w4LJtI5ZI3pW3b1KLxyPS5A,765
@@ -14,14 +16,14 @@ entorno/bin/rst2xml.py,sha256=uoIfpn3prnir2tzqdycsAjOg-OWw663XOK47IeHCZdY,651
  entorno/bin/rstpep2html.py,sha256=sthYQHEgYfj4JqwG45URwVbRAs-HYuwKget7SUwp9fc,719
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tests/test_binary.py,sha256=Qt14fP-F-TW6KlPz6X-2DhtFpaHNrODiMA2DI39JrBI,39311
- tests/test_non_binary.py,sha256=NLXnSvzu5hqsCSlPhNE2IJdE-r-YZv4I7iCCBiYrrsc,13962
+ tests/test_non_binary.py,sha256=jBn3GfBpbK7ktIS9FNIeYi0ob8jkkbMCyCX7-_8LI-k,15755
  tests/test_utils.py,sha256=8Favmlyix1YaAm03XuzMfEjLnq_Ud0YV_6aFwsIMpl8,2192
- tsadmetrics/__init__.py,sha256=Qg5AvsmzqC3vhNC2WmRpHx4MYrmNBjnXjou9V-WfwE4,1603
+ tsadmetrics/__init__.py,sha256=BxSqvH0NselzEIZhw4rpnUh6UkzRh0eDpkNvChqWv_8,1604
  tsadmetrics/binary_metrics.py,sha256=PiecIZ2z2B3-uCx1H3KXfLXdSIu8vxY5sUsIb2vmobk,69729
  tsadmetrics/metric_utils.py,sha256=1nuHQp5fc7whPMfJTfWmKb6XmSngoe6p7fdsoP0Vz-I,2876
- tsadmetrics/non_binary_metrics.py,sha256=oCpRQhHmbauXoYMWD3cuI1eycoTOwyfoOKA2D-UQHeM,14545
+ tsadmetrics/non_binary_metrics.py,sha256=FsXRMeZIvQsEP5BLT02nW7_yjDnF9wD-h8h-B6eTRVs,14238
  tsadmetrics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- tsadmetrics/utils.py,sha256=TiOFwPNBgWWFCIwOO0BPFr5alRABhj597jsmevUjx54,4889
+ tsadmetrics/utils.py,sha256=mu49QEftRI4HEXvjfiE_CnsujgiNd62U9DRWCEvtO9M,4913
  tsadmetrics/validation.py,sha256=fseGfpGhN-3zAMo2WZLxahcOAsOOyBb2RAFRDKB1KI8,1340
  tsadmetrics/_tsadeval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tsadmetrics/_tsadeval/auc_roc_pr_plot.py,sha256=PHqJUXq2qI248XV9o04D8SsUJgowetaKq0Cu5bYrIAE,12689
@@ -57,8 +59,8 @@ tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py,sha256=OhUJSm
  tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py,sha256=LL-0pPer3ymovVRlktaHo5XDzpgiDhWOVfdPOzKR6og,3152
  tsadmetrics/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tsadmetrics/scripts/compute_metrics.py,sha256=PwtH6XmpKEWwdY62pMfZGrgIBTIms0z3qVpw5LjnuwE,991
- tsadmetrics-0.1.16.dist-info/METADATA,sha256=inUh6ZZm5fg0cpluGAHUR1ULoN4WP480ZjZ9MNuWxAo,831
- tsadmetrics-0.1.16.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- tsadmetrics-0.1.16.dist-info/entry_points.txt,sha256=fnsO232FxrQC6pmeZnyZ4UaiXyvN1rKxksLKQO9n7q8,81
- tsadmetrics-0.1.16.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
- tsadmetrics-0.1.16.dist-info/RECORD,,
+ tsadmetrics-0.1.17.dist-info/METADATA,sha256=Lcjim8NZYuZ-WRBosqpYy_CISaVXF2SOZ7dI-RjoiVI,1936
+ tsadmetrics-0.1.17.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ tsadmetrics-0.1.17.dist-info/entry_points.txt,sha256=fnsO232FxrQC6pmeZnyZ4UaiXyvN1rKxksLKQO9n7q8,81
+ tsadmetrics-0.1.17.dist-info/top_level.txt,sha256=jFJiGEaMZwBStgmkeS19H9fyWDDcPDgmNjinjVrh0jk,52
+ tsadmetrics-0.1.17.dist-info/RECORD,,
tsadmetrics-0.1.16.dist-info/top_level.txt → tsadmetrics-0.1.17.dist-info/top_level.txt RENAMED
@@ -1,4 +1,6 @@
  docs
+ docs_api
+ docs_manual
  entorno
  tests
  tsadmetrics
tsadmetrics-0.1.16.dist-info/METADATA DELETED
@@ -1,23 +0,0 @@
- Metadata-Version: 2.1
- Name: tsadmetrics
- Version: 0.1.16
- Summary: =?unknown-8bit?q?Librer=C3=ADa_para_evaluaci=C3=B3n_de_detecci=C3=B3n_de_anomal=C3=ADas?= en series temporales
- Home-page: https://github.com/pathsko/TSADmetrics
- Author: Pedro Rafael Velasco Priego
- Author-email: Pedro Rafael Velasco Priego <i12veprp@uco.es>
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- Requires-Dist: joblib (==1.4.2)
- Requires-Dist: numpy (==1.24.4)
- Requires-Dist: pandas (==2.0.3)
- Requires-Dist: PATE (==0.1.1)
- Requires-Dist: patsy (==0.5.6)
- Requires-Dist: python-dateutil (==2.9.0.post0)
- Requires-Dist: pytz (==2024.1)
- Requires-Dist: scikit-learn (==1.3.2)
- Requires-Dist: scipy (==1.10.1)
- Requires-Dist: six (==1.16.0)
- Requires-Dist: statsmodels (==0.14.1)
- Requires-Dist: threadpoolctl (==3.5.0)
- Requires-Dist: tzdata (==2024.1)
-