tsadmetrics 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,241 +1,5 @@
  import numpy as np
- from ._tsadeval.metrics import Binary_anomalies, pointwise_to_full_series, segmentwise_to_full_series, DelayThresholdedPointAdjust
- def get_tp_tn_fp_fn_point_wise(y_true: np.array,y_pred: np.array):
-     TP,TN,FP,FN=0,0,0,0
-     for true,pred in zip(y_true,y_pred):
-         if true==pred:
-             if true==1:
-                 TP+=1
-             else:
-                 TN+=1
-         else:
-             if true==1:
-                 FN+=1
-             else:
-                 FP+=1
-     return TP,TN,FP,FN
-
-
19
- def get_events(y_true,anomaly=True):
-     events = []
-     start_idx = None
-     v = 0
-     if anomaly:
-         v = 1
-     else:
-         v = 0
-
-     for i, val in enumerate(y_true):
-         if val == v:  # found the start of an event
-             if start_idx is None:
-                 start_idx = i  # mark where the event starts
-         elif start_idx is not None:  # found the end of an event
-             events.append((start_idx, i - 1))  # add the event to the list of events
-             start_idx = None  # reset the event start
-
-     if start_idx is not None:  # still inside an event at the end of the sequence
-         events.append((start_idx, len(y_true) - 1))  # add the final event to the list of events
-
-
-     return events
-
42
- def calculate_intersection(event1, event2):
-     start_intersection = max(event1[0], event2[0])
-     end_intersection = min(event1[1], event2[1])
-
-     # If there is an intersection, return the range of the intersection, otherwise return None
-     if start_intersection <= end_intersection:
-         return [start_intersection, end_intersection]
-     else:
-         return None
-
- def get_tp_tn_fp_fn_point_adjusted(y_true: np.array,y_pred: np.array):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             if calculate_intersection(y_true_events[i_true],y_pred_events[i_pred]) is not None:
-                 TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 detected=True
-                 break
-             elif y_true_events[i_true][0]>y_pred_events[i_pred][1]:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
76
-
- def get_tp_tn_fp_fn_delay_th_point_adjusted(y_true: np.array,y_pred: np.array,k: int):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             intersec = calculate_intersection(y_true_events[i_true],y_pred_events[i_pred])
-             if intersec is not None and intersec[0]-y_true_events[i_true][0]<k:
-                 TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 detected=True
-                 break
-             else:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
-
- def get_tp_tn_fp_fn_point_adjusted_at_k(y_true: np.array,y_pred: np.array, k: float):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             intersec = calculate_intersection(y_true_events[i_true],y_pred_events[i_pred])
-             if intersec is not None:
-                 event_size = y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 intersec_size = intersec[1]-intersec[0]+1
-                 if intersec is not None and intersec_size/event_size>=k:
-
-                     TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                     detected=True
-                     break
-             else:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
132
-
-
- def get_tp_tn_fp_fn_latency_sparsity_aw(y_true: np.array, y_pred: np.array, ni: int):
-     batched_shape = (int(np.ceil(y_pred.shape[0] / ni)), 1)
-     label_batch = np.zeros(batched_shape)
-     pred_batch = np.zeros(batched_shape)
-     actual = np.copy(y_true)
-     predict = np.copy(y_pred)
-     detect_state = False  # triggered when a True anomaly is detected by model
-     anomaly_batch_count = 0
-     i, i_ni = 0, 0
-     step = ni
-
-     while i < len(y_true) and step > 1:
-         j = min(i + step, len(y_true))  # end of ni (batch) starting at i
-
-         # Adjust step size if needed
-         if step > 2 and actual[i:j].sum() > 1:
-             if np.diff(np.where(actual[i:j])).max() > 1:  # if it finds an interruption in the true label continuity
-                 step = min(int((j - i) / 2), 2)  # reduce step size
-                 label_batch = np.append(label_batch, [[0]], axis=0)
-                 pred_batch = np.append(pred_batch, [[0]], axis=0)  # increase size
-                 j = i + step
-             else:
-                 step = ni
-         else:
-             step = ni
-
-         # Start rolling window scoring
-         if actual[i:j].max():  # If label = T
-             if not actual[i]:  # if first value is normal
-                 detect_state = False
-             s = actual[i:j].argmax()  # this is the index of the first occurrence
-             if detect_state:  # if anomaly was previously detected by model
-                 anomaly_batch_count += 1
-                 pred_batch[i_ni], label_batch[i_ni], predict[i + s:j] = 1, 1, 1
-             elif predict[i:j].max():  # if alert was detected with T
-                 detect_state = True  # turn on detection state
-                 anomaly_batch_count += 1
-                 pred_batch[i_ni], label_batch[i_ni], predict[i + s:j] = 1, 1, 1
-             else:
-                 detect_state = False
-                 label_batch[i_ni] = 1
-         else:
-             detect_state = False
-             if predict[i:j].max():  # if False positive
-                 pred_batch[i_ni] = 1
-         i += step
-         i_ni += 1
-
-     if ni == 1:
-         return get_tp_tn_fp_fn_point_wise(actual, predict)
-
-     return get_tp_tn_fp_fn_point_wise(label_batch.flatten().astype(int), pred_batch.flatten().astype(int))
186
-
- def get_tp_fp_fn_segment_wise(y_true: np.array,y_pred: np.array):
-
-
-     y_true_anomaly_events = get_events(y_true)
-     pred_anomaly_events = get_events(y_pred)
-     y_true_normal_events = get_events(y_true,False)
-     pred_normal_events = get_events(y_pred,False)
-
-     TP = 0
-     FN = 0
-     FP = 0
-     #TP
-     i = 0
-     for e_p in pred_anomaly_events:
-
-         c, d = e_p
-         while i<len(y_true_anomaly_events):
-             e_g = y_true_anomaly_events[i]
-             a, b = e_g
-             if a>d:
-                 break
-
-             if b<c:
-                 i+=1
-                 continue
-
-             else:
-                 if max(a, c) <= min(b, d):
-                     TP+=1
-
-
-             i+=1
-
-     #FN
-     FN = len(y_true_anomaly_events) - TP
-     #FP
-     i = 0
-     for e_p in y_true_normal_events:
-
-         c, d = e_p
-         while i<len(pred_anomaly_events):
-             e_g = pred_anomaly_events[i]
-             a, b = e_g
-             if a>d:
-                 break
-             if b<c:
-                 i+=1
-                 continue
-             if calculate_intersection(e_g, e_p) is not None:
-                 FP+=1
-             i+=1
-     return TP, FP, FN
+ from ._tsadeval.metrics import pointwise_to_full_series, segmentwise_to_full_series
  
  def is_full_series(length: int, anomalies: np.array):
      # [1 0 1 1 0]
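
The block removed above held the raw TP/TN/FP/FN counting for several evaluation protocols (point-wise, point-adjusted, delay-thresholded, point-adjusted at k, latency/sparsity-aware, and segment-wise). As a reading aid, here is a minimal standalone sketch of the point-adjustment idea on a toy series; it does not import tsadmetrics and is not part of the package, it only illustrates how a detected point credits its whole ground-truth segment.

    import numpy as np

    y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0])   # two ground-truth segments: (1,3) and (6,7)
    y_pred = np.array([0, 0, 1, 0, 0, 0, 0, 0, 1])

    # Point-wise counting treats every index independently.
    tp_pw = int(np.sum((y_true == 1) & (y_pred == 1)))   # 1
    fn_pw = int(np.sum((y_true == 1) & (y_pred == 0)))   # 4

    # Point adjustment: a segment counts in full once any of its points is flagged.
    adjusted = y_pred.copy()
    for start, end in [(1, 3), (6, 7)]:                  # ground-truth anomaly events
        if adjusted[start:end + 1].any():
            adjusted[start:end + 1] = 1
    tp_pa = int(np.sum((y_true == 1) & (adjusted == 1)))  # 3 (segment (1,3) fully credited)
    fn_pa = int(np.sum((y_true == 1) & (adjusted == 0)))  # 2 (segment (6,7) still missed)
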
@@ -330,4 +94,5 @@ def position(i, anomaly_length,bias):
          else:
              return anomaly_length - i + 1
      else:
-         raise Exception("Error, wrong bias value.")
+         raise Exception("Error, wrong bias value.")
+ 
@@ -1,7 +1,8 @@
  import numpy as np
  from ._tsadeval.metrics import *
+ from .validation import validate_non_binary_inputs
  from sklearn.metrics import auc
- from pate.PATE_metric import PATE
+ from pate.PATE_metric import PATE
  def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      """
      Calculate the precision at k score for anomaly detection in time series.
@@ -14,6 +15,8 @@ def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      The value of k is automatically set to the number of true anomalies present in
      y_true. That is, k = sum(y_true).
  
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              The ground truth binary labels for the time series data.
@@ -23,6 +26,8 @@ def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      Returns:
          float: The precision at k score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = PatK_pw(y_true,y_anomaly_scores)
  
      return m.get_score()
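
To make the k = sum(y_true) convention concrete, here is a hedged usage sketch. It assumes precision_at_k is importable from the package's top level (adjust the import path if it is not), and the expected value quoted in the comment follows the usual top-k definition described in the docstring.

    import numpy as np
    from tsadmetrics import precision_at_k  # assumed top-level export; adjust if needed

    y_true = np.array([0, 1, 0, 1, 1, 0])
    y_anomaly_scores = np.array([0.1, 0.9, 0.8, 0.3, 0.7, 0.2])

    # k = sum(y_true) = 3; the three highest scores sit at indices 1, 2 and 4,
    # of which two are true anomalies, so the expected score is 2/3.
    print(precision_at_k(y_true, y_anomaly_scores))
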
@@ -35,6 +40,8 @@ def auc_roc_pw(y_true : np.array, y_anomaly_scores: np.array):
      computed in a point-wise manner. That is, each point in the time series is treated
      independently when calculating true positives, false positives, and false negatives.
  
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -44,6 +51,7 @@ def auc_roc_pw(y_true : np.array, y_anomaly_scores: np.array):
      Returns:
          float: AUC-ROC score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
  
      m = AUC_ROC(y_true,y_anomaly_scores)
  
@@ -58,6 +66,8 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
      computed in a point-wise manner. That is, each point in the time series is treated
      independently when calculating precision and recall.
  
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
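
A hedged usage sketch for the two point-wise, threshold-free metrics touched above (auc_roc_pw and auc_pr_pw). The top-level imports and the synthetic data are assumptions made for illustration only.

    import numpy as np
    from tsadmetrics import auc_roc_pw, auc_pr_pw  # assumed top-level exports

    rng = np.random.default_rng(0)
    y_true = np.zeros(200, dtype=int)
    y_true[60:80] = 1                     # one labelled anomalous segment
    scores = rng.normal(size=200)
    scores[60:80] += 2.0                  # detector scores are higher inside the segment

    print(auc_roc_pw(y_true, scores))     # point-wise ROC-AUC
    print(auc_pr_pw(y_true, scores))      # point-wise PR-AUC
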
@@ -67,11 +77,98 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
      Returns:
          float: AUC-PR score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = AUC_PR_pw(y_true,y_anomaly_scores)
  
      return m.get_score()
  
  
+ def auc_roc_pa(y_true: np.array, y_anomaly_scores: np.array):
+     """
+     Calculate the AUC-ROC score using point-adjusted evaluation for anomaly detection in time series.
+
+     This is the standard Area Under the Receiver Operating Characteristic Curve (AUC-ROC), but instead
+     of computing true positive rate (TPR) and false positive rate (FPR) point-wise, it uses a point-adjusted
+     approach. Specifically, for each ground-truth anomalous segment, if at least one point within that
+     segment is predicted as anomalous, the entire segment is considered correctly detected. The adjusted
+     predictions are then compared to the ground-truth labels to compute true positives, false positives,
+     and false negatives, which are used to construct the ROC curve.
+
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
+     Parameters:
+         y_true (np.array):
+             Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
+         y_anomaly_scores (np.array):
+             Continuous anomaly scores assigned to each point in the series.
+
+     Returns:
+         float: AUC-ROC score (with point-adjusted evaluation).
+     """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
+     tprs = [0]
+     fprs = [0]
+     tps, fps, fns = [], [], []
+
+     p_adj = PointAdjust(len(y_true), y_true, (np.array(y_anomaly_scores) >= 0.5).astype(int))
+     segments = p_adj.get_gt_anomalies_segmentwise()
+     idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
+     y_true_sorted = np.array(y_true)[idx]
+     y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
+
+     segment_mins = []
+     for start, end in segments:
+         anoms_scores = y_anomaly_scores[start:end+1]
+         segment_mins.append([np.max(anoms_scores), end-start+1])
+
+     for i_t in range(len(y_anomaly_scores_sorted)):
+         fp, tp, fn = 0, 0, 0
+         if i_t > 0 and y_anomaly_scores_sorted[i_t] == y_anomaly_scores_sorted[i_t-1]:
+             tp = tps[-1]
+             fp = fps[-1]
+             fn = fns[-1]
+         else:
+             if y_true_sorted[i_t] == 0:
+                 # FP
+                 if len(fps) == 0:
+                     aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+                     for i in range(len(aux_y_pred)):
+                         if aux_y_pred[i] == 1 and y_true[i] == 0:
+                             fp += 1
+                 else:
+                     fp = fps[i_t-1] + 1
+             else:
+                 if len(fps) == 0:
+                     aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
+                     for i in range(len(aux_y_pred)):
+                         if aux_y_pred[i] == 1 and y_true[i] == 0:
+                             fp += 1
+                 else:
+                     fp = fps[i_t-1]
+             for score, length in segment_mins:
+                 if score >= y_anomaly_scores_sorted[i_t]:
+                     # TP
+                     tp += length
+                 else:
+                     # FN
+                     fn += length
+         tps.append(tp)
+         fns.append(fn)
+         fps.append(fp)
+     for tp, fp, fn in zip(tps, fps, fns):
+         if tp + fn > 0:
+             tprs.append(tp / (tp + fn))
+         else:
+             tprs.append(0)
+         if fp + (len(y_true) - np.sum(y_true)) > 0:
+             fprs.append(fp / (fp + (len(y_true) - np.sum(y_true))))
+         else:
+             fprs.append(0)
+
+     auc_value = auc(fprs, tprs)
+     return auc_value
  
  def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
      """
@@ -84,6 +181,8 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
      to the ground-truth labels to compute true positives, false positives, and false negatives,
      which are used to construct the PR curve.
  
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -93,7 +192,7 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
      Returns:
          float: AUC-PR score (with point-adjusted evaluation).
      """
-
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
  
      precisions = [1]
      recalls = [0]
@@ -164,114 +263,6 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
  
  
  
- def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
-     """
-     Calculate the AUC-PR score using segment-wise evaluation for anomaly detection in time series.
-
-     This is the standard Area Under the Precision-Recall Curve (AUC-PR), but it uses a segment-wise
-     adjustment when computing precision and recall. In this evaluation, each contiguous segment of
-     anomalous ground-truth points is treated as a single unit. A true positive is counted if at least
-     one predicted anomaly overlaps with the segment. A false negative occurs when a segment is
-     completely missed, and a false positive is recorded for each predicted anomalous segment
-     that does not overlap with any ground-truth anomaly. These adjusted counts are then used
-     to compute precision and recall for constructing the PR curve.
-
-     Parameters:
-         y_true (np.array):
-             Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
-         y_anomaly_scores (np.array):
-             Continuous anomaly scores assigned to each point in the series.
-
-     Returns:
-         float: AUC-PR score (with segment-wise evaluation).
-     """
-
-     precisions = [1]
-     recalls = [0]
-     tps,fps,fns = [],[],[]
-
-
-     segments = []
-     i=0
-     while i < len(y_true):
-         if y_true[i] == 1:
-             start = i
-             end = i
-             while i < len(y_true) and y_true[i] == 1:
-                 end = i
-                 i += 1
-             segments.append([start,end])
-         i+=1
-     idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
-     y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
-
-     segment_mins = []
-     for start,end in segments:
-         anoms_scores = y_anomaly_scores[start:end+1]
-         segment_mins.append([np.max(anoms_scores),[start,end]])
-
-     for i_t in range(len(y_anomaly_scores_sorted)):
-         fp,tp,fn = 0,0,0
-
-
-         aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
-         for score,seg in segment_mins:
-             start,end = seg
-             if score >= y_anomaly_scores_sorted[i_t]:
-                 #TP
-                 tp+= 1
-                 if aux_y_pred[start]== 1:
-                     # Extend to the left
-                     i = start - 1
-                     while i >= 0 and aux_y_pred[i] == 1:
-                         aux_y_pred[i] = 0
-                         i -= 1
-
-                 if aux_y_pred[end] == 1:
-                     # Extend to the right
-                     i = end + 1
-                     while i < len(aux_y_pred) and aux_y_pred[i] == 1:
-                         aux_y_pred[i] = 0
-                         i += 1
-                 aux_y_pred[start:end+1] = 0
-
-             else:
-                 #FN
-                 fn+= 1
-
-         if np.sum(aux_y_pred)>0:
-             fpsegments = []
-             i=0
-             while i < len(aux_y_pred):
-                 if aux_y_pred[i] == 1:
-                     start = i
-                     end = i
-                     while i < len(aux_y_pred) and aux_y_pred[i] == 1:
-                         end = i
-                         i += 1
-                     fpsegments.append([start,end])
-                 i+=1
-             fp = len(fpsegments)
-         else:
-             fp = 0
-
-
-         tps.append(tp)
-         fns.append(fn)
-         fps.append(fp)
-     for tp,fp,fn in zip(tps,fps,fns):
-         if tp>0:
-             precisions.append(tp/(tp+fp))
-             recalls.append(tp/(tp+fn))
-         else:
-             precisions.append(0)
-             recalls.append(0)
-
-
-
-     auc_value = auc(recalls, precisions)
-
-     return auc_value
  
  
  def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
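
The removed auc_pr_sw built its PR curve from segment-wise counts: one TP per ground-truth segment whose maximum score clears the threshold, one FN per missed segment, and FPs taken from the predicted segments left over after detected anomalies are masked out. The standalone sketch below illustrates only the segment-level bookkeeping at a single threshold; segment_wise_counts is a hypothetical helper, not a re-implementation of the removed function.

    import numpy as np

    def segment_wise_counts(y_true, y_pred):
        # Turn a binary series into (start, end) events, inclusive on both ends.
        def events(x):
            idx = np.flatnonzero(np.diff(np.r_[0, x, 0]))
            return list(zip(idx[::2], idx[1::2] - 1))

        gt, pred = events(y_true), events(y_pred)
        tp = sum(any(ps <= ge and pe >= gs for ps, pe in pred) for gs, ge in gt)
        fn = len(gt) - tp
        fp = sum(not any(ps <= ge and pe >= gs for gs, ge in gt) for ps, pe in pred)
        return tp, fp, fn

    y_true = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0])
    y_pred = np.array([0, 0, 1, 0, 1, 0, 0, 0, 0])
    print(segment_wise_counts(y_true, y_pred))   # (1, 1, 1): one segment hit, one missed, one spurious
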
284
275
  the ROC-AUC over different values of the tolerance parameter, from 0 to `window`, thus producing
285
276
  a volume under the ROC surface.
286
277
 
278
+ Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
279
+
287
280
  For more information, see the original paper:
288
281
  https://dl.acm.org/doi/10.14778/3551793.3551830
289
282
 
@@ -300,6 +293,8 @@ def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
  
  
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
  
      return m.get_score()
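
A hedged usage sketch for the VUS metrics (vus_pr is defined just below): the `window` argument is the maximum temporal tolerance integrated over. The top-level imports and the synthetic data are assumptions.

    import numpy as np
    from tsadmetrics import vus_roc, vus_pr  # assumed top-level exports

    rng = np.random.default_rng(1)
    y_true = np.zeros(300, dtype=int)
    y_true[100:120] = 1
    scores = rng.random(300)
    scores[98:122] += 1.0                     # detections slightly misaligned with the event

    print(vus_roc(y_true, scores, window=4))  # volume under the ROC surface
    print(vus_pr(y_true, scores, window=4))   # volume under the PR surface
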
@@ -314,6 +309,8 @@ def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
      anomalies that are temporally close to the true events. The final metric integrates the PR-AUC
      over several levels of temporal tolerance (from 0 to `window`), yielding a volume under the PR surface.
  
+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      For more information, see the original paper:
      https://dl.acm.org/doi/10.14778/3551793.3551830
  
@@ -330,6 +327,8 @@
  
  
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
  
      return m.get_score()
@@ -350,6 +349,8 @@ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: i
      The final PATE score aggregates these weighted contributions across all time steps, yielding
      a smooth performance measure that is sensitive to both the timing and confidence of the predictions.
  
+     Implementation of https://arxiv.org/abs/2405.12096
+
      For more information, see the original paper:
      https://arxiv.org/abs/2405.12096
  
@@ -366,4 +367,6 @@ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: i
      Returns:
          float: The real-valued PATE score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)
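
A hedged usage sketch for real_pate; per the PATE paper, `early` and `delay` set the tolerance buffers (in time steps) before and after each ground-truth event. The top-level import and the synthetic data are assumptions.

    import numpy as np
    from tsadmetrics import real_pate  # assumed top-level export

    rng = np.random.default_rng(2)
    y_true = np.zeros(100, dtype=int)
    y_true[40:50] = 1
    scores = rng.random(100)
    scores[38:52] += 0.5                   # confident scores just before and after the event

    print(real_pate(y_true, scores, early=5, delay=5))
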
@@ -0,0 +1,42 @@
+ #!/usr/bin/env python3
+ import argparse
+ from tsadmetrics.utils import compute_metrics_from_file
+
+
+ def main():
+
+     parser = argparse.ArgumentParser(
+         description='Compute metrics from anomaly detection results and configuration files.'
+     )
+
+
+     parser.add_argument(
+         '--res_file',
+         type=str,
+         required=True,
+         help='Path to the results CSV file (e.g., results.csv)'
+     )
+     parser.add_argument(
+         '--conf_file',
+         type=str,
+         required=True,
+         help='Path to the configuration JSON file (e.g., conf.json)'
+     )
+     parser.add_argument(
+         '--output_dir',
+         type=str,
+         required=True,
+         help='Directory where output files will be saved (e.g., ./output_dir)'
+     )
+
+     args = parser.parse_args()
+
+
+     compute_metrics_from_file(
+         results_file=args.res_file,
+         conf_file=args.conf_file,
+         output_dir=args.output_dir
+     )
+
+ if __name__ == '__main__':
+     main()
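
The new console script added above is a thin wrapper around compute_metrics_from_file. The equivalent programmatic call is sketched below; the file names are the placeholders from the script's own help strings.

    from tsadmetrics.utils import compute_metrics_from_file

    compute_metrics_from_file(
        results_file='results.csv',   # results CSV file
        conf_file='conf.json',        # configuration JSON file
        output_dir='./output_dir'     # directory where output files will be saved
    )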