tsadmetrics 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
@@ -1,241 +1,5 @@
  import numpy as np
- from ._tsadeval.metrics import Binary_anomalies, pointwise_to_full_series, segmentwise_to_full_series, DelayThresholdedPointAdjust
- def get_tp_tn_fp_fn_point_wise(y_true: np.array,y_pred: np.array):
-     TP,TN,FP,FN=0,0,0,0
-     for true,pred in zip(y_true,y_pred):
-         if true==pred:
-             if true==1:
-                 TP+=1
-             else:
-                 TN+=1
-         else:
-             if true==1:
-                 FN+=1
-             else:
-                 FP+=1
-     return TP,TN,FP,FN
-
-
- def get_events(y_true,anomaly=True):
-     events = []
-     start_idx = None
-     v = 0
-     if anomaly:
-         v = 1
-     else:
-         v = 0
-
-     for i, val in enumerate(y_true):
-         if val == v:  # if we find the start of an event
-             if start_idx is None:
-                 start_idx = i  # mark the start of the event
-         elif start_idx is not None:  # if we find the end of an event
-             events.append((start_idx, i - 1))  # add the event to the list of events
-             start_idx = None  # reset the event start
-
-     if start_idx is not None:  # if the sequence ends while still inside an event
-         events.append((start_idx, len(y_true) - 1))  # add the final event to the list of events
-
-
-     return events
-
- def calculate_intersection(event1, event2):
-     start_intersection = max(event1[0], event2[0])
-     end_intersection = min(event1[1], event2[1])
-
-     # If there is an intersection, return the range of the intersection, otherwise return None
-     if start_intersection <= end_intersection:
-         return [start_intersection, end_intersection]
-     else:
-         return None
-
- def get_tp_tn_fp_fn_point_adjusted(y_true: np.array,y_pred: np.array):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             if calculate_intersection(y_true_events[i_true],y_pred_events[i_pred]) is not None:
-                 TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 detected=True
-                 break
-             elif y_true_events[i_true][0]>y_pred_events[i_pred][1]:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
-
- def get_tp_tn_fp_fn_delay_th_point_adjusted(y_true: np.array,y_pred: np.array,k: int):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             intersec = calculate_intersection(y_true_events[i_true],y_pred_events[i_pred])
-             if intersec is not None and intersec[0]-y_true_events[i_true][0]<k:
-                 TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 detected=True
-                 break
-             else:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
-
- def get_tp_tn_fp_fn_point_adjusted_at_k(y_true: np.array,y_pred: np.array, k: float):
-     TP, TN, FP, FN = get_tp_tn_fp_fn_point_wise(y_true, y_pred)
-     TP=0
-     FN=0
-     y_true_events = get_events(y_true,anomaly=True)
-     y_pred_events = get_events(y_pred,anomaly=True)
-
-     i_true = 0
-     i_pred = 0
-     while i_true<len(y_true_events):
-         detected = False
-         while i_pred<len(y_pred_events) and y_true_events[i_true][1]>y_pred_events[i_pred][0]:
-             intersec = calculate_intersection(y_true_events[i_true],y_pred_events[i_pred])
-             if intersec is not None:
-                 event_size = y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                 intersec_size = intersec[1]-intersec[0]+1
-                 if intersec is not None and intersec_size/event_size>=k:
-
-                     TP+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-                     detected=True
-                     break
-             else:
-                 i_pred+=1
-
-         if not detected:
-             FN+= y_true_events[i_true][1]-y_true_events[i_true][0]+1
-         i_true+=1
-
-     return TP, TN, FP, FN
-
-
- def get_tp_tn_fp_fn_latency_sparsity_aw(y_true: np.array, y_pred: np.array, ni: int):
-     batched_shape = (int(np.ceil(y_pred.shape[0] / ni)), 1)
-     label_batch = np.zeros(batched_shape)
-     pred_batch = np.zeros(batched_shape)
-     actual = np.copy(y_true)
-     predict = np.copy(y_pred)
-     detect_state = False  # triggered when a True anomaly is detected by model
-     anomaly_batch_count = 0
-     i, i_ni = 0, 0
-     step = ni
-
-     while i < len(y_true) and step > 1:
-         j = min(i + step, len(y_true))  # end of ni (batch) starting at i
-
-         # Adjust step size if needed
-         if step > 2 and actual[i:j].sum() > 1:
-             if np.diff(np.where(actual[i:j])).max() > 1:  # if it finds an interruption in the true label continuity
-                 step = min(int((j - i) / 2), 2)  # reduce step size
-                 label_batch = np.append(label_batch, [[0]], axis=0)
-                 pred_batch = np.append(pred_batch, [[0]], axis=0)  # increase size
-                 j = i + step
-             else:
-                 step = ni
-         else:
-             step = ni
-
-         # Start rolling window scoring
-         if actual[i:j].max():  # If label = T
-             if not actual[i]:  # if first value is normal
-                 detect_state = False
-             s = actual[i:j].argmax()  # this is the index of the first occurrence
-             if detect_state:  # if anomaly was previously detected by model
-                 anomaly_batch_count += 1
-                 pred_batch[i_ni], label_batch[i_ni], predict[i + s:j] = 1, 1, 1
-             elif predict[i:j].max():  # if alert was detected with T
-                 detect_state = True  # turn on detection state
-                 anomaly_batch_count += 1
-                 pred_batch[i_ni], label_batch[i_ni], predict[i + s:j] = 1, 1, 1
-             else:
-                 detect_state = False
-                 label_batch[i_ni] = 1
-         else:
-             detect_state = False
-             if predict[i:j].max():  # if False positive
-                 pred_batch[i_ni] = 1
-         i += step
-         i_ni += 1
-
-     if ni == 1:
-         return get_tp_tn_fp_fn_point_wise(actual, predict)
-
-     return get_tp_tn_fp_fn_point_wise(label_batch.flatten().astype(int), pred_batch.flatten().astype(int))
-
- def get_tp_fp_fn_segment_wise(y_true: np.array,y_pred: np.array):
-
-
-     y_true_anomaly_events = get_events(y_true)
-     pred_anomaly_events = get_events(y_pred)
-     y_true_normal_events = get_events(y_true,False)
-     pred_normal_events = get_events(y_pred,False)
-
-     TP = 0
-     FN = 0
-     FP = 0
-     #TP
-     i = 0
-     for e_p in pred_anomaly_events:
-
-         c, d = e_p
-         while i<len(y_true_anomaly_events):
-             e_g = y_true_anomaly_events[i]
-             a, b = e_g
-             if a>d:
-                 break
-
-             if b<c:
-                 i+=1
-                 continue
-
-             else:
-                 if max(a, c) <= min(b, d):
-                     TP+=1
-
-
-             i+=1
-
-     #FN
-     FN = len(y_true_anomaly_events) - TP
-     #FP
-     i = 0
-     for e_p in y_true_normal_events:
-
-         c, d = e_p
-         while i<len(pred_anomaly_events):
-             e_g = pred_anomaly_events[i]
-             a, b = e_g
-             if a>d:
-                 break
-             if b<c:
-                 i+=1
-                 continue
-             if calculate_intersection(e_g, e_p) is not None:
-                 FP+=1
-             i+=1
-     return TP, FP, FN
+ from ._tsadeval.metrics import pointwise_to_full_series, segmentwise_to_full_series

  def is_full_series(length: int, anomalies: np.array):
      # [1 0 1 1 0]
@@ -330,4 +94,5 @@ def position(i, anomaly_length,bias):
          else:
              return anomaly_length - i + 1
      else:
-         raise Exception("Error, wrong bias value.")
+         raise Exception("Error, wrong bias value.")
+
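The helpers removed above implement point-adjusted counting: if any predicted point falls inside a ground-truth anomaly event, the whole event is credited as true positives, otherwise the whole event counts as false negatives. The following is a small, self-contained NumPy sketch of that idea only; the helper name is hypothetical and this is not the package API.

import numpy as np

def point_adjust_counts(y_true, y_pred):
    # Hypothetical illustration of point-adjusted counting (not part of tsadmetrics):
    # any hit inside a true event credits every point of that event as TP.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    tp = fn = 0
    in_event, start = False, 0
    for i, v in enumerate(np.append(y_true, 0)):  # trailing 0 closes an open event
        if v == 1 and not in_event:
            in_event, start = True, i
        elif v == 0 and in_event:
            in_event = False
            event_len = i - start
            if y_pred[start:i].any():
                tp += event_len   # whole event credited once any point is detected
            else:
                fn += event_len
    tn = int(np.sum((y_true == 0) & (y_pred == 0)))
    fp = int(np.sum((y_true == 0) & (y_pred == 1)))
    return tp, tn, fp, fn

# One 3-point event detected by a single point, one 2-point event missed:
print(point_adjust_counts([0, 1, 1, 1, 0, 1, 1], [0, 0, 1, 0, 0, 0, 0]))  # (3, 2, 0, 2)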
@@ -1,7 +1,8 @@
  import numpy as np
  from ._tsadeval.metrics import *
+ from .validation import validate_non_binary_inputs
  from sklearn.metrics import auc
- from pate.PATE_metric import PATE
+ from pate.PATE_metric import PATE
  def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      """
      Calculate the precision at k score for anomaly detection in time series.
@@ -14,6 +15,8 @@ def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      The value of k is automatically set to the number of true anomalies present in
      y_true. That is, k = sum(y_true).

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              The ground truth binary labels for the time series data.
@@ -23,6 +26,8 @@ def precision_at_k(y_true : np.array, y_anomaly_scores: np.array):
      Returns:
          float: The precision at k score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = PatK_pw(y_true,y_anomaly_scores)

      return m.get_score()
@@ -35,6 +40,8 @@ def auc_roc_pw(y_true : np.array, y_anomaly_scores: np.array):
      computed in a point-wise manner. That is, each point in the time series is treated
      independently when calculating true positives, false positives, and false negatives.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -44,6 +51,7 @@ def auc_roc_pw(y_true : np.array, y_anomaly_scores: np.array):
      Returns:
          float: AUC-ROC score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)

      m = AUC_ROC(y_true,y_anomaly_scores)

@@ -58,6 +66,8 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
      computed in a point-wise manner. That is, each point in the time series is treated
      independently when calculating precision and recall.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -67,6 +77,8 @@ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
      Returns:
          float: AUC-PR score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = AUC_PR_pw(y_true,y_anomaly_scores)

      return m.get_score()
@@ -84,6 +96,8 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
      to the ground-truth labels to compute true positives, false positives, and false negatives,
      which are used to construct the PR curve.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -93,7 +107,7 @@ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
      Returns:
          float: AUC-PR score (with point-adjusted evaluation).
      """
-
+     validate_non_binary_inputs(y_true, y_anomaly_scores)

      precisions = [1]
      recalls = [0]
@@ -176,6 +190,8 @@ def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
      that does not overlap with any ground-truth anomaly. These adjusted counts are then used
      to compute precision and recall for constructing the PR curve.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      Parameters:
          y_true (np.array):
              Ground-truth binary labels for the time series (0 = normal, 1 = anomaly).
@@ -185,6 +201,7 @@ def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
      Returns:
          float: AUC-PR score (with segment-wise evaluation).
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)

      precisions = [1]
      recalls = [0]
@@ -284,6 +301,8 @@ def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
      the ROC-AUC over different values of the tolerance parameter, from 0 to `window`, thus producing
      a volume under the ROC surface.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      For more information, see the original paper:
      https://dl.acm.org/doi/10.14778/3551793.3551830

@@ -300,6 +319,8 @@ def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):


      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)

      return m.get_score()
@@ -314,6 +335,8 @@ def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
      anomalies that are temporally close to the true events. The final metric integrates the PR-AUC
      over several levels of temporal tolerance (from 0 to `window`), yielding a volume under the PR surface.

+     Implementation of https://link.springer.com/article/10.1007/s10618-023-00988-8
+
      For more information, see the original paper:
      https://dl.acm.org/doi/10.14778/3551793.3551830

@@ -330,6 +353,8 @@ def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):


      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      m = VUS_PR(y_true,y_anomaly_scores,max_window=window)

      return m.get_score()
@@ -350,6 +375,8 @@ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: i
      The final PATE score aggregates these weighted contributions across all time steps, yielding
      a smooth performance measure that is sensitive to both the timing and confidence of the predictions.

+     Implementation of https://arxiv.org/abs/2405.12096
+
      For more information, see the original paper:
      https://arxiv.org/abs/2405.12096

@@ -366,4 +393,6 @@ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: i
      Returns:
          float: The real-valued PATE score.
      """
+     validate_non_binary_inputs(y_true, y_anomaly_scores)
+
      return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)
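As a quick illustration of how these score-based metrics are called with the new validation in place. The package-level import path is an assumption (suggested by the getattr(tsadmetrics, metric_name) lookup in compute_metrics_from_file later in this diff); the data values are illustrative.

import numpy as np
from tsadmetrics import precision_at_k, auc_roc_pw  # assumed package-level re-exports

y_true = np.array([0, 0, 1, 1, 0, 0, 1, 0])                     # binary ground-truth labels
y_scores = np.array([0.1, 0.2, 0.9, 0.7, 0.3, 0.1, 0.4, 0.2])   # continuous anomaly scores

print(precision_at_k(y_true, y_scores))  # k = sum(y_true) = 3, per the docstring above
print(auc_roc_pw(y_true, y_scores))

# With validate_non_binary_inputs, a non-binary ground truth now fails fast:
try:
    precision_at_k(np.array([0.0, 0.5, 1.0]), np.array([0.1, 0.2, 0.3]))
except ValueError as e:
    print(e)  # "Ground truth input array must contain only binary values (0 or 1)."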
@@ -0,0 +1,42 @@
+ #!/usr/bin/env python3
+ import argparse
+ from tsadmetrics.utils import compute_metrics_from_file
+
+
+ def main():
+
+     parser = argparse.ArgumentParser(
+         description='Compute metrics from anomaly detection results and configuration files.'
+     )
+
+
+     parser.add_argument(
+         '--res_file',
+         type=str,
+         required=True,
+         help='Path to the results CSV file (e.g., results.csv)'
+     )
+     parser.add_argument(
+         '--conf_file',
+         type=str,
+         required=True,
+         help='Path to the configuration JSON file (e.g., conf.json)'
+     )
+     parser.add_argument(
+         '--output_dir',
+         type=str,
+         required=True,
+         help='Directory where output files will be saved (e.g., ./output_dir)'
+     )
+
+     args = parser.parse_args()
+
+
+     compute_metrics_from_file(
+         results_file=args.res_file,
+         conf_file=args.conf_file,
+         output_dir=args.output_dir
+     )
+
+ if __name__ == '__main__':
+     main()
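Once the wheel is installed, this script is exposed as the tsadmetrics-compute console command (see the entry_points.txt entry at the end of this diff). A minimal sketch of invoking it from Python, assuming results.csv and conf.json already exist in the working directory:

import subprocess

# Equivalent to running in a shell:
#   tsadmetrics-compute --res_file results.csv --conf_file conf.json --output_dir ./output_dir
subprocess.run(
    [
        "tsadmetrics-compute",
        "--res_file", "results.csv",     # CSV with y_true and y_pred columns
        "--conf_file", "conf.json",      # JSON list of {"name": ..., "params": ...} records
        "--output_dir", "./output_dir",  # computed_metrics.csv is written here
    ],
    check=True,
)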
tsadmetrics/utils.py CHANGED
@@ -1,7 +1,8 @@
  import numpy as np
  import pandas as pd
  import time
-
+ import sys
+ import tsadmetrics
  def compute_metrics(y_true: np.array,y_pred: np.array, metrics: list, metrics_params: dict, is_anomaly_score = False, verbose = False):
      """
      Computes the specified metrics for the given true and predicted values.
@@ -52,4 +53,70 @@ def compute_metrics(y_true: np.array,y_pred: np.array, metrics: list, metrics_pa
      metrics_df['metric_name'] = results.keys()
      metrics_df['metric_value'] = results.values()

-     return metrics_df
+     return metrics_df
+
+
+ def compute_metrics_from_file(results_file: str, conf_file: str, output_dir: str = '.'):
+     """
+     Computes metrics based on prediction results from a CSV file and configuration from a JSON file.
+
+     Parameters:
+         results_file (str): Path to CSV file containing y_true and y_pred columns.
+         conf_file (str): Path to JSON configuration file with metrics and parameters.
+
+     Returns:
+         pd.DataFrame: DataFrame with computed metrics.
+     """
+     # Read results data
+     res = pd.read_csv(results_file)
+     y_true = res['y_true'].values
+     y_pred = res['y_pred'].values
+
+     # Determine if predictions are binary or scores
+     is_anomaly_score = False
+     unique_values = np.unique(y_pred)
+     if not (np.array_equal(unique_values, [0, 1]) or
+             np.array_equal(unique_values, [0]) or
+             np.array_equal(unique_values, [1])):
+         is_anomaly_score = True
+         if not np.all((y_pred >= 0) & (y_pred <= 1)):
+             raise ValueError("y_pred must be either binary (0/1) or anomaly scores in range [0, 1]")
+
+     # Read configuration from JSON using pandas
+     try:
+         config_df = pd.read_json(conf_file, orient='records')
+     except ValueError as e:
+         raise ValueError(f"Invalid JSON format in configuration file: {str(e)}")
+
+     # Convert pandas DataFrame to format expected by compute_metrics
+     metrics = []
+     metrics_params = {}
+
+     for _, row in config_df.iterrows():
+         metric_name = row['name']
+         try:
+             metric_func = getattr(tsadmetrics, metric_name)
+         except AttributeError:
+             raise ValueError(f"Metric function '{metric_name}' not found in tsadmetrics module")
+
+         metrics.append((metric_name, metric_func))
+
+         # Handle params (convert from pandas Series to dict if needed)
+         params = row.get('params', {})
+         if pd.notna(params) and params:  # Check for non-empty params
+             if isinstance(params, pd.Series):
+                 metrics_params[metric_name] = params.to_dict()
+             else:
+                 metrics_params[metric_name] = params
+
+     # Compute metrics
+     metrics_df = compute_metrics(
+         y_true=y_true,
+         y_pred=y_pred,
+         metrics=metrics,
+         metrics_params=metrics_params,
+         is_anomaly_score=is_anomaly_score,
+         verbose=False
+     )
+     metrics_df.to_csv(output_dir+'/computed_metrics.csv', index=False)
+
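A minimal end-to-end sketch of the expected file formats, inferred from the parsing code above (column names y_true/y_pred, a JSON list of records with "name" and optional "params"); the metric names are ones defined in this diff, everything else is illustrative:

import json
import pandas as pd
from tsadmetrics.utils import compute_metrics_from_file

# Results CSV: one row per time step with ground-truth labels and anomaly scores.
pd.DataFrame({
    "y_true": [0, 0, 1, 1, 0, 0, 1, 0],
    "y_pred": [0.1, 0.2, 0.9, 0.7, 0.3, 0.1, 0.4, 0.2],
}).to_csv("results.csv", index=False)

# Configuration JSON: a list of {"name": ..., "params": ...} records.
with open("conf.json", "w") as f:
    json.dump([
        {"name": "auc_roc_pw", "params": {}},
        {"name": "vus_pr", "params": {"window": 4}},
    ], f)

# Writes ./computed_metrics.csv with one row per metric.
compute_metrics_from_file("results.csv", "conf.json", output_dir=".")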
@@ -0,0 +1,35 @@
+ import numpy as np
+
+ def check_gt_binary_array(arr):
+     if len(arr.shape) != 1:
+         raise ValueError("Ground truth input must be a 1D binary array.")
+     if not np.all(np.isin(arr, [0, 1])):
+         raise ValueError("Ground truth input array must contain only binary values (0 or 1).")
+     return True
+
+ def check_pred_binary_array(arr):
+     if len(arr.shape) != 1:
+         raise ValueError("Prediction input must be a 1D binary array.")
+     if not np.all(np.isin(arr, [0, 1])):
+         raise ValueError("Prediction input array must contain only binary values (0 or 1).")
+     return True
+ def check_same_length(arr1, arr2):
+     if len(arr1) != len(arr2):
+         raise ValueError("Ground truth and prediction arrays must have the same length.")
+     return True
+
+ def check_pred_continuous_array(arr):
+     if len(arr.shape) != 1:
+         raise ValueError("Prediction input must be a 1D continuous array.")
+     if not np.all((arr >= 0) & (arr <= 1)):
+         raise ValueError("All values in the array must be in the range [0, 1].")
+     return True
+
+ def validate_binary_inputs(y_true, y_pred):
+     check_gt_binary_array(y_true)
+     check_pred_binary_array(y_pred)
+     check_same_length(y_true, y_pred)
+
+ def validate_non_binary_inputs(y_true, y_anomaly_scores):
+     check_gt_binary_array(y_true)
+     check_same_length(y_true, y_anomaly_scores)
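A short sketch of what these validators accept and reject (data values chosen purely for illustration):

import numpy as np
from tsadmetrics.validation import validate_binary_inputs, validate_non_binary_inputs

y_true = np.array([0, 1, 1, 0])

validate_binary_inputs(y_true, np.array([0, 1, 0, 0]))               # passes silently
validate_non_binary_inputs(y_true, np.array([0.2, 0.9, 0.8, 0.1]))   # scores' range is not checked here

try:
    validate_binary_inputs(y_true, np.array([0.0, 0.5, 1.0, 0.0]))   # non-binary prediction
except ValueError as e:
    print(e)  # "Prediction input array must contain only binary values (0 or 1)."

try:
    validate_non_binary_inputs(y_true, np.array([0.2, 0.9]))         # length mismatch
except ValueError as e:
    print(e)  # "Ground truth and prediction arrays must have the same length."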
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tsadmetrics
- Version: 0.1.14
+ Version: 0.1.16
  Summary: =?unknown-8bit?q?Librer=C3=ADa_para_evaluaci=C3=B3n_de_detecci=C3=B3n_de_anomal=C3=ADas?= en series temporales
  Home-page: https://github.com/pathsko/TSADmetrics
  Author: Pedro Rafael Velasco Priego
@@ -1,4 +1,4 @@
- docs/conf.py,sha256=skVqctOiByesc7wNDW5DpjyTxUCP0wxlpWA1fsJYZhk,1384
+ docs/conf.py,sha256=UvAyr0jPk75vQyREMEG3TIs96Pk-hslOgLQUpySp2tw,1645
  entorno/bin/activate_this.py,sha256=45dnJsdtOWIt5LtVSBmBfB8E7AlKcnhnZe9e3WGclak,1199
  entorno/bin/rst2html.py,sha256=h4RydG-iAectsUra0lNFGwB4_1mngxrtPPgQrxUWQ3A,643
  entorno/bin/rst2html4.py,sha256=Xiv3Zb1gk4jT7DYFVlf5w4LJtI5ZI3pW3b1KLxyPS5A,765
@@ -13,14 +13,16 @@ entorno/bin/rst2xetex.py,sha256=spisB81JgqAmMAkjdTaP8awFQS_Zuob9HIcbMi1kOS8,922
  entorno/bin/rst2xml.py,sha256=uoIfpn3prnir2tzqdycsAjOg-OWw663XOK47IeHCZdY,651
  entorno/bin/rstpep2html.py,sha256=sthYQHEgYfj4JqwG45URwVbRAs-HYuwKget7SUwp9fc,719
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- tests/test_binary.py,sha256=dj9BsKBo5rpWw4JGiKKoVkg4rIW4YylTie2VxH2DAGo,29787
- tests/test_non_binary.py,sha256=syANlwm0DKsL6geGeq6nQI6ZVe6T_YXWTyk2-Hmck4s,11308
- tsadmetrics/__init__.py,sha256=MTWOa43fgOdkMNo5NglCReRnB8hoF0ob2PIvDziCNHw,1575
- tsadmetrics/binary_metrics.py,sha256=6GxE3HSiAC9OeDOpP6QFgPwbp-Q37-F3cUdyYcpRrxE,62841
- tsadmetrics/metric_utils.py,sha256=fm8v0X37_AlqWpkcUT9r3680QsjLljrHe2YuXkRLAZ4,10873
- tsadmetrics/non_binary_metrics.py,sha256=O6AqceHrjCVV1kJPBzXQIgtiu6afzoiJz2biNsxf3_4,13389
+ tests/test_binary.py,sha256=Qt14fP-F-TW6KlPz6X-2DhtFpaHNrODiMA2DI39JrBI,39311
+ tests/test_non_binary.py,sha256=NLXnSvzu5hqsCSlPhNE2IJdE-r-YZv4I7iCCBiYrrsc,13962
+ tests/test_utils.py,sha256=8Favmlyix1YaAm03XuzMfEjLnq_Ud0YV_6aFwsIMpl8,2192
+ tsadmetrics/__init__.py,sha256=Qg5AvsmzqC3vhNC2WmRpHx4MYrmNBjnXjou9V-WfwE4,1603
+ tsadmetrics/binary_metrics.py,sha256=PiecIZ2z2B3-uCx1H3KXfLXdSIu8vxY5sUsIb2vmobk,69729
+ tsadmetrics/metric_utils.py,sha256=1nuHQp5fc7whPMfJTfWmKb6XmSngoe6p7fdsoP0Vz-I,2876
+ tsadmetrics/non_binary_metrics.py,sha256=oCpRQhHmbauXoYMWD3cuI1eycoTOwyfoOKA2D-UQHeM,14545
  tsadmetrics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- tsadmetrics/utils.py,sha256=BqsG4DyP3AffuMFQCJ-Qy4YaDu4IkFudZWYCvyTGvvY,2444
+ tsadmetrics/utils.py,sha256=TiOFwPNBgWWFCIwOO0BPFr5alRABhj597jsmevUjx54,4889
+ tsadmetrics/validation.py,sha256=fseGfpGhN-3zAMo2WZLxahcOAsOOyBb2RAFRDKB1KI8,1340
  tsadmetrics/_tsadeval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tsadmetrics/_tsadeval/auc_roc_pr_plot.py,sha256=PHqJUXq2qI248XV9o04D8SsUJgowetaKq0Cu5bYrIAE,12689
  tsadmetrics/_tsadeval/discontinuity_graph.py,sha256=Ci65l_DPi6HTtb8NvQJe1najgGrRuEpOMWvSyi2AeR0,4088
@@ -53,7 +55,10 @@ tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py,sha256=pJz4iuPyVGNvwsaR
  tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py,sha256=jLkcMg7UNl25SHtZUBGkP-RV8HsvaZCtjakryl7PFWU,3204
  tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py,sha256=OhUJSm_I7VZ_gX_SSg8AYUq3_NW9rMIy7lAVsnOFw4Q,417
  tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py,sha256=LL-0pPer3ymovVRlktaHo5XDzpgiDhWOVfdPOzKR6og,3152
- tsadmetrics-0.1.14.dist-info/METADATA,sha256=TCFL9Dpv6zwwM_5n2HeCxKgFP-KB4AHYrvCe3rMZMOI,831
- tsadmetrics-0.1.14.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- tsadmetrics-0.1.14.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
- tsadmetrics-0.1.14.dist-info/RECORD,,
+ tsadmetrics/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ tsadmetrics/scripts/compute_metrics.py,sha256=PwtH6XmpKEWwdY62pMfZGrgIBTIms0z3qVpw5LjnuwE,991
+ tsadmetrics-0.1.16.dist-info/METADATA,sha256=inUh6ZZm5fg0cpluGAHUR1ULoN4WP480ZjZ9MNuWxAo,831
+ tsadmetrics-0.1.16.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ tsadmetrics-0.1.16.dist-info/entry_points.txt,sha256=fnsO232FxrQC6pmeZnyZ4UaiXyvN1rKxksLKQO9n7q8,81
+ tsadmetrics-0.1.16.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
+ tsadmetrics-0.1.16.dist-info/RECORD,,
@@ -0,0 +1,2 @@
+ [console_scripts]
+ tsadmetrics-compute = tsadmetrics.scripts.compute_metrics:main