tsadmetrics 0.1.8__tar.gz → 0.1.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. {tsadmetrics-0.1.8/tsadmetrics.egg-info → tsadmetrics-0.1.10}/PKG-INFO +1 -1
  2. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/pyproject.toml +1 -1
  3. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/setup.py +1 -1
  4. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tests/test_non_binary.py +2 -2
  5. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_integral_interval.py +22 -22
  6. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/nabscore.py +2 -0
  7. tsadmetrics-0.1.10/tsadmetrics/non_binary_metrics.py +216 -0
  8. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10/tsadmetrics.egg-info}/PKG-INFO +1 -1
  9. tsadmetrics-0.1.8/tsadmetrics/non_binary_metrics.py +0 -92
  10. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/MANIFEST.in +0 -0
  11. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/README.md +0 -0
  12. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/activate_this.py +0 -0
  13. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2html.py +0 -0
  14. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2html4.py +0 -0
  15. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2html5.py +0 -0
  16. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2latex.py +0 -0
  17. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2man.py +0 -0
  18. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2odt.py +0 -0
  19. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2odt_prepstyles.py +0 -0
  20. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2pseudoxml.py +0 -0
  21. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2s5.py +0 -0
  22. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2xetex.py +0 -0
  23. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rst2xml.py +0 -0
  24. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/entorno/bin/rstpep2html.py +0 -0
  25. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/setup.cfg +0 -0
  26. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tests/__init__.py +0 -0
  27. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tests/test_binary.py +0 -0
  28. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/__init__.py +0 -0
  29. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/__init__.py +0 -0
  30. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
  31. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -0
  32. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -0
  33. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/generics.py +0 -0
  34. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/affiliation/metrics.py +0 -0
  35. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -0
  36. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/discontinuity_graph.py +0 -0
  37. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -0
  38. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -0
  39. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -0
  40. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  41. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  42. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -0
  43. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -0
  44. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/latency_sparsity_aware.py +0 -0
  45. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/metrics.py +0 -0
  46. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  47. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  48. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -0
  49. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -0
  50. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  51. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -0
  52. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -0
  53. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -0
  54. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -0
  55. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/tests.py +0 -0
  56. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/threshold_plt.py +0 -0
  57. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/time_tolerant.py +0 -0
  58. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/_tsadeval/vus_utils.py +0 -0
  59. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/binary_metrics.py +0 -0
  60. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/metric_utils.py +0 -0
  61. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/py.typed +0 -0
  62. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics/utils.py +0 -0
  63. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/SOURCES.txt +0 -0
  64. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/dependency_links.txt +0 -0
  65. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/requires.txt +0 -0
  66. {tsadmetrics-0.1.8 → tsadmetrics-0.1.10}/tsadmetrics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tsadmetrics
3
- Version: 0.1.8
3
+ Version: 0.1.10
4
4
  Summary: Librería para evaluación de detección de anomalías en series temporales
5
5
  Home-page: https://github.com/pathsko/TSADmetrics
6
6
  Author: Pedro Rafael Velasco Priego
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "tsadmetrics"
3
- version = "0.1.8"
3
+ version = "0.1.10"
4
4
  description = "Librería para evaluación de detección de anomalías en series temporales"
5
5
  authors = [
6
6
  { name = "Pedro Rafael Velasco Priego", email = "i12veprp@uco.es" }
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
5
5
 
6
6
  setup(
7
7
  name="tsadmetrics",
8
- version="0.1.8",
8
+ version="0.1.10",
9
9
  author="Pedro Rafael Velasco Priego",
10
10
  author_email="i12veprp@uco.es",
11
11
  description="A library for time series anomaly detection metrics and evaluation.",
@@ -202,7 +202,7 @@ class TestAUCPRPA(unittest.TestCase):
202
202
  self.assertAlmostEqual(score, expected_score, places=4)
203
203
 
204
204
  score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
205
- expected_score = 0.5
205
+ expected_score = 0.75
206
206
  self.assertAlmostEqual(score, expected_score, places=4)
207
207
 
208
208
  if len(self.y_true2) == len(self.y_pred4):
@@ -252,7 +252,7 @@ class TestAUCPRSW(unittest.TestCase):
252
252
  self.assertAlmostEqual(score, expected_score, places=4)
253
253
 
254
254
  score = round(auc_pr_sw(self.y_true1, self.y_pred3),2)
255
- expected_score = 0.5
255
+ expected_score = 1
256
256
  self.assertAlmostEqual(score, expected_score, places=4)
257
257
 
258
258
 
@@ -125,13 +125,13 @@ def get_pivot_j(I, J):
125
125
  def integral_mini_interval(I, J):
126
126
  """
127
127
  In the specific case where interval I is located outside J,
128
- integral of distance from x to J over the interval x \in I.
128
+ integral of distance from x to J over the interval x in I.
129
129
  This is the *integral* i.e. the sum.
130
130
  It's not the mean (not divided by the length of I yet)
131
131
 
132
132
  :param I: a interval (start, stop), or None
133
133
  :param J: a non empty interval, with empty intersection with I
134
- :return: the integral of distances d(x, J) over x \in I
134
+ :return: the integral of distances d(x, J) over x in I
135
135
  """
136
136
  if I is None:
137
137
  return(0)
@@ -144,14 +144,14 @@ def integral_mini_interval(I, J):
144
144
  def integral_interval_distance(I, J):
145
145
  """
146
146
  For any non empty intervals I, J, compute the
147
- integral of distance from x to J over the interval x \in I.
147
+ integral of distance from x to J over the interval x in I.
148
148
  This is the *integral* i.e. the sum.
149
149
  It's not the mean (not divided by the length of I yet)
150
150
  The interval I can intersect J or not
151
151
 
152
152
  :param I: a interval (start, stop), or None
153
153
  :param J: a non empty interval
154
- :return: the integral of distances d(x, J) over x \in I
154
+ :return: the integral of distances d(x, J) over x in I
155
155
  """
156
156
  # I and J are single intervals (not generic sets)
157
157
  # I is a predicted interval in the range of affiliation of J
@@ -178,15 +178,15 @@ def integral_mini_interval_P_CDFmethod__min_piece(I, J, E):
178
178
  """
179
179
  Helper of `integral_mini_interval_Pprecision_CDFmethod`
180
180
  In the specific case where interval I is located outside J,
181
- compute the integral $\int_{d_min}^{d_max} \min(m, x) dx$, with:
181
+ compute the integral $int_{d_min}^{d_max} min(m, x) dx$, with:
182
182
  - m the smallest distance from J to E,
183
- - d_min the smallest distance d(x, J) from x \in I to J
184
- - d_max the largest distance d(x, J) from x \in I to J
183
+ - d_min the smallest distance d(x, J) from x in I to J
184
+ - d_max the largest distance d(x, J) from x in I to J
185
185
 
186
186
  :param I: a single predicted interval, a non empty interval (start, stop)
187
187
  :param J: ground truth interval, a non empty interval, with empty intersection with I
188
188
  :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
189
- :return: the integral $\int_{d_min}^{d_max} \min(m, x) dx$
189
+ :return: the integral $int_{d_min}^{d_max} min(m, x) dx$
190
190
  """
191
191
  if interval_intersection(I, J) is not None:
192
192
  raise ValueError('I and J should have a void intersection')
@@ -214,13 +214,13 @@ def integral_mini_interval_Pprecision_CDFmethod(I, J, E):
214
214
  """
215
215
  Integral of the probability of distances over the interval I.
216
216
  In the specific case where interval I is located outside J,
217
- compute the integral $\int_{x \in I} Fbar(dist(x,J)) dx$.
217
+ compute the integral $int_{x in I} Fbar(dist(x,J)) dx$.
218
218
  This is the *integral* i.e. the sum (not the mean)
219
219
 
220
220
  :param I: a single predicted interval, a non empty interval (start, stop)
221
221
  :param J: ground truth interval, a non empty interval, with empty intersection with I
222
222
  :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
223
- :return: the integral $\int_{x \in I} Fbar(dist(x,J)) dx$
223
+ :return: the integral $int_{x in I} Fbar(dist(x,J)) dx$
224
224
  """
225
225
  integral_min_piece = integral_mini_interval_P_CDFmethod__min_piece(I, J, E)
226
226
 
@@ -244,13 +244,13 @@ def integral_mini_interval_Pprecision_CDFmethod(I, J, E):
244
244
  def integral_interval_probaCDF_precision(I, J, E):
245
245
  """
246
246
  Integral of the probability of distances over the interval I.
247
- Compute the integral $\int_{x \in I} Fbar(dist(x,J)) dx$.
247
+ Compute the integral $int_{x in I} Fbar(dist(x,J)) dx$.
248
248
  This is the *integral* i.e. the sum (not the mean)
249
249
 
250
250
  :param I: a single (non empty) predicted interval in the zone of affiliation of J
251
251
  :param J: ground truth interval
252
252
  :param E: affiliation/influence zone for J
253
- :return: the integral $\int_{x \in I} Fbar(dist(x,J)) dx$
253
+ :return: the integral $int_{x in I} Fbar(dist(x,J)) dx$
254
254
  """
255
255
  # I and J are single intervals (not generic sets)
256
256
  def f(I_cut):
@@ -307,13 +307,13 @@ def integral_mini_interval_Precall_CDFmethod(I, J, E):
307
307
  """
308
308
  Integral of the probability of distances over the interval J.
309
309
  In the specific case where interval J is located outside I,
310
- compute the integral $\int_{y \in J} Fbar_y(dist(y,I)) dy$.
310
+ compute the integral $int_{y in J} Fbar_y(dist(y,I)) dy$.
311
311
  This is the *integral* i.e. the sum (not the mean)
312
312
 
313
313
  :param I: a single (non empty) predicted interval
314
314
  :param J: ground truth (non empty) interval, with empty intersection with I
315
315
  :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
316
- :return: the integral $\int_{y \in J} Fbar_y(dist(y,I)) dy$
316
+ :return: the integral $int_{y in J} Fbar_y(dist(y,I)) dy$
317
317
  """
318
318
  # The interval J should be located outside I
319
319
  # (so it's either the left piece or the right piece w.r.t I)
@@ -377,7 +377,7 @@ def integral_mini_interval_Precall_CDFmethod(I, J, E):
377
377
  # j_bb_min j_bb_max j_ba_min j_ba_max j_ab_min j_ab_max j_aa_min j_aa_max
378
378
  # (with `b` for before and `a` for after in the previous variable names)
379
379
 
380
- # vs e_mean m = min(t-e_min, e_max-t) d=|i_pivot-t| min(d,m) \int min(d,m)dt \int d dt \int_(min(d,m)+d)dt \int_{t \in J}(min(d,m)+d)dt
380
+ # vs e_mean m = min(t-e_min, e_max-t) d=|i_pivot-t| min(d,m) int min(d,m)dt int d dt int_(min(d,m)+d)dt int_{t in J}(min(d,m)+d)dt
381
381
  # Case J_before_closeE & i_pivot after J before t-e_min i_pivot-t min(i_pivot-t,t-e_min) = t-e_min t^2/2-e_min*t i_pivot*t-t^2/2 t^2/2-e_min*t+i_pivot*t-t^2/2 = (i_pivot-e_min)*t (i_pivot-e_min)*tB - (i_pivot-e_min)*tA = (i_pivot-e_min)*(tB-tA)
382
382
  # Case J_before_closeI & i_pivot after J before t-e_min i_pivot-t min(i_pivot-t,t-e_min) = i_pivot-t i_pivot*t-t^2/2 i_pivot*t-t^2/2 i_pivot*t-t^2/2+i_pivot*t-t^2/2 = 2*i_pivot*t-t^2 2*i_pivot*tB-tB^2 - 2*i_pivot*tA + tA^2 = 2*i_pivot*(tB-tA) - (tB^2 - tA^2)
383
383
  # Case J_after_closeI & i_pivot after J after e_max-t i_pivot-t min(i_pivot-t,e_max-t) = i_pivot-t i_pivot*t-t^2/2 i_pivot*t-t^2/2 i_pivot*t-t^2/2+i_pivot*t-t^2/2 = 2*i_pivot*t-t^2 2*i_pivot*tB-tB^2 - 2*i_pivot*tA + tA^2 = 2*i_pivot*(tB-tA) - (tB^2 - tA^2)
@@ -406,12 +406,12 @@ def integral_mini_interval_Precall_CDFmethod(I, J, E):
406
406
  out_integral_min_dm_plus_d = _sum_wo_nan(out_parts) # integral on all J, i.e. sum of the disjoint parts
407
407
 
408
408
  # We have for each point t of J:
409
- # \bar{F}_{t, recall}(d) = 1 - (1/|E|) * (min(d,m) + d)
409
+ # bar{F}_{t, recall}(d) = 1 - (1/|E|) * (min(d,m) + d)
410
410
  # Since t is a single-point here, and we are in the case where i_pivot is inside E.
411
411
  # The integral is then given by:
412
- # C = \int_{t \in J} \bar{F}_{t, recall}(D(t)) dt
413
- # = \int_{t \in J} 1 - (1/|E|) * (min(d,m) + d) dt
414
- # = |J| - (1/|E|) * [\int_{t \in J} (min(d,m) + d) dt]
412
+ # C = int_{t in J} bar{F}_{t, recall}(D(t)) dt
413
+ # = int_{t in J} 1 - (1/|E|) * (min(d,m) + d) dt
414
+ # = |J| - (1/|E|) * [int_{t in J} (min(d,m) + d) dt]
415
415
  # = |J| - (1/|E|) * out_integral_min_dm_plus_d
416
416
  DeltaJ = max(J) - min(J)
417
417
  DeltaE = max(E) - min(E)
@@ -422,17 +422,17 @@ def integral_mini_interval_Precall_CDFmethod(I, J, E):
422
422
  def integral_interval_probaCDF_recall(I, J, E):
423
423
  """
424
424
  Integral of the probability of distances over the interval J.
425
- Compute the integral $\int_{y \in J} Fbar_y(dist(y,I)) dy$.
425
+ Compute the integral $int_{y in J} Fbar_y(dist(y,I)) dy$.
426
426
  This is the *integral* i.e. the sum (not the mean)
427
427
 
428
428
  :param I: a single (non empty) predicted interval
429
429
  :param J: ground truth (non empty) interval
430
430
  :param E: the affiliation/influence zone for J
431
- :return: the integral $\int_{y \in J} Fbar_y(dist(y,I)) dy$
431
+ :return: the integral $int_{y in J} Fbar_y(dist(y,I)) dy$
432
432
  """
433
433
  # I and J are single intervals (not generic sets)
434
434
  # E is the outside affiliation interval of J (even for recall!)
435
- # (in particular J \subset E)
435
+ # (in particular J subset E)
436
436
  #
437
437
  # J is the portion of the ground truth affiliated to I
438
438
  # I is a predicted interval (can be outside E possibly since it's recall)
@@ -176,6 +176,8 @@ class Sweeper(object):
176
176
  unweightedScore = -1.0
177
177
  else:
178
178
  numerator = abs(prevWindowRightIndex - i)
179
+ if prevWindowWidth==1:
180
+ prevWindowWidth+=1
179
181
  denominator = float(prevWindowWidth - 1)
180
182
  positionPastWindow = numerator / denominator
181
183
  unweightedScore = scaledSigmoid(positionPastWindow)
@@ -0,0 +1,216 @@
1
+ import numpy as np
2
+ from ._tsadeval.metrics import *
3
+ from .metric_utils import transform_to_full_series
4
+ from sklearn.metrics import auc
5
+ from .binary_metrics import point_adjusted_precision, point_adjusted_recall, segment_wise_precision, segment_wise_recall
6
+ from pate.PATE_metric import PATE
7
+ def precision_at_k(y_true : np.array ,y_anomaly_scores: np.array):
8
+
9
+ m = PatK_pw(y_true,y_anomaly_scores)
10
+
11
+ return m.get_score()
12
+
13
+ def auc_roc_pw(y_true : np.array ,y_anomaly_scores: np.array):
14
+
15
+ m = AUC_ROC(y_true,y_anomaly_scores)
16
+
17
+ return m.get_score()
18
+
19
+
20
+ def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
21
+
22
+ m = AUC_PR_pw(y_true,y_anomaly_scores)
23
+
24
+ return m.get_score()
25
+
26
+
27
+
28
+ def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
29
+ precisions = [1]
30
+ recalls = [0]
31
+ tps,fps,fns = [],[],[]
32
+
33
+ p_adj = PointAdjust(len(y_true),y_true,(np.array(y_anomaly_scores) >= 0.5).astype(int))
34
+ segments= p_adj.get_gt_anomalies_segmentwise()
35
+ idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
36
+ y_true_sorted = np.array(y_true)[idx]
37
+ y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
38
+
39
+ segment_mins = []
40
+ for start,end in segments:
41
+ anoms_scores = y_anomaly_scores[start:end+1]
42
+ segment_mins.append([np.max(anoms_scores),end-start+1])
43
+
44
+ for i_t in range(len(y_anomaly_scores_sorted)):
45
+ fp,tp,fn = 0,0,0
46
+ if i_t > 0 and y_anomaly_scores_sorted[i_t] == y_anomaly_scores_sorted[i_t-1] :
47
+ tp = tps[-1]
48
+ fp = fps[-1]
49
+ fn = fns[-1]
50
+ else:
51
+ if y_true_sorted[i_t] == 0:
52
+ #FP
53
+ if len(fps)==0:
54
+ aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
55
+ for i in range(len(aux_y_pred)):
56
+ if aux_y_pred[i] == 1 and y_true[i] == 0:
57
+ fp+=1
58
+
59
+
60
+ else:
61
+ fp=fps[i_t-1]+1
62
+ else:
63
+ if len(fps)==0:
64
+ aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
65
+ for i in range(len(aux_y_pred)):
66
+ if aux_y_pred[i] == 1 and y_true[i] == 0:
67
+ fp+=1
68
+ else:
69
+ fp=fps[i_t-1]
70
+ for score, length in segment_mins:
71
+ if score >= y_anomaly_scores_sorted[i_t]:
72
+ #TP
73
+ tp+= length
74
+ else:
75
+ #FN
76
+ fn+= length
77
+ tps.append(tp)
78
+ fns.append(fn)
79
+ fps.append(fp)
80
+ for tp,fp,fn in zip(tps,fps,fns):
81
+ if tp>0:
82
+ precisions.append(tp/(tp+fp))
83
+ recalls.append(tp/(tp+fn))
84
+ else:
85
+ precisions.append(0)
86
+ recalls.append(0)
87
+
88
+
89
+ recalls.append(1)
90
+ precisions.append(0)
91
+
92
+ auc_value = auc(recalls, precisions)
93
+ return auc_value
94
+
95
+
96
+
97
+
98
+ def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
99
+ precisions = [1]
100
+ recalls = [0]
101
+ tps,fps,fns = [],[],[]
102
+
103
+
104
+ segments = []
105
+ i=0
106
+ while i < len(y_true):
107
+ if y_true[i] == 1:
108
+ start = i
109
+ end = i
110
+ while i < len(y_true) and y_true[i] == 1:
111
+ end = i
112
+ i += 1
113
+ segments.append([start,end])
114
+ i+=1
115
+ idx = np.argsort(y_anomaly_scores)[::-1].astype(int)
116
+ y_anomaly_scores_sorted = np.array(y_anomaly_scores)[idx]
117
+
118
+ segment_mins = []
119
+ for start,end in segments:
120
+ anoms_scores = y_anomaly_scores[start:end+1]
121
+ segment_mins.append([np.max(anoms_scores),[start,end]])
122
+
123
+ for i_t in range(len(y_anomaly_scores_sorted)):
124
+ fp,tp,fn = 0,0,0
125
+
126
+
127
+ aux_y_pred = (y_anomaly_scores >= y_anomaly_scores_sorted[i_t]).astype(int)
128
+ for score,seg in segment_mins:
129
+ start,end = seg
130
+ if score >= y_anomaly_scores_sorted[i_t]:
131
+ #TP
132
+ tp+= 1
133
+ if aux_y_pred[start]== 1:
134
+ # Extender hacia la izquierda
135
+ i = start - 1
136
+ while i >= 0 and aux_y_pred[i] == 1:
137
+ aux_y_pred[i] = 0
138
+ i -= 1
139
+
140
+ if aux_y_pred[end] == 1:
141
+ # Extender hacia la derecha
142
+ i = end + 1
143
+ while i < len(aux_y_pred) and aux_y_pred[i] == 1:
144
+ aux_y_pred[i] = 0
145
+ i += 1
146
+ aux_y_pred[start:end+1] = 0
147
+
148
+ else:
149
+ #FN
150
+ fn+= 1
151
+
152
+ if np.sum(aux_y_pred)>0:
153
+ fpsegments = []
154
+ i=0
155
+ while i < len(aux_y_pred):
156
+ if aux_y_pred[i] == 1:
157
+ start = i
158
+ end = i
159
+ while i < len(aux_y_pred) and aux_y_pred[i] == 1:
160
+ end = i
161
+ i += 1
162
+ fpsegments.append([start,end])
163
+ i+=1
164
+ fp = len(fpsegments)
165
+ else:
166
+ fp = 0
167
+
168
+
169
+ tps.append(tp)
170
+ fns.append(fn)
171
+ fps.append(fp)
172
+ for tp,fp,fn in zip(tps,fps,fns):
173
+ if tp>0:
174
+ precisions.append(tp/(tp+fp))
175
+ recalls.append(tp/(tp+fn))
176
+ else:
177
+ precisions.append(0)
178
+ recalls.append(0)
179
+
180
+
181
+
182
+ auc_value = auc(recalls, precisions)
183
+
184
+ return auc_value
185
+
186
+
187
+ def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
188
+
189
+ m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
190
+
191
+ return m.get_score()
192
+
193
+
194
+ def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
195
+
196
+ m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
197
+
198
+ return m.get_score()
199
+
200
+
201
+ def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: int):
202
+ """
203
+ Calculate PATE score for anomaly detection in time series.
204
+ The PATE score is the ratio of the number of true positives to the sum of true positives, false positives, and false negatives, within a given early and delay range.
205
+
206
+ Parameters:
207
+ y_true (np.array): The ground truth binary labels for the time series data.
208
+ y_anomaly_scores (np.array): The predicted continuous anomaly scores for the time series data (passed to PATE with binary_scores=False).
209
+ early (int): The maximum number of time steps before an anomaly must be predicted to be considered early.
210
+ delay (int): The maximum number of time steps after an anomaly must be predicted to be considered delayed.
211
+
212
+ Returns:
213
+ float: The PATE score.
214
+ """
215
+
216
+ return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tsadmetrics
3
- Version: 0.1.8
3
+ Version: 0.1.10
4
4
  Summary: Librería para evaluación de detección de anomalías en series temporales
5
5
  Home-page: https://github.com/pathsko/TSADmetrics
6
6
  Author: Pedro Rafael Velasco Priego
@@ -1,92 +0,0 @@
1
- import numpy as np
2
- from ._tsadeval.metrics import *
3
- from .metric_utils import transform_to_full_series
4
- from sklearn.metrics import auc
5
- from .binary_metrics import point_adjusted_precision, point_adjusted_recall, segment_wise_precision, segment_wise_recall
6
- from pate.PATE_metric import PATE
7
- def precision_at_k(y_true : np.array ,y_anomaly_scores: np.array):
8
-
9
- m = PatK_pw(y_true,y_anomaly_scores)
10
-
11
- return m.get_score()
12
-
13
- def auc_roc_pw(y_true : np.array ,y_anomaly_scores: np.array):
14
-
15
- m = AUC_ROC(y_true,y_anomaly_scores)
16
-
17
- return m.get_score()
18
-
19
-
20
- def auc_pr_pw(y_true : np.array ,y_anomaly_scores: np.array):
21
-
22
- m = AUC_PR_pw(y_true,y_anomaly_scores)
23
-
24
- return m.get_score()
25
-
26
-
27
-
28
- def auc_pr_pa(y_true: np.array, y_anomaly_scores: np.array):
29
- thresholds = np.unique(y_anomaly_scores)[::-1] # Descending order
30
- precisions = [1]
31
- recalls = [0]
32
- for t in thresholds[:-1]:
33
-
34
- y_pred = (y_anomaly_scores >= t).astype(int)
35
-
36
-
37
- precisions.append(point_adjusted_precision(y_true, y_pred))
38
- recalls.append(point_adjusted_recall(y_true, y_pred))
39
-
40
- recalls.append(1)
41
- precisions.append(0)
42
- auc_value = auc(recalls, precisions)
43
- return auc_value
44
-
45
-
46
-
47
-
48
- def auc_pr_sw(y_true: np.array, y_anomaly_scores: np.array):
49
- thresholds = np.unique(y_anomaly_scores)[::-1] # Descending order
50
- precisions = [1]
51
- recalls = [0]
52
-
53
- for t in thresholds[:-1]:
54
- y_pred = (y_anomaly_scores >= t).astype(int)
55
- precisions.append(segment_wise_precision(y_true, y_pred))
56
- recalls.append(segment_wise_recall(y_true, y_pred))
57
- recalls.append(1)
58
- precisions.append(0)
59
- auc_value = auc(recalls, precisions)
60
- return auc_value
61
-
62
-
63
- def vus_roc(y_true : np.array ,y_anomaly_scores: np.array, window=4):
64
-
65
- m = VUS_ROC(y_true,y_anomaly_scores,max_window=window)
66
-
67
- return m.get_score()
68
-
69
-
70
- def vus_pr(y_true : np.array ,y_anomaly_scores: np.array, window=4):
71
-
72
- m = VUS_PR(y_true,y_anomaly_scores,max_window=window)
73
-
74
- return m.get_score()
75
-
76
-
77
- def real_pate(y_true: np.array, y_anomaly_scores: np.array, early: int, delay: int):
78
- """
79
- Calculate PATE score for anomaly detection in time series.
80
- The PATE score is the ratio of the number of true positives to the sum of true positives, false positives, and false negatives, within a given early and delay range.
81
-
82
- Parameters:
83
- y_true (np.array): The ground truth binary labels for the time series data.
84
- y_anomaly_scores (np.array): The predicted binary labels for the time series data.
85
- early (int): The maximum number of time steps before an anomaly must be predicted to be considered early.
86
- delay (int): The maximum number of time steps after an anomaly must be predicted to be considered delayed.
87
-
88
- Returns:
89
- float: The PATE score.
90
- """
91
-
92
- return PATE(y_true, y_anomaly_scores, early, delay, binary_scores=False)
File without changes
File without changes
File without changes