tsadmetrics 0.1.13__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -766,21 +766,21 @@ def range_based_f_score(y_true: np.array, y_pred: np.array, p_alpha: float, r_al


  Parameters:
- y_true (np.array):
- The ground truth binary labels for the time series data.
- y_pred (np.array):
- The predicted binary labels for the time series data.
- alpha (float):
- Relative importance of existence reward. 0 ≤ alpha ≤ 1.
- p_bias (str):
- Positional bias for precision. This should be "flat", "front", "middle", or "back".
- r_bias (str):
- Positional bias for recall. This should be "flat", "front", "middle", or "back".
- cardinality_mode (str, optional):
- Cardinality type. This should be "one", "reciprocal" or "udf_gamma".
- beta (float):
- The beta value, which determines the weight of precision in the combined score.
- Default is 1, which gives equal weight to precision and recall.
+ y_true (np.array):
+ The ground truth binary labels for the time series data.
+ y_pred (np.array):
+ The predicted binary labels for the time series data.
+ alpha (float):
+ Relative importance of existence reward. 0 ≤ alpha ≤ 1.
+ p_bias (str):
+ Positional bias for precision. This should be "flat", "front", "middle", or "back".
+ r_bias (str):
+ Positional bias for recall. This should be "flat", "front", "middle", or "back".
+ cardinality_mode (str, optional):
+ Cardinality type. This should be "one", "reciprocal" or "udf_gamma".
+ beta (float):
+ The beta value, which determines the weight of precision in the combined score.
+ Default is 1, which gives equal weight to precision and recall.

  Returns:
  float: The range-based F-score, which is the harmonic mean of precision and recall, adjusted by the beta value.
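
For context on how the beta parameter described above trades off precision against recall, here is a minimal, hypothetical sketch of the standard F-beta combination in plain Python (placeholder values, not the library's internal code, which first computes range-based precision and recall):

    def f_beta(precision, recall, beta=1.0):
        # Standard F-beta: beta = 1 is the plain harmonic mean (F1); in the usual
        # convention, beta > 1 weights recall more heavily, beta < 1 weights precision.
        if precision + recall == 0:
            return 0.0
        return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)

    # Hypothetical range-based precision/recall values, for illustration only.
    print(f_beta(0.8, 0.5, beta=1.0))  # ≈ 0.615
    print(f_beta(0.8, 0.5, beta=2.0))  # ≈ 0.541
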
@@ -1220,14 +1220,20 @@ def total_detected_in_range(y_true: np.array, y_pred: np.array, k: int):

  This metric measures the proportion of true anomaly events that are correctly detected.
  It is defined as:
- TDIR = (EM + DA) / (EM + DA + MA)
+
+ .. math::
+ \\text{TDIR} = \\frac{EM + DA}{EM + DA + MA}

  Where:
- EM (Exact Match) = number of predicted anomaly segments that exactly match a true anomaly segment.
- DA (Detected Anomaly)= number of true anomaly points not exactly matched where at least one prediction falls
- within a window [i-k, i+k] around the true point index i or within the true segment range.
- FA (False Anomaly) = number of predicted anomaly segments that do not overlap any true anomaly segment
- even within a k-step tolerance window around true points.
+
+ - EM (Exact Match):
+ Number of predicted anomaly segments that exactly match a true anomaly segment.
+ - DA (Detected Anomaly):
+ Number of true anomaly points not exactly matched where at least one prediction falls
+ within a window [i-k, i+k] around the true point index i or within the true segment range.
+ - MA (Missed Anomaly):
+ Number of true anomaly segments that do not overlap any predicted anomaly segment
+ even within a k-step tolerance window around true points.

  For more information, see the original paper:
  https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf
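
As a quick illustration of the TDIR formula documented above, a minimal sketch with hypothetical event counts (plain Python, not the package's implementation, which derives EM, DA and MA from y_true and y_pred):

    def tdir(em, da, ma):
        # TDIR = (EM + DA) / (EM + DA + MA); guard against the no-true-events case.
        total = em + da + ma
        return (em + da) / total if total else 0.0

    # Hypothetical counts: 3 exact matches, 2 tolerant detections, 1 missed segment.
    print(tdir(3, 2, 1))  # 5/6 ≈ 0.833
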
@@ -1258,14 +1264,20 @@ def detection_accuracy_in_range(y_true: np.array, y_pred: np.array, k: int):

  This metric measures the proportion of predicted anomaly events that correspond to true anomalies.
  It is defined as:
- DAIR = (EM + DA) / (EM + DA + FA)
+
+ .. math::
+ \\text{DAIR} = \\frac{EM + DA}{EM + DA + FA}

  Where:
- EM (Exact Match) = number of predicted anomaly segments that exactly match a true anomaly segment.
- DA (Detected Anomaly)= number of true anomaly points not exactly matched where at least one prediction falls
- within a window [i-k, i+k] around the true point index i or within the true segment range.
- FA (False Anomaly) = number of predicted anomaly segments that do not overlap any true anomaly segment
- even within a k-step tolerance window around true points.
+
+ - EM (Exact Match):
+ Number of predicted anomaly segments that exactly match a true anomaly segment.
+ - DA (Detected Anomaly):
+ Number of true anomaly points not exactly matched where at least one prediction falls
+ within a window [i-k, i+k] around the true point index i or within the true segment range.
+ - FA (False Anomaly):
+ Number of predicted anomaly segments that do not overlap any true anomaly segment
+ even within a k-step tolerance window around true points.

  For more information, see the original paper:
  https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf
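
In the same spirit, a minimal sketch of the DAIR formula with hypothetical counts (illustration only):

    def dair(em, da, fa):
        # DAIR = (EM + DA) / (EM + DA + FA); 0 when nothing relevant was predicted.
        flagged = em + da + fa
        return (em + da) / flagged if flagged else 0.0

    # Hypothetical counts: 3 exact matches, 2 tolerant detections, 4 false segments.
    print(dair(3, 2, 4))  # 5/9 ≈ 0.556
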
@@ -1280,7 +1292,7 @@ def detection_accuracy_in_range(y_true: np.array, y_pred: np.array, k: int):
  time steps of a true point counts toward detection.

  Returns:
- float: The total detected in range.
+ float: The detection accuracy in range score.
  """
  if np.sum(y_pred) == 0:
  return 0
@@ -1305,13 +1317,21 @@ def weighted_detection_difference(y_true: np.array, y_pred: np.array, k: int):
  not overlap any true anomaly segment (within the same extension).

  The final score is:
- WDD = WS - WF*FA
+
+ .. math::
+ \\text{WDD} = \\text{WS} - \\text{WF} \\cdot \\text{FA}

  Where:
- WS = Σ weights_true_predictions
- WF = Σ weights_false_positives
- FA (False Anomaly) = number of predicted anomaly segments that do not overlap any true anomaly segment
- even within a k-step tolerance window around true points.
+
+ - WS:
+ Sum of Gaussian weights for all predicted anomaly points that fall
+ within any true anomaly segment (extended by delta time steps at the ends).
+ - WF:
+ Sum of Gaussian weights for all predicted anomaly points that do not
+ overlap any true anomaly segment (within the same extension).
+ - FA (False Anomaly):
+ Number of predicted anomaly segments that do not overlap any true anomaly segment
+ even within a k-step tolerance window around true points.

  For more information, see the original paper:
  https://acta.sapientia.ro/content/docs/evaluation-metrics-for-anomaly-detection.pdf
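
A minimal sketch of how the final WDD score combines the two weight sums and the false-anomaly count, using hypothetical numbers (the Gaussian weighting of individual points is omitted; illustration only):

    def wdd(ws, wf, fa):
        # WDD = WS - WF * FA: weighted reward for detected points minus a penalty
        # that grows with the number of false anomaly segments.
        return ws - wf * fa

    # Hypothetical weight sums and false-anomaly count.
    print(wdd(ws=12.4, wf=1.5, fa=3))  # 12.4 - 1.5 * 3 = 7.9
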
@@ -1400,7 +1420,10 @@ def mean_time_to_detect(y_true: np.array, y_pred: np.array):
  For each ground-truth anomaly segment, let i be the index where the segment starts,
  and let j ≥ i be the first index within that segment where the model predicts an anomaly.
  The detection delay for that event is defined as:
- Δ = j - i
+
+ .. math::
+ \Delta = j - i
+
  The MTTD is the mean of all such Δ values, one per true anomaly segment, and expresses
  the average number of time steps between the true onset of an anomaly and its first detection.
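
A minimal numpy sketch of this delay definition (hypothetical helper, not the library's implementation; true segments that contain no prediction at all are simply skipped here, which the docstring does not specify):

    import numpy as np

    def mttd_sketch(y_true, y_pred):
        y_true = np.asarray(y_true)
        y_pred = np.asarray(y_pred)
        # Segment starts: indices i where y_true switches 0 -> 1.
        starts = np.where(np.diff(np.concatenate(([0], y_true))) == 1)[0]
        # Segment ends (exclusive): indices where y_true switches 1 -> 0.
        ends = np.where(np.diff(np.concatenate((y_true, [0]))) == -1)[0] + 1
        delays = []
        for i, end in zip(starts, ends):
            hits = np.where(y_pred[i:end] == 1)[0]
            if hits.size:
                delays.append(hits[0])  # delay = j - i for the first hit j
        return float(np.mean(delays)) if delays else 0.0

    # Two true segments, detected with delays 1 and 0 -> MTTD = 0.5
    print(mttd_sketch([0, 1, 1, 1, 0, 0, 1, 1, 0],
                      [0, 0, 1, 1, 0, 0, 1, 0, 0]))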
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tsadmetrics
- Version: 0.1.13
+ Version: 0.1.14
  Summary: Librería para evaluación de detección de anomalías en series temporales
  Home-page: https://github.com/pathsko/TSADmetrics
  Author: Pedro Rafael Velasco Priego
@@ -16,7 +16,7 @@ tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tests/test_binary.py,sha256=dj9BsKBo5rpWw4JGiKKoVkg4rIW4YylTie2VxH2DAGo,29787
  tests/test_non_binary.py,sha256=syANlwm0DKsL6geGeq6nQI6ZVe6T_YXWTyk2-Hmck4s,11308
  tsadmetrics/__init__.py,sha256=MTWOa43fgOdkMNo5NglCReRnB8hoF0ob2PIvDziCNHw,1575
- tsadmetrics/binary_metrics.py,sha256=UdXoITIATf585Kvz5i3xhdqEVUogrUHMmJgxj-HoeB0,62477
+ tsadmetrics/binary_metrics.py,sha256=6GxE3HSiAC9OeDOpP6QFgPwbp-Q37-F3cUdyYcpRrxE,62841
  tsadmetrics/metric_utils.py,sha256=fm8v0X37_AlqWpkcUT9r3680QsjLljrHe2YuXkRLAZ4,10873
  tsadmetrics/non_binary_metrics.py,sha256=O6AqceHrjCVV1kJPBzXQIgtiu6afzoiJz2biNsxf3_4,13389
  tsadmetrics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -53,7 +53,7 @@ tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py,sha256=pJz4iuPyVGNvwsaR
  tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py,sha256=jLkcMg7UNl25SHtZUBGkP-RV8HsvaZCtjakryl7PFWU,3204
  tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py,sha256=OhUJSm_I7VZ_gX_SSg8AYUq3_NW9rMIy7lAVsnOFw4Q,417
  tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py,sha256=LL-0pPer3ymovVRlktaHo5XDzpgiDhWOVfdPOzKR6og,3152
- tsadmetrics-0.1.13.dist-info/METADATA,sha256=k3BVjOM_Ife2-uXsEWu3o-6tRTUXUqLDd9I9NBV-61g,831
- tsadmetrics-0.1.13.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- tsadmetrics-0.1.13.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
- tsadmetrics-0.1.13.dist-info/RECORD,,
+ tsadmetrics-0.1.14.dist-info/METADATA,sha256=TCFL9Dpv6zwwM_5n2HeCxKgFP-KB4AHYrvCe3rMZMOI,831
+ tsadmetrics-0.1.14.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ tsadmetrics-0.1.14.dist-info/top_level.txt,sha256=s2VIr_ePl-WZbYt9FsYbsDGM7J-Qc5cgpwEOeQ3FVpM,31
+ tsadmetrics-0.1.14.dist-info/RECORD,,