tsadmetrics 0.1.17__tar.gz → 1.0.0__tar.gz

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (158)
  1. {tsadmetrics-0.1.17/tsadmetrics.egg-info → tsadmetrics-1.0.0}/PKG-INFO +21 -6
  2. tsadmetrics-1.0.0/README.md +46 -0
  3. {tsadmetrics-0.1.17/docs_manual → tsadmetrics-1.0.0/docs/api_doc}/conf.py +3 -26
  4. {tsadmetrics-0.1.17/docs → tsadmetrics-1.0.0/docs/full_doc}/conf.py +1 -1
  5. {tsadmetrics-0.1.17/docs_api → tsadmetrics-1.0.0/docs/manual_doc}/conf.py +3 -26
  6. tsadmetrics-1.0.0/examples/example_direct_data.py +28 -0
  7. tsadmetrics-1.0.0/examples/example_direct_single_data.py +25 -0
  8. tsadmetrics-1.0.0/examples/example_file_reference.py +24 -0
  9. tsadmetrics-1.0.0/examples/example_global_config_file.py +13 -0
  10. tsadmetrics-1.0.0/examples/example_metric_config_file.py +19 -0
  11. tsadmetrics-1.0.0/examples/example_simple_metric.py +8 -0
  12. tsadmetrics-1.0.0/examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  13. tsadmetrics-1.0.0/examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  14. tsadmetrics-1.0.0/examples/specific_examples/AverageDetectionCount_example.py +24 -0
  15. tsadmetrics-1.0.0/examples/specific_examples/CompositeFScore_example.py +24 -0
  16. tsadmetrics-1.0.0/examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  17. tsadmetrics-1.0.0/examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  18. tsadmetrics-1.0.0/examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  19. tsadmetrics-1.0.0/examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  20. tsadmetrics-1.0.0/examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  21. tsadmetrics-1.0.0/examples/specific_examples/NabScore_example.py +24 -0
  22. tsadmetrics-1.0.0/examples/specific_examples/PateFScore_example.py +24 -0
  23. tsadmetrics-1.0.0/examples/specific_examples/Pate_example.py +24 -0
  24. tsadmetrics-1.0.0/examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  25. tsadmetrics-1.0.0/examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  26. tsadmetrics-1.0.0/examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  27. tsadmetrics-1.0.0/examples/specific_examples/PointadjustedFScore_example.py +24 -0
  28. tsadmetrics-1.0.0/examples/specific_examples/RangebasedFScore_example.py +24 -0
  29. tsadmetrics-1.0.0/examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  30. tsadmetrics-1.0.0/examples/specific_examples/TemporalDistance_example.py +24 -0
  31. tsadmetrics-1.0.0/examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  32. tsadmetrics-1.0.0/examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  33. tsadmetrics-1.0.0/examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  34. tsadmetrics-1.0.0/examples/specific_examples/VusPr_example.py +24 -0
  35. tsadmetrics-1.0.0/examples/specific_examples/VusRoc_example.py +24 -0
  36. tsadmetrics-1.0.0/examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  37. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/pyproject.toml +1 -4
  38. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/setup.py +4 -8
  39. tsadmetrics-1.0.0/tests/test_dpm.py +212 -0
  40. tsadmetrics-1.0.0/tests/test_ptdm.py +366 -0
  41. tsadmetrics-1.0.0/tests/test_registry.py +58 -0
  42. tsadmetrics-1.0.0/tests/test_runner.py +185 -0
  43. tsadmetrics-1.0.0/tests/test_spm.py +213 -0
  44. tsadmetrics-1.0.0/tests/test_tmem.py +198 -0
  45. tsadmetrics-1.0.0/tests/test_tpdm.py +369 -0
  46. tsadmetrics-1.0.0/tests/test_tstm.py +338 -0
  47. tsadmetrics-1.0.0/tsadmetrics/base/Metric.py +188 -0
  48. tsadmetrics-1.0.0/tsadmetrics/evaluation/Report.py +25 -0
  49. tsadmetrics-1.0.0/tsadmetrics/evaluation/Runner.py +253 -0
  50. tsadmetrics-1.0.0/tsadmetrics/metrics/Registry.py +141 -0
  51. tsadmetrics-1.0.0/tsadmetrics/metrics/__init__.py +2 -0
  52. tsadmetrics-1.0.0/tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  53. tsadmetrics-1.0.0/tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  54. tsadmetrics-1.0.0/tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  55. tsadmetrics-1.0.0/tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  56. tsadmetrics-1.0.0/tsadmetrics/metrics/spm/__init__.py +9 -0
  57. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  58. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  59. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  60. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  61. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  62. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  63. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  64. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  65. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  66. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  67. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  68. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  69. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  70. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  71. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  72. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  73. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  74. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  75. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  76. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  77. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  78. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  79. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  80. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  81. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  82. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  83. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  84. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  85. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  86. tsadmetrics-1.0.0/tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  87. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation/_integral_interval.py → tsadmetrics-1.0.0/tsadmetrics/utils/functions_affiliation.py +377 -9
  88. tsadmetrics-1.0.0/tsadmetrics/utils/functions_auc.py +393 -0
  89. tsadmetrics-1.0.0/tsadmetrics/utils/functions_conversion.py +63 -0
  90. tsadmetrics-1.0.0/tsadmetrics/utils/functions_counting_metrics.py +26 -0
  91. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/latency_sparsity_aware.py → tsadmetrics-1.0.0/tsadmetrics/utils/functions_latency_sparsity_aware.py +1 -1
  92. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/nabscore.py → tsadmetrics-1.0.0/tsadmetrics/utils/functions_nabscore.py +15 -1
  93. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0/tsadmetrics.egg-info}/PKG-INFO +21 -6
  94. tsadmetrics-1.0.0/tsadmetrics.egg-info/SOURCES.txt +104 -0
  95. tsadmetrics-1.0.0/tsadmetrics.egg-info/top_level.txt +4 -0
  96. tsadmetrics-0.1.17/README.md +0 -31
  97. tsadmetrics-0.1.17/entorno/bin/activate_this.py +0 -32
  98. tsadmetrics-0.1.17/entorno/bin/rst2html.py +0 -23
  99. tsadmetrics-0.1.17/entorno/bin/rst2html4.py +0 -26
  100. tsadmetrics-0.1.17/entorno/bin/rst2html5.py +0 -33
  101. tsadmetrics-0.1.17/entorno/bin/rst2latex.py +0 -26
  102. tsadmetrics-0.1.17/entorno/bin/rst2man.py +0 -27
  103. tsadmetrics-0.1.17/entorno/bin/rst2odt.py +0 -28
  104. tsadmetrics-0.1.17/entorno/bin/rst2odt_prepstyles.py +0 -20
  105. tsadmetrics-0.1.17/entorno/bin/rst2pseudoxml.py +0 -23
  106. tsadmetrics-0.1.17/entorno/bin/rst2s5.py +0 -24
  107. tsadmetrics-0.1.17/entorno/bin/rst2xetex.py +0 -27
  108. tsadmetrics-0.1.17/entorno/bin/rst2xml.py +0 -23
  109. tsadmetrics-0.1.17/entorno/bin/rstpep2html.py +0 -25
  110. tsadmetrics-0.1.17/tests/test_binary.py +0 -946
  111. tsadmetrics-0.1.17/tests/test_non_binary.py +0 -450
  112. tsadmetrics-0.1.17/tests/test_utils.py +0 -49
  113. tsadmetrics-0.1.17/tsadmetrics/__init__.py +0 -21
  114. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  115. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  116. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  117. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  118. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  119. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  120. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  121. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  122. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  123. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  124. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  125. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/metrics.py +0 -698
  126. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  127. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  128. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  129. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  130. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  131. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  132. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  133. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  134. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  135. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/tests.py +0 -376
  136. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  137. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  138. tsadmetrics-0.1.17/tsadmetrics/binary_metrics.py +0 -1652
  139. tsadmetrics-0.1.17/tsadmetrics/metric_utils.py +0 -98
  140. tsadmetrics-0.1.17/tsadmetrics/non_binary_metrics.py +0 -372
  141. tsadmetrics-0.1.17/tsadmetrics/scripts/__init__.py +0 -0
  142. tsadmetrics-0.1.17/tsadmetrics/scripts/compute_metrics.py +0 -42
  143. tsadmetrics-0.1.17/tsadmetrics/utils.py +0 -124
  144. tsadmetrics-0.1.17/tsadmetrics/validation.py +0 -35
  145. tsadmetrics-0.1.17/tsadmetrics.egg-info/SOURCES.txt +0 -71
  146. tsadmetrics-0.1.17/tsadmetrics.egg-info/entry_points.txt +0 -2
  147. tsadmetrics-0.1.17/tsadmetrics.egg-info/top_level.txt +0 -6
  148. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/MANIFEST.in +0 -0
  149. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/setup.cfg +0 -0
  150. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/tests/__init__.py +0 -0
  151. {tsadmetrics-0.1.17/tsadmetrics/_tsadeval → tsadmetrics-1.0.0/tsadmetrics}/__init__.py +0 -0
  152. {tsadmetrics-0.1.17/tsadmetrics/_tsadeval/affiliation → tsadmetrics-1.0.0/tsadmetrics/base}/__init__.py +0 -0
  153. {tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage → tsadmetrics-1.0.0/tsadmetrics/evaluation}/__init__.py +0 -0
  154. {tsadmetrics-0.1.17/tsadmetrics/_tsadeval/eTaPR_pkg → tsadmetrics-1.0.0/tsadmetrics/metrics/tem}/__init__.py +0 -0
  155. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/tsadmetrics/py.typed +0 -0
  156. tsadmetrics-0.1.17/tsadmetrics/_tsadeval/vus_utils.py → tsadmetrics-1.0.0/tsadmetrics/utils/functions_vus.py +0 -0
  157. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/tsadmetrics.egg-info/dependency_links.txt +0 -0
  158. {tsadmetrics-0.1.17 → tsadmetrics-1.0.0}/tsadmetrics.egg-info/requires.txt +0 -0
{tsadmetrics-0.1.17/tsadmetrics.egg-info → tsadmetrics-1.0.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tsadmetrics
- Version: 0.1.17
+ Version: 1.0.0
  Summary: Librería para evaluación de detección de anomalías en series temporales
  Home-page: https://github.com/pathsko/TSADmetrics
  Author: Pedro Rafael Velasco Priego
@@ -23,17 +23,32 @@ Requires-Dist: tzdata==2024.1

  # TSADmetrics - Time Series Anomaly Detection Metrics

- **TSADmetrics** is a Python library for evaluating anomaly detection algorithms in time series data. It provides a comprehensive set of binary and non-binary metrics designed specifically for the challenges of anomaly detection in temporal contexts.
+ **TSADmetrics** is a Python library for evaluating anomaly detection algorithms in time series data.
+ It provides a comprehensive set of metrics specifically designed to handle the temporal nature of anomalies.
+
+ ---

  ## Features

- - **Binary Metrics**: Evaluate discrete anomaly predictions (0/1 labels)
+ - **Metric Taxonomy**: Metrics are categorized into types based on how they handle temporal context:
+
+   - **MPI Metrics**: Evaluate predictions at each point independently, ignoring temporal continuity.
+   - **MET Metrics**: Consider temporal context, analyzing when and how anomalies occur.
+     - **MDPT**: Partial detection within a real anomaly event counts as correct.
+     - **MDTP**: Requires detection to cover a significant fraction of the real anomaly.
+     - **MECT**: Measures alignment of real vs predicted anomaly events.
+     - **MPR**: Penalizes late detections.
+     - **MTDT**: Allows temporal tolerance for early or late detections.
+
+ - **Direct Metric Usage**: Instantiate any metric class and call `compute()` for individual evaluation.
+
+ - **Batch Evaluation**: Use `Runner` to evaluate multiple datasets and metrics at once, with support for both direct data and CSV/JSON input.

- - **Non-Binary Metrics**: Assess continuous anomaly scores
+ - **Flexible Configuration**: Load metrics from YAML configuration files or global evaluation config files.

- - **Efficient Computation**: Compute multiple metrics at once
+ - **CLI Tool**: Compute metrics directly from files without writing Python code.

- - **CLI Tool**: Evaluate metrics directly from CSV/JSON files
+ ---

  ## Installation

tsadmetrics-1.0.0/README.md
@@ -0,0 +1,46 @@
+ # TSADmetrics - Time Series Anomaly Detection Metrics
+
+ **TSADmetrics** is a Python library for evaluating anomaly detection algorithms in time series data.
+ It provides a comprehensive set of metrics specifically designed to handle the temporal nature of anomalies.
+
+ ---
+
+ ## Features
+
+ - **Metric Taxonomy**: Metrics are categorized into types based on how they handle temporal context:
+
+   - **MPI Metrics**: Evaluate predictions at each point independently, ignoring temporal continuity.
+   - **MET Metrics**: Consider temporal context, analyzing when and how anomalies occur.
+     - **MDPT**: Partial detection within a real anomaly event counts as correct.
+     - **MDTP**: Requires detection to cover a significant fraction of the real anomaly.
+     - **MECT**: Measures alignment of real vs predicted anomaly events.
+     - **MPR**: Penalizes late detections.
+     - **MTDT**: Allows temporal tolerance for early or late detections.
+
+ - **Direct Metric Usage**: Instantiate any metric class and call `compute()` for individual evaluation.
+
+ - **Batch Evaluation**: Use `Runner` to evaluate multiple datasets and metrics at once, with support for both direct data and CSV/JSON input.
+
+ - **Flexible Configuration**: Load metrics from YAML configuration files or global evaluation config files.
+
+ - **CLI Tool**: Compute metrics directly from files without writing Python code.
+
+ ---
+
+ ## Installation
+
+ Install TSADmetrics via pip:
+
+ ```bash
+ pip install tsadmetrics
+ ```
+
+ ## Documentation
+
+ The complete documentation for TSADmetrics is available at:
+ 📚 [https://tsadmetrics.readthedocs.io/](https://tsadmetrics.readthedocs.io/)
+
+ ## Acknowledgements
+
+ This library is based on the concepts and implementations from:
+ Sørbø, S., & Ruocco, M. (2023). *Navigating the metric maze: a taxonomy of evaluation metrics for anomaly detection in time series*. https://doi.org/10.1007/s10618-023-00988-8
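
The two usage modes named under Features correspond one-to-one to the example scripts added later in this diff. Below is a minimal sketch condensing them; the imports, the `Runner` call shape, and the `"adc"` registry key are copied verbatim from those examples, so read it as illustrative rather than authoritative:

```python
from tsadmetrics.metrics.tem.mdpt import PointadjustedFScore
from tsadmetrics.evaluation.Runner import Runner

y_true = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 1, 1, 0, 0]

# Direct usage: instantiate a metric class and call compute().
print(PointadjustedFScore().compute(y_true, y_pred))

# Batch usage: Runner pairs named datasets with (registry_key, params)
# metric tuples; each dataset carries (binary_preds, continuous_scores).
runner = Runner(
    [("dataset1", y_true, (y_pred, y_pred))],
    [("adc", {})],  # AverageDetectionCount, keyed as in the examples below
)
print(runner.run())
```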
{tsadmetrics-0.1.17/docs_manual → tsadmetrics-1.0.0/docs/api_doc}/conf.py
@@ -6,7 +6,7 @@ import sys
  sys.path.insert(0, os.path.abspath('../'))


- project = 'TSADmetrics'
+ project = 'TSADmetrics API Reference'
  copyright = '2025, Pedro Rafael Velasco Priego'
  author = 'Pedro Rafael Velasco Priego'
  release = 'MIT'
@@ -15,7 +15,7 @@ release = 'MIT'
  # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration


- extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+ extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']


@@ -63,28 +63,5 @@ latex_elements = {
  'maxlistdepth': '10', # Aumenta el límite de anidamiento
  'papersize': 'a4paper',
  'pointsize': '10pt',
- 'maketitle': r'''
- \makeatletter
- \begin{titlepage}
- \noindent\rule{\textwidth}{1pt}\\[3cm]
- \begin{center}
- {\Huge\sffamily\bfseries TSADmetrics User Manual}\\[1.5cm]
- {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
- \begin{minipage}{0.8\textwidth}
- \centering
- {\large\sffamily
- \begin{tabular}{l@{\hspace{1cm}}l}
- \textbf{Autor:} & Pedro Rafael Velasco Priego \\
- \textbf{Directores:} & Dra. Amelia Zafra Gómez \\
- & Dr. Sebastián Ventura Soto \\
- \end{tabular}
- }
- \end{minipage}\\[5cm]
- {\large\sffamily \@date}\\
- {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
- \end{center}
- \noindent\rule{\textwidth}{1pt}
- \end{titlepage}
- \makeatother
- ''',
+
  }
{tsadmetrics-0.1.17/docs → tsadmetrics-1.0.0/docs/full_doc}/conf.py
@@ -15,7 +15,7 @@ release = 'MIT'
  # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration


- extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+ extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']


{tsadmetrics-0.1.17/docs_api → tsadmetrics-1.0.0/docs/manual_doc}/conf.py
@@ -6,7 +6,7 @@ import sys
  sys.path.insert(0, os.path.abspath('../'))


- project = 'TSADmetrics'
+ project = 'TSADmetrics User Manual'
  copyright = '2025, Pedro Rafael Velasco Priego'
  author = 'Pedro Rafael Velasco Priego'
  release = 'MIT'
@@ -15,7 +15,7 @@ release = 'MIT'
  # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration


- extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+ extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']


@@ -63,28 +63,5 @@ latex_elements = {
  'maxlistdepth': '10', # Aumenta el límite de anidamiento
  'papersize': 'a4paper',
  'pointsize': '10pt',
- 'maketitle': r'''
- \makeatletter
- \begin{titlepage}
- \noindent\rule{\textwidth}{1pt}\\[3cm]
- \begin{center}
- {\Huge\sffamily\bfseries TSADmetrics API Reference}\\[1.5cm]
- {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
- \begin{minipage}{0.8\textwidth}
- \centering
- {\large\sffamily
- \begin{tabular}{l@{\hspace{1cm}}l}
- \textbf{Autor:} & Pedro Rafael Velasco Priego \\
- \textbf{Directores:} & Dra. Amelia Zafra Gómez \\
- & Dr. Sebastián Ventura Soto \\
- \end{tabular}
- }
- \end{minipage}\\[5cm]
- {\large\sffamily \@date}\\
- {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
- \end{center}
- \noindent\rule{\textwidth}{1pt}
- \end{titlepage}
- \makeatother
- ''',
+
  }
tsadmetrics-1.0.0/examples/example_direct_data.py
@@ -0,0 +1,28 @@
+ from tsadmetrics.evaluation.Runner import Runner
+
+
+ y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+ y_pred1_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+ y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+ y_pred2_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+ dataset_evaluations = [
+     ("dataset1", y_true1, (y_pred1, y_pred1_cont)),
+     ("dataset2", y_true2, (y_pred2, y_pred2_cont))
+
+ ]
+
+ metrics = [
+     ("adc",{}),
+     ("dair",{}),
+     ("pakf",{"k":0.2}),
+     ("pakf",{"k":0.4}),
+     ("pakf",{"k":0.5}),
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run(generate_report=True, report_file="./example_output/example_direct_data_report.csv")
+ print(results)
+
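
A note on the dataset tuples above: the third element appears to pack the binary predictions first and the continuous scores second. The diff never states this convention outright — example_direct_single_data.py below simply passes the same binary array in both slots when no continuous scores exist — so the shape sketched here is inferred:

```python
# Inferred from the examples, not documented in this diff:
# (dataset_name, ground_truth, (binary_predictions, continuous_scores))
entry = ("dataset1", y_true1, (y_pred1, y_pred1_cont))
```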
tsadmetrics-1.0.0/examples/example_direct_single_data.py
@@ -0,0 +1,25 @@
+ from tsadmetrics.evaluation.Runner import Runner
+
+
+ y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+ y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+ dataset_evaluations = [
+     ("dataset1", y_true1, (y_pred1, y_pred1)),
+     ("dataset2", y_true2, (y_pred2, y_pred2))
+
+ ]
+
+ metrics = [
+     ("adc",{}),
+     ("dair",{}),
+     ("pakf",{"k":0.2}),
+     ("pakf",{"k":0.4})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run(generate_report=True, report_file="./example_output/example_direct_single_data_report.csv")
+ print(results)
+
tsadmetrics-1.0.0/examples/example_file_reference.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.evaluation.Runner import Runner
+
+
+ y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+ dataset_evaluations = [
+     ("dataset1", "example_input/results1.csv"),
+     ("dataset2", "example_input/results2.csv")
+
+ ]
+
+ metrics = [
+     ("adc",{}),
+     ("dair",{}),
+     ("pakf",{"k":0.2}),
+     ("pakf",{"k":0.4})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run(generate_report=True, report_file="./example_output/example_file_reference_report.csv")
+ print(results)
+
tsadmetrics-1.0.0/examples/example_global_config_file.py
@@ -0,0 +1,13 @@
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+ global_config_file = "example_input/example_evaluation_config.yaml"
+
+ runner = Runner(global_config_file)
+ results = runner.run(generate_report=True, report_file="./example_output/example_global_config_file_report.csv")
+ print(results)
+
tsadmetrics-1.0.0/examples/example_metric_config_file.py
@@ -0,0 +1,19 @@
+ from tsadmetrics.evaluation.Runner import Runner
+
+
+ y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+ dataset_evaluations = [
+     ("dataset1", "example_input/results1.csv"),
+     ("dataset2", "example_input/results2.csv")
+
+ ]
+
+ metrics_file = "example_input/example_metrics_config.yaml"
+
+ runner = Runner(dataset_evaluations, metrics_file)
+ results = runner.run(generate_report=True, report_file="./example_output/example_metric_config_file_report.csv")
+ print(results)
+
tsadmetrics-1.0.0/examples/example_simple_metric.py
@@ -0,0 +1,8 @@
+ from tsadmetrics.metrics.tem.mdpt import PointadjustedFScore
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+ metric = PointadjustedFScore()
+
+ result = metric.compute(y_true, y_pred)
+ print(f"PointadjustedFScore: {result}")
tsadmetrics-1.0.0/examples/specific_examples/AbsoluteDetectionDistance_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mect.AbsoluteDetectionDistance import AbsoluteDetectionDistance
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = AbsoluteDetectionDistance()
+ result = metric.compute(y_true, y_pred)
+ print("AbsoluteDetectionDistance:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("add", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/AffiliationbasedFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mtdt.AffiliationbasedFScore import AffiliationbasedFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = AffiliationbasedFScore()
+ result = metric.compute(y_true, y_pred)
+ print("AffiliationbasedFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("aff_f", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/AverageDetectionCount_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mdtp.AverageDetectionCount import AverageDetectionCount
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = AverageDetectionCount()
+ result = metric.compute(y_true, y_pred)
+ print("AverageDetectionCount:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("adc", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/CompositeFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mdpt.CompositeFScore import CompositeFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = CompositeFScore()
+ result = metric.compute(y_true, y_pred)
+ print("CompositeFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("cf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mpr.DelayThresholdedPointadjustedFScore import DelayThresholdedPointadjustedFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = DelayThresholdedPointadjustedFScore()
+ result = metric.compute(y_true, y_pred)
+ print("DelayThresholdedPointadjustedFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("dtpaf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/DetectionAccuracyInRange_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mdtp.DetectionAccuracyInRange import DetectionAccuracyInRange
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = DetectionAccuracyInRange()
+ result = metric.compute(y_true, y_pred)
+ print("DetectionAccuracyInRange:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("dair", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mect.EnhancedTimeseriesAwareFScore import EnhancedTimeseriesAwareFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = EnhancedTimeseriesAwareFScore()
+ result = metric.compute(y_true, y_pred)
+ print("EnhancedTimeseriesAwareFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("etaf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/LatencySparsityawareFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mpr.LatencySparsityawareFScore import LatencySparsityawareFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = LatencySparsityawareFScore()
+ result = metric.compute(y_true, y_pred)
+ print("LatencySparsityawareFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("lsaf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/MeanTimeToDetect_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mpr.MeanTimeToDetect import MeanTimeToDetect
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = MeanTimeToDetect()
+ result = metric.compute(y_true, y_pred)
+ print("MeanTimeToDetect:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("mttd", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/NabScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mpr.NabScore import NabScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = NabScore()
+ result = metric.compute(y_true, y_pred)
+ print("NabScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("nab_score", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/PateFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mtdt.PateFScore import PateFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = PateFScore()
+ result = metric.compute(y_true, y_pred)
+ print("PateFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("pate_f1", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/Pate_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mtdt.Pate import Pate
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+ # Direct usage
+ metric = Pate()
+ result = metric.compute(y_true, y_pred)
+ print("Pate:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("pate", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
tsadmetrics-1.0.0/examples/specific_examples/PointadjustedAtKFScore_example.py
@@ -0,0 +1,24 @@
+ from tsadmetrics.metrics.tem.mdtp.PointadjustedAtKFScore import PointadjustedAtKFScore
+ from tsadmetrics.evaluation.Runner import Runner
+ import numpy as np
+
+ y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+ y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+ # Direct usage
+ metric = PointadjustedAtKFScore()
+ result = metric.compute(y_true, y_pred)
+ print("PointadjustedAtKFScore:", result)
+
+ # Usage with Runner
+ dataset_evaluations = [
+     ("dataset1", y_true, (y_pred, y_pred))
+ ]
+
+ metrics = [
+     ("pakf", {})
+ ]
+
+ runner = Runner(dataset_evaluations, metrics)
+ results = runner.run()
+ print(results)
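
A caveat on this last example: the batch examples earlier in the diff always pass a threshold to this metric, e.g. ("pakf", {"k": 0.2}), whereas the direct usage above constructs PointadjustedAtKFScore() with its defaults. Assuming the constructor accepts the same parameters as the Runner dict — an assumption, since the class body is not shown in this diff — an explicit-threshold variant would look like:

```python
# Assumption: the constructor mirrors the {"k": ...} parameter dict that
# the Runner examples pass via ("pakf", {"k": 0.2}).
metric = PointadjustedAtKFScore(k=0.2)
result = metric.compute(y_true, y_pred)
print("PointadjustedAtKFScore (k=0.2):", result)
```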