tsadmetrics 0.1.17__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares the contents of two package versions as publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Files changed (143)
  1. {docs_api → docs/add_docs/api_doc}/conf.py +3 -26
  2. {docs_manual → docs/add_docs/full_doc}/conf.py +2 -25
  3. docs/add_docs/manual_doc/conf.py +67 -0
  4. docs/conf.py +1 -1
  5. examples/example_direct_data.py +28 -0
  6. examples/example_direct_single_data.py +25 -0
  7. examples/example_file_reference.py +24 -0
  8. examples/example_global_config_file.py +13 -0
  9. examples/example_metric_config_file.py +19 -0
  10. examples/example_simple_metric.py +8 -0
  11. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  12. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  13. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  14. examples/specific_examples/CompositeFScore_example.py +24 -0
  15. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  16. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  17. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  18. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  19. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  20. examples/specific_examples/NabScore_example.py +24 -0
  21. examples/specific_examples/PateFScore_example.py +24 -0
  22. examples/specific_examples/Pate_example.py +24 -0
  23. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  25. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  26. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  27. examples/specific_examples/RangebasedFScore_example.py +24 -0
  28. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  29. examples/specific_examples/TemporalDistance_example.py +24 -0
  30. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  31. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  32. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  33. examples/specific_examples/VusPr_example.py +24 -0
  34. examples/specific_examples/VusRoc_example.py +24 -0
  35. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  36. tsadmetrics/__init__.py +0 -21
  37. tsadmetrics/base/Metric.py +188 -0
  38. tsadmetrics/evaluation/Report.py +25 -0
  39. tsadmetrics/evaluation/Runner.py +253 -0
  40. tsadmetrics/metrics/Registry.py +141 -0
  41. tsadmetrics/metrics/__init__.py +2 -0
  42. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  43. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  44. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  45. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  46. tsadmetrics/metrics/spm/__init__.py +9 -0
  47. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  48. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  49. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  50. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  51. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  52. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  53. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  54. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  55. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  56. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  57. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  58. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  59. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  60. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  61. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  62. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  63. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  64. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  65. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  66. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  67. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  68. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  69. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  70. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  71. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  72. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  73. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  74. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  75. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  76. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  77. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  78. tsadmetrics/utils/functions_auc.py +393 -0
  79. tsadmetrics/utils/functions_conversion.py +63 -0
  80. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  81. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  82. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  83. tsadmetrics-1.0.1.dist-info/METADATA +83 -0
  84. tsadmetrics-1.0.1.dist-info/RECORD +91 -0
  85. tsadmetrics-1.0.1.dist-info/top_level.txt +3 -0
  86. entorno/bin/activate_this.py +0 -32
  87. entorno/bin/rst2html.py +0 -23
  88. entorno/bin/rst2html4.py +0 -26
  89. entorno/bin/rst2html5.py +0 -33
  90. entorno/bin/rst2latex.py +0 -26
  91. entorno/bin/rst2man.py +0 -27
  92. entorno/bin/rst2odt.py +0 -28
  93. entorno/bin/rst2odt_prepstyles.py +0 -20
  94. entorno/bin/rst2pseudoxml.py +0 -23
  95. entorno/bin/rst2s5.py +0 -24
  96. entorno/bin/rst2xetex.py +0 -27
  97. entorno/bin/rst2xml.py +0 -23
  98. entorno/bin/rstpep2html.py +0 -25
  99. tests/test_binary.py +0 -946
  100. tests/test_non_binary.py +0 -450
  101. tests/test_utils.py +0 -49
  102. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  103. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  104. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  105. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  106. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  107. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  108. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  109. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  110. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  111. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  112. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  113. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  114. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  115. tsadmetrics/_tsadeval/metrics.py +0 -698
  116. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  117. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  118. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  119. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  120. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  121. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  122. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  123. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  124. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  125. tsadmetrics/_tsadeval/tests.py +0 -376
  126. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  127. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  128. tsadmetrics/binary_metrics.py +0 -1652
  129. tsadmetrics/metric_utils.py +0 -98
  130. tsadmetrics/non_binary_metrics.py +0 -372
  131. tsadmetrics/scripts/__init__.py +0 -0
  132. tsadmetrics/scripts/compute_metrics.py +0 -42
  133. tsadmetrics/utils.py +0 -124
  134. tsadmetrics/validation.py +0 -35
  135. tsadmetrics-0.1.17.dist-info/METADATA +0 -54
  136. tsadmetrics-0.1.17.dist-info/RECORD +0 -66
  137. tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
  138. tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
  139. {tests → tsadmetrics/base}/__init__.py +0 -0
  140. /tsadmetrics/{_tsadeval → evaluation}/__init__.py +0 -0
  141. /tsadmetrics/{_tsadeval/affiliation → metrics/tem}/__init__.py +0 -0
  142. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  143. {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.1.dist-info}/WHEEL +0 -0
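Taken together, the listing shows the 0.1.17 flat, function-based API (tsadmetrics/binary_metrics.py, tsadmetrics/non_binary_metrics.py, the vendored _tsadeval backends, and stray entorno/bin virtualenv scripts that had been shipped in the wheel) being replaced in 1.0.1 by a class-based layout: each metric lives in its own module under tsadmetrics/metrics/, builds on tsadmetrics/base/Metric.py, is registered via metrics/Registry.py, and can be batch-evaluated by tsadmetrics/evaluation/Runner.py. The example diffs below show both entry points; as a minimal sketch of the two usage patterns (assuming, per those examples, that metric classes expose compute(y_true, y_pred) and that Runner accepts registry aliases such as "pakf"):

from tsadmetrics.metrics.tem.tpdm.PointadjustedFScore import PointadjustedFScore
from tsadmetrics.evaluation.Runner import Runner

y_true = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1]   # ground-truth anomaly labels
y_pred = [0, 0, 0, 1, 0, 0, 0, 0, 1, 1]   # binary predictions

# Direct usage: instantiate a metric class and call compute()
print(PointadjustedFScore().compute(y_true, y_pred))

# Batch usage: each dataset is (name, y_true, (binary_preds, continuous_preds)),
# each metric is (registry_alias, params)
runner = Runner([("dataset1", y_true, (y_pred, y_pred))],
                [("pakf", {"k": 0.2})])
print(runner.run())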
{docs_api → docs/add_docs/api_doc}/conf.py CHANGED
@@ -6,7 +6,7 @@ import sys
 sys.path.insert(0, os.path.abspath('../'))
 
 
-project = 'TSADmetrics'
+project = 'TSADmetrics API Reference'
 copyright = '2025, Pedro Rafael Velasco Priego'
 author = 'Pedro Rafael Velasco Priego'
 release = 'MIT'
@@ -15,7 +15,7 @@ release = 'MIT'
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
 
-extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
 
 
 
@@ -63,28 +63,5 @@ latex_elements = {
     'maxlistdepth': '10',  # raise the nesting limit
     'papersize': 'a4paper',
     'pointsize': '10pt',
-    'maketitle': r'''
-    \makeatletter
-    \begin{titlepage}
-    \noindent\rule{\textwidth}{1pt}\\[3cm]
-    \begin{center}
-    {\Huge\sffamily\bfseries TSADmetrics API Reference}\\[1.5cm]
-    {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
-    \begin{minipage}{0.8\textwidth}
-    \centering
-    {\large\sffamily
-    \begin{tabular}{l@{\hspace{1cm}}l}
-    \textbf{Author:} & Pedro Rafael Velasco Priego \\
-    \textbf{Advisors:} & Dra. Amelia Zafra Gómez \\
-    & Dr. Sebastián Ventura Soto \\
-    \end{tabular}
-    }
-    \end{minipage}\\[5cm]
-    {\large\sffamily \@date}\\
-    {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
-    \end{center}
-    \noindent\rule{\textwidth}{1pt}
-    \end{titlepage}
-    \makeatother
-    ''',
+
 }
{docs_manual → docs/add_docs/full_doc}/conf.py CHANGED
@@ -15,7 +15,7 @@ release = 'MIT'
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
 
-extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
 
 
 
@@ -63,28 +63,5 @@ latex_elements = {
     'maxlistdepth': '10',  # raise the nesting limit
     'papersize': 'a4paper',
     'pointsize': '10pt',
-    'maketitle': r'''
-    \makeatletter
-    \begin{titlepage}
-    \noindent\rule{\textwidth}{1pt}\\[3cm]
-    \begin{center}
-    {\Huge\sffamily\bfseries TSADmetrics User Manual}\\[1.5cm]
-    {\Large\sffamily Time Series Anomaly Detection Metrics}\\[3cm]
-    \begin{minipage}{0.8\textwidth}
-    \centering
-    {\large\sffamily
-    \begin{tabular}{l@{\hspace{1cm}}l}
-    \textbf{Author:} & Pedro Rafael Velasco Priego \\
-    \textbf{Advisors:} & Dra. Amelia Zafra Gómez \\
-    & Dr. Sebastián Ventura Soto \\
-    \end{tabular}
-    }
-    \end{minipage}\\[5cm]
-    {\large\sffamily \@date}\\
-    {\large\sffamily \copyright\ 2025 Pedro Rafael Velasco Priego}
-    \end{center}
-    \noindent\rule{\textwidth}{1pt}
-    \end{titlepage}
-    \makeatother
-    ''',
+
 }
docs/add_docs/manual_doc/conf.py ADDED
@@ -0,0 +1,67 @@
+# Configuration file for the Sphinx documentation builder.
+#
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../'))
+
+
+project = 'TSADmetrics User Manual'
+copyright = '2025, Pedro Rafael Velasco Priego'
+author = 'Pedro Rafael Velasco Priego'
+release = 'MIT'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
+
+
+
+templates_path = ['_templates']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+html_theme = 'furo'
+html_static_path = ['_static']
+html_theme_options = {
+    #"sidebar_hide_name": True,
+    "light_css_variables": {
+        "color-brand-primary": "#2e5c7d",
+        "color-brand-content": "#2e5c7d",
+        "codebgcolor": "red",
+        "codetextcolor": "red",
+    },
+    "dark_css_variables": {
+        "color-brand-primary": "#6998b4",
+        "color-brand-content": "#6998b4",
+        "codebgcolor": "green",
+        "codetextcolor": "green",
+    },
+    "navigation_with_keys": True
+
+}
+html_baseurl = ''
+
+html_css_files = [
+    'css/custom.css',
+]
+
+epub_show_urls = 'footnote'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+### -- LaTeX options -------------------------------------------------
+
+# build command: make latexpdf LATEXMKOPTS="-xelatex"
+
+latex_elements = {
+    'maxlistdepth': '10',  # raise the nesting limit
+    'papersize': 'a4paper',
+    'pointsize': '10pt',
+
+}
docs/conf.py CHANGED
@@ -15,7 +15,7 @@ release = 'MIT'
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
 
-extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
 
 
 
examples/example_direct_data.py ADDED
@@ -0,0 +1,28 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred1_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+dataset_evaluations = [
+    ("dataset1", y_true1, (y_pred1, y_pred1_cont)),
+    ("dataset2", y_true2, (y_pred2, y_pred2_cont))
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4}),
+    ("pakf",{"k":0.5}),
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_direct_data_report.csv")
+print(results)
+
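A note on the input format used by these Runner examples: each dataset entry is (name, y_true, (y_pred_binary, y_pred_continuous)), so threshold-based metrics and continuous-score metrics can be evaluated over the same datasets in one run; when only binary predictions exist, the next example simply passes them in both positions.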
examples/example_direct_single_data.py ADDED
@@ -0,0 +1,25 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+dataset_evaluations = [
+    ("dataset1", y_true1, (y_pred1, y_pred1)),
+    ("dataset2", y_true2, (y_pred2, y_pred2))
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_direct_single_data_report.csv")
+print(results)
+
examples/example_file_reference.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+dataset_evaluations = [
+    ("dataset1", "example_input/results1.csv"),
+    ("dataset2", "example_input/results2.csv")
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_file_reference_report.csv")
+print(results)
+
examples/example_global_config_file.py ADDED
@@ -0,0 +1,13 @@
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+global_config_file = "example_input/example_evaluation_config.yaml"
+
+runner = Runner(global_config_file)
+results = runner.run(generate_report=True, report_file="./example_output/example_global_config_file_report.csv")
+print(results)
+
examples/example_metric_config_file.py ADDED
@@ -0,0 +1,19 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+dataset_evaluations = [
+    ("dataset1", "example_input/results1.csv"),
+    ("dataset2", "example_input/results2.csv")
+
+]
+
+metrics_file = "example_input/example_metrics_config.yaml"
+
+runner = Runner(dataset_evaluations, metrics_file)
+results = runner.run(generate_report=True, report_file="./example_output/example_metric_config_file_report.csv")
+print(results)
+
examples/example_simple_metric.py ADDED
@@ -0,0 +1,8 @@
+from tsadmetrics.metrics.tem.tpdm import PointadjustedFScore
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+metric = PointadjustedFScore()
+
+result = metric.compute(y_true, y_pred)
+print(f"PointadjustedFScore: {result}")
examples/specific_examples/AbsoluteDetectionDistance_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tmem.AbsoluteDetectionDistance import AbsoluteDetectionDistance
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AbsoluteDetectionDistance()
+result = metric.compute(y_true, y_pred)
+print("AbsoluteDetectionDistance:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("add", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/AffiliationbasedFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.AffiliationbasedFScore import AffiliationbasedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AffiliationbasedFScore()
+result = metric.compute(y_true, y_pred)
+print("AffiliationbasedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("aff_f", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/AverageDetectionCount_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.AverageDetectionCount import AverageDetectionCount
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AverageDetectionCount()
+result = metric.compute(y_true, y_pred)
+print("AverageDetectionCount:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("adc", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/CompositeFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.CompositeFScore import CompositeFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = CompositeFScore()
+result = metric.compute(y_true, y_pred)
+print("CompositeFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("cf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.dpm.DelayThresholdedPointadjustedFScore import DelayThresholdedPointadjustedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = DelayThresholdedPointadjustedFScore()
+result = metric.compute(y_true, y_pred)
+print("DelayThresholdedPointadjustedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("dtpaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/DetectionAccuracyInRange_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.DetectionAccuracyInRange import DetectionAccuracyInRange
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = DetectionAccuracyInRange()
+result = metric.compute(y_true, y_pred)
+print("DetectionAccuracyInRange:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("dair", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tmem.EnhancedTimeseriesAwareFScore import EnhancedTimeseriesAwareFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = EnhancedTimeseriesAwareFScore()
+result = metric.compute(y_true, y_pred)
+print("EnhancedTimeseriesAwareFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("etaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/LatencySparsityawareFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.dpm.LatencySparsityawareFScore import LatencySparsityawareFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = LatencySparsityawareFScore()
+result = metric.compute(y_true, y_pred)
+print("LatencySparsityawareFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("lsaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/MeanTimeToDetect_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.dpm.MeanTimeToDetect import MeanTimeToDetect
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = MeanTimeToDetect()
+result = metric.compute(y_true, y_pred)
+print("MeanTimeToDetect:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("mttd", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/NabScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.dpm.NabScore import NabScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = NabScore()
+result = metric.compute(y_true, y_pred)
+print("NabScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("nab_score", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PateFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.PateFScore import PateFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PateFScore()
+result = metric.compute(y_true, y_pred)
+print("PateFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pate_f1", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/Pate_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.Pate import Pate
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = Pate()
+result = metric.compute(y_true, y_pred)
+print("Pate:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pate", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAtKFScore_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.PointadjustedAtKFScore import PointadjustedAtKFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PointadjustedAtKFScore()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAtKFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pakf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAucPr_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.PointadjustedAucPr import PointadjustedAucPr
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = PointadjustedAucPr()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAucPr:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pa_auc_pr", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAucRoc_example.py ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.PointadjustedAucRoc import PointadjustedAucRoc
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = PointadjustedAucRoc()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAucRoc:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pa_auc_roc", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)