tsadmetrics 0.1.14__tar.gz → 0.1.16__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tsadmetrics-0.1.14/tsadmetrics.egg-info → tsadmetrics-0.1.16}/PKG-INFO +1 -1
- tsadmetrics-0.1.16/docs/conf.py +67 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/pyproject.toml +4 -1
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/setup.py +7 -1
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tests/test_binary.py +194 -7
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tests/test_non_binary.py +76 -27
- tsadmetrics-0.1.16/tests/test_utils.py +49 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/__init__.py +1 -1
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/binary_metrics.py +290 -90
- tsadmetrics-0.1.16/tsadmetrics/metric_utils.py +98 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/non_binary_metrics.py +31 -2
- tsadmetrics-0.1.16/tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics-0.1.16/tsadmetrics/scripts/compute_metrics.py +42 -0
- tsadmetrics-0.1.16/tsadmetrics/utils.py +122 -0
- tsadmetrics-0.1.16/tsadmetrics/validation.py +35 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16/tsadmetrics.egg-info}/PKG-INFO +1 -1
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics.egg-info/SOURCES.txt +6 -1
- tsadmetrics-0.1.16/tsadmetrics.egg-info/entry_points.txt +2 -0
- tsadmetrics-0.1.14/docs/conf.py +0 -43
- tsadmetrics-0.1.14/tsadmetrics/metric_utils.py +0 -333
- tsadmetrics-0.1.14/tsadmetrics/utils.py +0 -55
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/MANIFEST.in +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/README.md +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/activate_this.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2html.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2html4.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2html5.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2latex.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2man.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2odt.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2odt_prepstyles.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2pseudoxml.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2s5.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2xetex.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rst2xml.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/entorno/bin/rstpep2html.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/setup.cfg +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tests/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/_integral_interval.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/generics.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/affiliation/metrics.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/discontinuity_graph.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/latency_sparsity_aware.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/metrics.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/nabscore.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/tests.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/threshold_plt.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/time_tolerant.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/_tsadeval/vus_utils.py +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/py.typed +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics.egg-info/dependency_links.txt +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics.egg-info/requires.txt +0 -0
- {tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics.egg-info/top_level.txt +0 -0
tsadmetrics-0.1.16/docs/conf.py (new file)
@@ -0,0 +1,67 @@
+# Configuration file for the Sphinx documentation builder.
+#
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../'))
+
+
+project = 'TSADmetrics'
+copyright = '2025, Pedro Rafael Velasco Priego'
+author = 'Pedro Rafael Velasco Priego'
+release = 'MIT'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+
+
+
+templates_path = ['_templates']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+html_theme = 'furo'
+html_static_path = ['_static']
+html_theme_options = {
+    #"sidebar_hide_name": True,
+    "light_css_variables": {
+        "color-brand-primary": "#2e5c7d",
+        "color-brand-content": "#2e5c7d",
+        "codebgcolor": "red",
+        "codetextcolor": "red",
+    },
+    "dark_css_variables": {
+        "color-brand-primary": "#6998b4",
+        "color-brand-content": "#6998b4",
+        "codebgcolor": "green",
+        "codetextcolor": "green",
+    },
+    "navigation_with_keys": True
+
+}
+html_baseurl = ''
+
+html_css_files = [
+    'css/custom.css',
+]
+
+epub_show_urls = 'footnote'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+### -- LaTeX options -------------------------------------------------
+
+# comando para compilar: make latexpdf LATEXMKOPTS="-xelatex"
+
+latex_elements = {
+    'maxlistdepth': '10', # Aumenta el límite de anidamiento
+    'papersize': 'a4paper',
+    'pointsize': '10pt',
+
+}
{tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "tsadmetrics"
-version = "0.1.14"
+version = "0.1.16"
 description = "Librería para evaluación de detección de anomalías en series temporales"
 authors = [
     { name = "Pedro Rafael Velasco Priego", email = "i12veprp@uco.es" }
@@ -28,3 +28,6 @@ where = ["."]
 [build-system]
 requires = ["setuptools>=61.0", "wheel"]
 build-backend = "setuptools.build_meta"
+
+[project.scripts]
+tsadmetrics-compute = "tsadmetrics.scripts.compute_metrics:main"
{tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="tsadmetrics",
-    version="0.1.14",
+    version="0.1.16",
     author="Pedro Rafael Velasco Priego",
     author_email="i12veprp@uco.es",
     description="A library for time series anomaly detection metrics and evaluation.",
@@ -46,4 +46,10 @@ setup(
             "myst-parser",
         ],
     },
+    entry_points={
+        "console_scripts": [
+            "tsadmetrics-compute = scripts.compute_metrics:main",
+        ],
+    },
 )
+
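Both packaging files now register a tsadmetrics-compute console script backed by compute_metrics.py's main. The CLI's own flags are not part of this diff; the sketch below is only a hedged illustration of the programmatic path the script presumably wraps, using the compute_metrics_from_file call signature exercised by the new tests/test_utils.py further down:

from tsadmetrics import compute_metrics_from_file

# Paths taken from the new tests; the expected contents of results.csv and
# config.json are not shown anywhere in this diff, so they remain assumptions.
results_df = compute_metrics_from_file(
    'tests/test_data/results.csv',
    'tests/test_data/config.json',
    output_dir='tests/test_data',   # the tests check for computed_metrics.csv in this directory
)
print(results_df)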
{tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tests/test_binary.py
@@ -593,8 +593,40 @@ class TestAverageDetectionCount(unittest.TestCase):
         """
         pass
 
+    def setUp(self):
+        """
+        Configuración inicial para las pruebas.
+        """
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+

 
+    def test_average_detection_count(self):
+        """
+        Prueba para la función average_detection_count.
+        """
+        metric = round(average_detection_count(self.y_true1, self.y_pred1),2)
+        expected_metric = 0.5
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(average_detection_count(self.y_true1, self.y_pred2),2)
+        expected_metric = 0.12
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(average_detection_count(self.y_true2, self.y_pred21),2)
+        expected_metric = 0.33
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(average_detection_count(self.y_true2, self.y_pred22),2)
+        expected_metric = 0.67
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
 
     def test_average_detection_count_consistency(self):
         try:
@@ -615,9 +647,33 @@ class TestAbsoluteDetectionDistance(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])

-
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_absolute_detection_distance(self):
+        """
+        Prueba para la función absolute_detection_distance.
+        """
+        metric = round(absolute_detection_distance(self.y_true1, self.y_pred1),2)
+        expected_metric = 0.25
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(absolute_detection_distance(self.y_true1, self.y_pred2),2)
+        expected_metric = 0.25
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(absolute_detection_distance(self.y_true2, self.y_pred21),2)
+        expected_metric = 0.06
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(absolute_detection_distance(self.y_true2, self.y_pred22),2)
+        expected_metric = 0.12
+        self.assertAlmostEqual(metric, expected_metric, places=4)


 
     def test_absolute_detection_distance_consistency(self):
@@ -639,7 +695,33 @@ class TestTotalDetectedInRange(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_total_detected_in_range(self):
+        """
+        Prueba para la función total_detected_in_range.
+        """
+        metric = round(total_detected_in_range(self.y_true1, self.y_pred1,k=3),2)
+        expected_metric = 0.5
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(total_detected_in_range(self.y_true1, self.y_pred2,k=3),2)
+        expected_metric = 0.5
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(total_detected_in_range(self.y_true2, self.y_pred21,k=3),2)
+        expected_metric = 0.56
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(total_detected_in_range(self.y_true2, self.y_pred22,k=3),2)
+        expected_metric = 0.44
+        self.assertAlmostEqual(metric, expected_metric, places=4)


 
@@ -663,7 +745,33 @@ class TestDetectionAccuracyInRange(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_detection_accuracy_in_range(self):
+        """
+        Prueba para la función detection_accuracy_in_range.
+        """
+        metric = round(detection_accuracy_in_range(self.y_true1, self.y_pred1,k=3),2)
+        expected_metric = 1.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(detection_accuracy_in_range(self.y_true1, self.y_pred2,k=3),2)
+        expected_metric = 1.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(detection_accuracy_in_range(self.y_true2, self.y_pred21,k=3),2)
+        expected_metric = 1.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(detection_accuracy_in_range(self.y_true2, self.y_pred22,k=3),2)
+        expected_metric = 1.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)


 
@@ -688,7 +796,33 @@ class TestWeightedDetectionDifference(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_weighted_detection_difference(self):
+        """
+        Prueba para la función weighted_detection_difference.
+        """
+        metric = round(weighted_detection_difference(self.y_true1, self.y_pred1,k=3),2)
+        expected_metric = 18.89
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(weighted_detection_difference(self.y_true1, self.y_pred2,k=3),2)
+        expected_metric = 24.89
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(weighted_detection_difference(self.y_true2, self.y_pred21,k=3),2)
+        expected_metric = 15.73
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(weighted_detection_difference(self.y_true2, self.y_pred22,k=3),2)
+        expected_metric = 16.73
+        self.assertAlmostEqual(metric, expected_metric, places=4)


 
@@ -712,7 +846,33 @@ class TestPATE(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_binary_pate(self):
+        """
+        Prueba para la función binary_pate.
+        """
+        metric = round(binary_pate(self.y_true1, self.y_pred1,early=2, delay=2),2)
+        expected_metric = 0.67
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(binary_pate(self.y_true1, self.y_pred2,early=2, delay=2),2)
+        expected_metric = 0.27
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(binary_pate(self.y_true2, self.y_pred21,early=2, delay=2),2)
+        expected_metric = 0.71
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(binary_pate(self.y_true2, self.y_pred22,early=2, delay=2),2)
+        expected_metric = 0.62
+        self.assertAlmostEqual(metric, expected_metric, places=4)


 
@@ -737,7 +897,34 @@ class TestMeanTimeToDetect(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred1 = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred2 = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0])
+
+        self.y_true2 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred21 = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1])
+        self.y_pred22 = np.array([0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0])
+
+    def test_mean_time_to_detect(self):
+        """
+        Prueba para la función mean_time_to_detect.
+        """
+        metric = round(mean_time_to_detect(self.y_true1, self.y_pred1),2)
+        expected_metric = 0.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(mean_time_to_detect(self.y_true1, self.y_pred2),2)
+        expected_metric = 0.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(mean_time_to_detect(self.y_true2, self.y_pred21),2)
+        expected_metric = 8.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+
+        metric = round(mean_time_to_detect(self.y_true2, self.y_pred22),2)
+        expected_metric = 0.0
+        self.assertAlmostEqual(metric, expected_metric, places=4)
+


 
{tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tests/test_non_binary.py
@@ -165,28 +165,28 @@ class TestAUCPRPA(unittest.TestCase):

         self.y_pred3 = np.array([4, 4, 4, 4])

-        self.y_true2 = [0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
-                        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
-                        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1]
+        self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+                        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+                        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])


         self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                        ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+                        ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+                        ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+                        ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+                        ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+                        ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+                        ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+                        ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+                        ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+                        ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+                        ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+                        ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+                        ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+                        ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+                        ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+                        ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]


     def test_auc_pr_pa(self):
@@ -205,10 +205,10 @@
         expected_score = 0.75
         self.assertAlmostEqual(score, expected_score, places=4)

-
-
-
-
+
+        score = round(auc_pr_pa(self.y_true2, self.y_pred4),2)
+        expected_score = 0.78
+        self.assertAlmostEqual(score, expected_score, places=4)


     def test_auc_pr_pa_consistency(self):
@@ -355,16 +355,65 @@ class TestNonBinaryPATE(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-
+        self.y_true1 = np.array([0,0,1,1])

-
+
+        self.y_pred1 = np.array([1, 3, 2, 4])
+
+        self.y_pred2 = np.array([1, 2, 3, 4])
+
+        self.y_pred3 = np.array([4, 4, 4, 4])
+
+        self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+                        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+                        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
+
+
+        self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
+                        ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+                        ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+                        ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+                        ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+                        ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+                        ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+                        ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+                        ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+                        ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+                        ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+                        ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+                        ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+                        ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+                        ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+                        ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+                        ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
+
+    def test_real_pate(self):
+        """
+        Prueba para la función real_pate.
+        """
+        score = round(real_pate(self.y_true1, self.y_pred1,early=1, delay=1),2)
+        expected_score = 0.79
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+        score = round(real_pate(self.y_true1, self.y_pred2,early=1, delay=1),2)
+        expected_score = 1.0
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+        score = round(real_pate(self.y_true1, self.y_pred3,early=1, delay=1),2)
+        expected_score = 0.75
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+
+        score = round(real_pate(self.y_true2, self.y_pred4,early=5, delay=5),2)
+        expected_score = 0.67
+        self.assertAlmostEqual(score, expected_score, places=4)


-    def
+    def test_real_pate_consistency(self):
         try:
             for _ in range(10):
                 y_true = np.random.choice([0, 1], size=(100,))
-                y_pred = np.random.
+                y_pred = np.random.random( size=(100,))

                 score = real_pate(y_true, y_pred, early=5, delay=5)
         except Exception as e:
tsadmetrics-0.1.16/tests/test_utils.py (new file)
@@ -0,0 +1,49 @@
+import unittest
+from tsadmetrics import *
+import os
+import numpy as np
+import random
+
+class TestComputeMetrics(unittest.TestCase):
+    def setUp(self):
+        self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred_binary = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred_non_binary = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.12, 0.11, 0.21, 0.13, 0.4, 0.3, 0.2, 0.1, 0.32, 0.98, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
+
+
+    def test_compute_metrics_binary(self):
+        metrics = [
+            ('point_wise_f_score', point_wise_f_score),
+            ('segment_wise_f_score', segment_wise_f_score),
+        ]
+        metrics_params = {}
+
+        results = compute_metrics(self.y_true, self.y_pred_binary, metrics, metrics_params)
+
+        self.assertTrue('point_wise_f_score' in results['metric_name'].values)
+        self.assertTrue('segment_wise_f_score' in results['metric_name'].values)
+
+    def test_compute_metrics_non_binary(self):
+        metrics = [
+            ('vus_roc', vus_roc),
+            ('vus_pr', vus_pr),
+        ]
+        metrics_params = {
+            'vus_roc': {'window': 3},
+            'vus_pr': {'window': 3}}
+
+        results = compute_metrics(self.y_true, self.y_pred_non_binary, metrics, metrics_params, is_anomaly_score=True)
+
+        self.assertTrue('vus_roc' in results['metric_name'].values)
+        self.assertTrue('vus_pr' in results['metric_name'].values)
+
+class TestComputeMetricsFromFile(unittest.TestCase):
+    def setUp(self):
+        self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred_binary = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.results_file = 'tests/test_data/results.csv'
+        self.conf_file = 'tests/test_data/config.json'
+
+    def test_compute_metrics_from_file(self):
+        results_df = compute_metrics_from_file(self.results_file, self.conf_file, output_dir='tests/test_data')
+        assert os.path.exists('tests/test_data/computed_metrics.csv'), f"Error: The file 'computed_metrics.csv' was not created."
{tsadmetrics-0.1.14 → tsadmetrics-0.1.16}/tsadmetrics/__init__.py
@@ -18,4 +18,4 @@ __all__ = ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score','po
            'affiliation_based_recall','affiliation_based_precision','affiliation_based_f_score','nab_score','temporal_distance',
            'average_detection_count','absolute_detection_distance','total_detected_in_range','detection_accuracy_in_range','weighted_detection_difference',
            'binary_pate','real_pate','mean_time_to_detect',
-           'precision_at_k','auc_roc_pw','auc_pr_pw','auc_pr_pa','auc_pr_sw','vus_roc','vus_pr', 'compute_metrics',]
+           'precision_at_k','auc_roc_pw','auc_pr_pw','auc_pr_pa','auc_pr_sw','vus_roc','vus_pr', 'compute_metrics', 'compute_metrics_from_file']