tsadmetrics 0.1.16__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/api_doc/conf.py +67 -0
- docs/{conf.py → full_doc/conf.py} +1 -1
- docs/manual_doc/conf.py +67 -0
- examples/example_direct_data.py +28 -0
- examples/example_direct_single_data.py +25 -0
- examples/example_file_reference.py +24 -0
- examples/example_global_config_file.py +13 -0
- examples/example_metric_config_file.py +19 -0
- examples/example_simple_metric.py +8 -0
- examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
- examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
- examples/specific_examples/AverageDetectionCount_example.py +24 -0
- examples/specific_examples/CompositeFScore_example.py +24 -0
- examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
- examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
- examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
- examples/specific_examples/MeanTimeToDetect_example.py +24 -0
- examples/specific_examples/NabScore_example.py +24 -0
- examples/specific_examples/PateFScore_example.py +24 -0
- examples/specific_examples/Pate_example.py +24 -0
- examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
- examples/specific_examples/PointadjustedAucPr_example.py +24 -0
- examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
- examples/specific_examples/PointadjustedFScore_example.py +24 -0
- examples/specific_examples/RangebasedFScore_example.py +24 -0
- examples/specific_examples/SegmentwiseFScore_example.py +24 -0
- examples/specific_examples/TemporalDistance_example.py +24 -0
- examples/specific_examples/TimeTolerantFScore_example.py +24 -0
- examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/TotalDetectedInRange_example.py +24 -0
- examples/specific_examples/VusPr_example.py +24 -0
- examples/specific_examples/VusRoc_example.py +24 -0
- examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
- tests/test_dpm.py +212 -0
- tests/test_ptdm.py +366 -0
- tests/test_registry.py +58 -0
- tests/test_runner.py +185 -0
- tests/test_spm.py +213 -0
- tests/test_tmem.py +198 -0
- tests/test_tpdm.py +369 -0
- tests/test_tstm.py +338 -0
- tsadmetrics/__init__.py +0 -21
- tsadmetrics/base/Metric.py +188 -0
- tsadmetrics/evaluation/Report.py +25 -0
- tsadmetrics/evaluation/Runner.py +253 -0
- tsadmetrics/metrics/Registry.py +141 -0
- tsadmetrics/metrics/__init__.py +2 -0
- tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
- tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
- tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
- tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
- tsadmetrics/metrics/spm/__init__.py +9 -0
- tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
- tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
- tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
- tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
- tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
- tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
- tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
- tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
- tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
- tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
- tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
- tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
- tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
- tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
- tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
- tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
- tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
- tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
- tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
- tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
- tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
- tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
- tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
- tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
- tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
- tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
- tsadmetrics/utils/functions_auc.py +393 -0
- tsadmetrics/utils/functions_conversion.py +63 -0
- tsadmetrics/utils/functions_counting_metrics.py +26 -0
- tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
- tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
- tsadmetrics-1.0.0.dist-info/METADATA +69 -0
- tsadmetrics-1.0.0.dist-info/RECORD +99 -0
- {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/top_level.txt +1 -1
- entorno/bin/activate_this.py +0 -32
- entorno/bin/rst2html.py +0 -23
- entorno/bin/rst2html4.py +0 -26
- entorno/bin/rst2html5.py +0 -33
- entorno/bin/rst2latex.py +0 -26
- entorno/bin/rst2man.py +0 -27
- entorno/bin/rst2odt.py +0 -28
- entorno/bin/rst2odt_prepstyles.py +0 -20
- entorno/bin/rst2pseudoxml.py +0 -23
- entorno/bin/rst2s5.py +0 -24
- entorno/bin/rst2xetex.py +0 -27
- entorno/bin/rst2xml.py +0 -23
- entorno/bin/rstpep2html.py +0 -25
- tests/test_binary.py +0 -946
- tests/test_non_binary.py +0 -420
- tests/test_utils.py +0 -49
- tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
- tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
- tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
- tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
- tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
- tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
- tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
- tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
- tsadmetrics/_tsadeval/metrics.py +0 -698
- tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
- tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
- tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
- tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
- tsadmetrics/_tsadeval/tests.py +0 -376
- tsadmetrics/_tsadeval/threshold_plt.py +0 -30
- tsadmetrics/_tsadeval/time_tolerant.py +0 -33
- tsadmetrics/binary_metrics.py +0 -1652
- tsadmetrics/metric_utils.py +0 -98
- tsadmetrics/non_binary_metrics.py +0 -398
- tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics/scripts/compute_metrics.py +0 -42
- tsadmetrics/utils.py +0 -122
- tsadmetrics/validation.py +0 -35
- tsadmetrics-0.1.16.dist-info/METADATA +0 -23
- tsadmetrics-0.1.16.dist-info/RECORD +0 -64
- tsadmetrics-0.1.16.dist-info/entry_points.txt +0 -2
- /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
- {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
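The file list above reflects the 1.0.0 restructuring: the flat function modules (binary_metrics.py, non_binary_metrics.py, metric_utils.py) are removed, and metrics become classes under tsadmetrics/metrics with an evaluation layer (Runner, Registry, Report) on top. A minimal usage sketch, pieced together from the example files added in this release and shown further down (import paths, metric keys, and call signatures are taken from those examples, not verified independently):

from tsadmetrics.evaluation.Runner import Runner

# Ground-truth labels and predictions for one series (values taken from the bundled examples)
y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]

# Each entry: (dataset name, ground truth, (binary predictions, continuous scores))
dataset_evaluations = [("dataset1", y_true, (y_pred, y_pred))]

# Each entry: (metric key resolved by the Registry, parameter dict)
metrics = [("adc", {}), ("pakf", {"k": 0.2})]

runner = Runner(dataset_evaluations, metrics)
results = runner.run()
print(results)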
docs/api_doc/conf.py
ADDED
@@ -0,0 +1,67 @@
+# Configuration file for the Sphinx documentation builder.
+#
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../'))
+
+
+project = 'TSADmetrics API Reference'
+copyright = '2025, Pedro Rafael Velasco Priego'
+author = 'Pedro Rafael Velasco Priego'
+release = 'MIT'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
+
+
+
+templates_path = ['_templates']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+html_theme = 'furo'
+html_static_path = ['_static']
+html_theme_options = {
+    #"sidebar_hide_name": True,
+    "light_css_variables": {
+        "color-brand-primary": "#2e5c7d",
+        "color-brand-content": "#2e5c7d",
+        "codebgcolor": "red",
+        "codetextcolor": "red",
+    },
+    "dark_css_variables": {
+        "color-brand-primary": "#6998b4",
+        "color-brand-content": "#6998b4",
+        "codebgcolor": "green",
+        "codetextcolor": "green",
+    },
+    "navigation_with_keys": True
+
+}
+html_baseurl = ''
+
+html_css_files = [
+    'css/custom.css',
+]
+
+epub_show_urls = 'footnote'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+### -- LaTeX options -------------------------------------------------
+
+# comando para compilar: make latexpdf LATEXMKOPTS="-xelatex"
+
+latex_elements = {
+    'maxlistdepth': '10', # Aumenta el límite de anidamiento
+    'papersize': 'a4paper',
+    'pointsize': '10pt',
+
+}
docs/{conf.py → full_doc/conf.py}
@@ -15,7 +15,7 @@ release = 'MIT'
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
 
-extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc',]
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
 
 
 
docs/manual_doc/conf.py
ADDED
@@ -0,0 +1,67 @@
+# Configuration file for the Sphinx documentation builder.
+#
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../'))
+
+
+project = 'TSADmetrics User Manual'
+copyright = '2025, Pedro Rafael Velasco Priego'
+author = 'Pedro Rafael Velasco Priego'
+release = 'MIT'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+
+extensions = ['sphinx.ext.duration', 'sphinx.ext.doctest', 'sphinx.ext.autodoc','sphinx.ext.mathjax']
+
+
+
+templates_path = ['_templates']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+html_theme = 'furo'
+html_static_path = ['_static']
+html_theme_options = {
+    #"sidebar_hide_name": True,
+    "light_css_variables": {
+        "color-brand-primary": "#2e5c7d",
+        "color-brand-content": "#2e5c7d",
+        "codebgcolor": "red",
+        "codetextcolor": "red",
+    },
+    "dark_css_variables": {
+        "color-brand-primary": "#6998b4",
+        "color-brand-content": "#6998b4",
+        "codebgcolor": "green",
+        "codetextcolor": "green",
+    },
+    "navigation_with_keys": True
+
+}
+html_baseurl = ''
+
+html_css_files = [
+    'css/custom.css',
+]
+
+epub_show_urls = 'footnote'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+
+
+
+### -- LaTeX options -------------------------------------------------
+
+# comando para compilar: make latexpdf LATEXMKOPTS="-xelatex"
+
+latex_elements = {
+    'maxlistdepth': '10', # Aumenta el límite de anidamiento
+    'papersize': 'a4paper',
+    'pointsize': '10pt',
+
+}
examples/example_direct_data.py
ADDED
@@ -0,0 +1,28 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred1_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2_cont = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+dataset_evaluations = [
+    ("dataset1", y_true1, (y_pred1, y_pred1_cont)),
+    ("dataset2", y_true2, (y_pred2, y_pred2_cont))
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4}),
+    ("pakf",{"k":0.5}),
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_direct_data_report.csv")
+print(results)
+
examples/example_direct_single_data.py
ADDED
@@ -0,0 +1,25 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+y_pred2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+dataset_evaluations = [
+    ("dataset1", y_true1, (y_pred1, y_pred1)),
+    ("dataset2", y_true2, (y_pred2, y_pred2))
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_direct_single_data_report.csv")
+print(results)
+
examples/example_file_reference.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+dataset_evaluations = [
+    ("dataset1", "example_input/results1.csv"),
+    ("dataset2", "example_input/results2.csv")
+
+]
+
+metrics = [
+    ("adc",{}),
+    ("dair",{}),
+    ("pakf",{"k":0.2}),
+    ("pakf",{"k":0.4})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run(generate_report=True, report_file="./example_output/example_file_reference_report.csv")
+print(results)
+
examples/example_global_config_file.py
ADDED
@@ -0,0 +1,13 @@
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+global_config_file = "example_input/example_evaluation_config.yaml"
+
+runner = Runner(global_config_file)
+results = runner.run(generate_report=True, report_file="./example_output/example_global_config_file_report.csv")
+print(results)
+
examples/example_metric_config_file.py
ADDED
@@ -0,0 +1,19 @@
+from tsadmetrics.evaluation.Runner import Runner
+
+
+y_true1 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_true2 = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+
+dataset_evaluations = [
+    ("dataset1", "example_input/results1.csv"),
+    ("dataset2", "example_input/results2.csv")
+
+]
+
+metrics_file = "example_input/example_metrics_config.yaml"
+
+runner = Runner(dataset_evaluations, metrics_file)
+results = runner.run(generate_report=True, report_file="./example_output/example_metric_config_file_report.csv")
+print(results)
+
examples/example_simple_metric.py
ADDED
@@ -0,0 +1,8 @@
+from tsadmetrics.metrics.tem.mdpt import PointadjustedFScore
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+metric = PointadjustedFScore()
+
+result = metric.compute(y_true, y_pred)
+print(f"PointadjustedFScore: {result}")
examples/specific_examples/AbsoluteDetectionDistance_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mect.AbsoluteDetectionDistance import AbsoluteDetectionDistance
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AbsoluteDetectionDistance()
+result = metric.compute(y_true, y_pred)
+print("AbsoluteDetectionDistance:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("add", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/AffiliationbasedFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mtdt.AffiliationbasedFScore import AffiliationbasedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AffiliationbasedFScore()
+result = metric.compute(y_true, y_pred)
+print("AffiliationbasedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("aff_f", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/AverageDetectionCount_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdtp.AverageDetectionCount import AverageDetectionCount
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = AverageDetectionCount()
+result = metric.compute(y_true, y_pred)
+print("AverageDetectionCount:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("adc", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/CompositeFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdpt.CompositeFScore import CompositeFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = CompositeFScore()
+result = metric.compute(y_true, y_pred)
+print("CompositeFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("cf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mpr.DelayThresholdedPointadjustedFScore import DelayThresholdedPointadjustedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = DelayThresholdedPointadjustedFScore()
+result = metric.compute(y_true, y_pred)
+print("DelayThresholdedPointadjustedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("dtpaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/DetectionAccuracyInRange_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdtp.DetectionAccuracyInRange import DetectionAccuracyInRange
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = DetectionAccuracyInRange()
+result = metric.compute(y_true, y_pred)
+print("DetectionAccuracyInRange:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("dair", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mect.EnhancedTimeseriesAwareFScore import EnhancedTimeseriesAwareFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = EnhancedTimeseriesAwareFScore()
+result = metric.compute(y_true, y_pred)
+print("EnhancedTimeseriesAwareFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("etaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/LatencySparsityawareFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mpr.LatencySparsityawareFScore import LatencySparsityawareFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = LatencySparsityawareFScore()
+result = metric.compute(y_true, y_pred)
+print("LatencySparsityawareFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("lsaf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/MeanTimeToDetect_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mpr.MeanTimeToDetect import MeanTimeToDetect
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = MeanTimeToDetect()
+result = metric.compute(y_true, y_pred)
+print("MeanTimeToDetect:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("mttd", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/NabScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mpr.NabScore import NabScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = NabScore()
+result = metric.compute(y_true, y_pred)
+print("NabScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("nab_score", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PateFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mtdt.PateFScore import PateFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PateFScore()
+result = metric.compute(y_true, y_pred)
+print("PateFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pate_f1", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/Pate_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mtdt.Pate import Pate
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = Pate()
+result = metric.compute(y_true, y_pred)
+print("Pate:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pate", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAtKFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdtp.PointadjustedAtKFScore import PointadjustedAtKFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PointadjustedAtKFScore()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAtKFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pakf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAucPr_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdpt.PointadjustedAucPr import PointadjustedAucPr
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = PointadjustedAucPr()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAucPr:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pa_auc_pr", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedAucRoc_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdpt.PointadjustedAucRoc import PointadjustedAucRoc
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = PointadjustedAucRoc()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedAucRoc:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("pa_auc_roc", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/PointadjustedFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.mdpt.PointadjustedFScore import PointadjustedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PointadjustedFScore()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("paf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)