tsadmetrics 0.1.17__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- {docs_api → docs/add_docs/api_doc}/conf.py +3 -26
- {docs_manual → docs/add_docs/full_doc}/conf.py +2 -25
- docs/add_docs/manual_doc/conf.py +67 -0
- docs/conf.py +1 -1
- examples/example_direct_data.py +28 -0
- examples/example_direct_single_data.py +25 -0
- examples/example_file_reference.py +24 -0
- examples/example_global_config_file.py +13 -0
- examples/example_metric_config_file.py +19 -0
- examples/example_simple_metric.py +8 -0
- examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
- examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
- examples/specific_examples/AverageDetectionCount_example.py +24 -0
- examples/specific_examples/CompositeFScore_example.py +24 -0
- examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
- examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
- examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
- examples/specific_examples/MeanTimeToDetect_example.py +24 -0
- examples/specific_examples/NabScore_example.py +24 -0
- examples/specific_examples/PateFScore_example.py +24 -0
- examples/specific_examples/Pate_example.py +24 -0
- examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
- examples/specific_examples/PointadjustedAucPr_example.py +24 -0
- examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
- examples/specific_examples/PointadjustedFScore_example.py +24 -0
- examples/specific_examples/RangebasedFScore_example.py +24 -0
- examples/specific_examples/SegmentwiseFScore_example.py +24 -0
- examples/specific_examples/TemporalDistance_example.py +24 -0
- examples/specific_examples/TimeTolerantFScore_example.py +24 -0
- examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
- examples/specific_examples/TotalDetectedInRange_example.py +24 -0
- examples/specific_examples/VusPr_example.py +24 -0
- examples/specific_examples/VusRoc_example.py +24 -0
- examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
- tsadmetrics/__init__.py +0 -21
- tsadmetrics/base/Metric.py +188 -0
- tsadmetrics/evaluation/Report.py +25 -0
- tsadmetrics/evaluation/Runner.py +253 -0
- tsadmetrics/metrics/Registry.py +141 -0
- tsadmetrics/metrics/__init__.py +2 -0
- tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
- tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
- tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
- tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
- tsadmetrics/metrics/spm/__init__.py +9 -0
- tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
- tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
- tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
- tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
- tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
- tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
- tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
- tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
- tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
- tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
- tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
- tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
- tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
- tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
- tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
- tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
- tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
- tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
- tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
- tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
- tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
- tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
- tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
- tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
- tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
- tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
- tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
- tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
- tsadmetrics/utils/functions_auc.py +393 -0
- tsadmetrics/utils/functions_conversion.py +63 -0
- tsadmetrics/utils/functions_counting_metrics.py +26 -0
- tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
- tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
- tsadmetrics-1.0.1.dist-info/METADATA +83 -0
- tsadmetrics-1.0.1.dist-info/RECORD +91 -0
- tsadmetrics-1.0.1.dist-info/top_level.txt +3 -0
- entorno/bin/activate_this.py +0 -32
- entorno/bin/rst2html.py +0 -23
- entorno/bin/rst2html4.py +0 -26
- entorno/bin/rst2html5.py +0 -33
- entorno/bin/rst2latex.py +0 -26
- entorno/bin/rst2man.py +0 -27
- entorno/bin/rst2odt.py +0 -28
- entorno/bin/rst2odt_prepstyles.py +0 -20
- entorno/bin/rst2pseudoxml.py +0 -23
- entorno/bin/rst2s5.py +0 -24
- entorno/bin/rst2xetex.py +0 -27
- entorno/bin/rst2xml.py +0 -23
- entorno/bin/rstpep2html.py +0 -25
- tests/test_binary.py +0 -946
- tests/test_non_binary.py +0 -450
- tests/test_utils.py +0 -49
- tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
- tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
- tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
- tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
- tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
- tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
- tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
- tsadmetrics/_tsadeval/metrics.py +0 -698
- tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
- tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
- tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
- tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
- tsadmetrics/_tsadeval/tests.py +0 -376
- tsadmetrics/_tsadeval/threshold_plt.py +0 -30
- tsadmetrics/_tsadeval/time_tolerant.py +0 -33
- tsadmetrics/binary_metrics.py +0 -1652
- tsadmetrics/metric_utils.py +0 -98
- tsadmetrics/non_binary_metrics.py +0 -372
- tsadmetrics/scripts/__init__.py +0 -0
- tsadmetrics/scripts/compute_metrics.py +0 -42
- tsadmetrics/utils.py +0 -124
- tsadmetrics/validation.py +0 -35
- tsadmetrics-0.1.17.dist-info/METADATA +0 -54
- tsadmetrics-0.1.17.dist-info/RECORD +0 -66
- tsadmetrics-0.1.17.dist-info/entry_points.txt +0 -2
- tsadmetrics-0.1.17.dist-info/top_level.txt +0 -6
- {tests → tsadmetrics/base}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval → evaluation}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/affiliation → metrics/tem}/__init__.py +0 -0
- /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
- {tsadmetrics-0.1.17.dist-info → tsadmetrics-1.0.1.dist-info}/WHEEL +0 -0
examples/specific_examples/PointadjustedFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.PointadjustedFScore import PointadjustedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = PointadjustedFScore()
+result = metric.compute(y_true, y_pred)
+print("PointadjustedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("paf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
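Every example script added in 1.0.1 follows this same pattern: instantiate the metric class and call compute(), or hand the Runner a list of (dataset name, ground truth, prediction tuple) entries plus a list of (metric alias, parameter dict) pairs. The sketch below combines the pieces shown in this diff into one multi-dataset, multi-metric run and writes the results with the new Report class; the interpretation of the prediction tuple as (binary predictions, continuous scores) and the hand-off of Runner.run() output to Report are assumptions, since neither is spelled out in the hunks shown here.

```python
# Hypothetical end-to-end sketch assembled from the example scripts and
# evaluation/Report.py in this diff; the aliases ("paf", "rbf") and the
# (binary_pred, continuous_pred) tuple layout are taken from the examples.
from tsadmetrics.evaluation.Runner import Runner
from tsadmetrics.evaluation.Report import Report

y_true = [0, 0, 1, 1, 1, 0, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 0, 0, 0, 1, 1, 0]

dataset_evaluations = [
    # (name, ground truth, (binary predictions, continuous scores)) -- presumed layout
    ("dataset1", y_true, (y_pred, y_pred)),
    ("dataset2", y_true, (y_pred, y_pred)),
]

metrics = [
    ("paf", {}),  # PointadjustedFScore with default parameters
    ("rbf", {}),  # RangebasedFScore with default parameters
]

runner = Runner(dataset_evaluations, metrics)
results = runner.run()

# Report.generate_report accepts either a dict or a DataFrame-like object
# (see evaluation/Report.py further down in this diff).
Report().generate_report(results, "results.csv")
```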
examples/specific_examples/RangebasedFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.RangebasedFScore import RangebasedFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = RangebasedFScore()
+result = metric.compute(y_true, y_pred)
+print("RangebasedFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("rbf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/SegmentwiseFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tpdm.SegmentwiseFScore import SegmentwiseFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = SegmentwiseFScore()
+result = metric.compute(y_true, y_pred)
+print("SegmentwiseFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("swf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/TemporalDistance_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tmem.TemporalDistance import TemporalDistance
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = TemporalDistance()
+result = metric.compute(y_true, y_pred)
+print("TemporalDistance:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("td", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/TimeTolerantFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.TimeTolerantFScore import TimeTolerantFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = TimeTolerantFScore()
+result = metric.compute(y_true, y_pred)
+print("TimeTolerantFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("ttf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/TimeseriesAwareFScore_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.TimeseriesAwareFScore import TimeseriesAwareFScore
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = TimeseriesAwareFScore()
+result = metric.compute(y_true, y_pred)
+print("TimeseriesAwareFScore:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("taf", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/TotalDetectedInRange_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.TotalDetectedInRange import TotalDetectedInRange
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = TotalDetectedInRange()
+result = metric.compute(y_true, y_pred)
+print("TotalDetectedInRange:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("tdir", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/VusPr_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.VusPr import VusPr
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = VusPr()
+result = metric.compute(y_true, y_pred)
+print("VusPr:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("vus_pr", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/VusRoc_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.tstm.VusRoc import VusRoc
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# Direct usage
+metric = VusRoc()
+result = metric.compute(y_true, y_pred)
+print("VusRoc:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("vus_roc", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
examples/specific_examples/WeightedDetectionDifference_example.py
ADDED
@@ -0,0 +1,24 @@
+from tsadmetrics.metrics.tem.ptdm.WeightedDetectionDifference import WeightedDetectionDifference
+from tsadmetrics.evaluation.Runner import Runner
+import numpy as np
+
+y_true = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+y_pred = [0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+
+# Direct usage
+metric = WeightedDetectionDifference()
+result = metric.compute(y_true, y_pred)
+print("WeightedDetectionDifference:", result)
+
+# Usage with Runner
+dataset_evaluations = [
+    ("dataset1", y_true, (y_pred, y_pred))
+]
+
+metrics = [
+    ("wdd", {})
+]
+
+runner = Runner(dataset_evaluations, metrics)
+results = runner.run()
+print(results)
tsadmetrics/__init__.py
CHANGED
@@ -1,21 +0,0 @@
-from .binary_metrics import *
-from .non_binary_metrics import *
-from .utils import *
-
-
-
-
-__author__ = 'Pedro Rafael Velasco Priego i12veprp@uco.es'
-__version__ = "0.1.3"
-__all__ = ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score','point_adjusted_recall',
-           'point_adjusted_precision', 'point_adjusted_f_score', 'segment_wise_recall', 'segment_wise_precision',
-           'segment_wise_f_score','delay_th_point_adjusted_recall', 'delay_th_point_adjusted_precision',
-           'delay_th_point_adjusted_f_score','point_adjusted_at_k_recall','point_adjusted_at_k_precision',
-           'point_adjusted_at_k_f_score','latency_sparsity_aw_recall', 'latency_sparsity_aw_precision',
-           'latency_sparsity_aw_f_score','composite_f_score','time_tolerant_recall','time_tolerant_precision',
-           'time_tolerant_f_score','range_based_recall','range_based_precision','range_based_f_score','ts_aware_recall',
-           'ts_aware_precision','ts_aware_f_score','enhanced_ts_aware_recall','enhanced_ts_aware_precision','enhanced_ts_aware_f_score',
-           'affiliation_based_recall','affiliation_based_precision','affiliation_based_f_score','nab_score','temporal_distance',
-           'average_detection_count','absolute_detection_distance','total_detected_in_range','detection_accuracy_in_range','weighted_detection_difference',
-           'binary_pate','real_pate','mean_time_to_detect',
-           'precision_at_k','auc_roc_pw','auc_pr_pw','auc_roc_pa','auc_pr_pa','vus_roc','vus_pr', 'compute_metrics', 'compute_metrics_from_file']
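The removed __init__.py shows the scope of the 1.0 API break: the flat functions re-exported at the package root (point_adjusted_f_score, range_based_f_score, vus_roc, ...) are gone, replaced by metric classes under tsadmetrics.metrics plus the Runner/Report machinery. A hypothetical before/after sketch follows; the old call is reconstructed from the removed __all__ list and its exact signature is not shown in this diff.

```python
# Hypothetical migration sketch for the 0.1.17 -> 1.0.1 API change.

# 0.1.x (removed): flat functions re-exported from the package root.
# from tsadmetrics import point_adjusted_f_score
# score = point_adjusted_f_score(y_true, y_pred)   # old signature not shown in this diff

# 1.0.x: metric classes with a shared compute() interface.
from tsadmetrics.metrics.tem.tpdm.PointadjustedFScore import PointadjustedFScore

y_true = [0, 0, 1, 1, 0]
y_pred = [0, 1, 1, 1, 0]
score = PointadjustedFScore().compute(y_true, y_pred)
print(score)
```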
tsadmetrics/base/Metric.py
ADDED
@@ -0,0 +1,188 @@
+import yaml
+import numpy as np
+
+class Metric:
+    """
+    Base class for time series anomaly detection metrics.
+
+    This class provides common functionality for metric configuration, including
+    parameter validation from a YAML configuration file and support for a parameter
+    schema defined in each subclass.
+
+    Parameters:
+        name (str, optional):
+            The name of the metric. If not provided, it defaults to the lowercase
+            name of the subclass.
+        config_file (str, optional):
+            Path to a YAML configuration file. Parameters defined in the file under
+            the metric's name will be loaded automatically.
+        \*\*params:
+            Additional parameters passed directly to the metric. These override
+            those loaded from the configuration file.
+
+    Attributes:
+        name (str):
+            Name of the metric instance.
+        params (dict):
+            Dictionary of parameters used by the metric.
+        binary_prediction (bool):
+            Whether the metric expects binary predictions (True) or continuous scores (False).
+
+    Raises:
+        ValueError:
+            If a required parameter is missing or if the configuration file is not found.
+        TypeError:
+            If a parameter does not match its expected type as defined in the schema.
+    """
+
+    def __init__(self, name=None, config_file=None, **params):
+        self.name = name or self.__class__.__name__.lower()
+
+        # Ensure subclasses define binary_prediction
+        if not hasattr(self.__class__, "binary_prediction"):
+            raise ValueError(
+                f"Subclass {self.__class__.__name__} must define class attribute 'binary_prediction' (True/False)."
+            )
+        if not isinstance(self.__class__.binary_prediction, bool):
+            raise TypeError(
+                f"'binary_prediction' in {self.__class__.__name__} must be of type bool."
+            )
+
+        self.binary_prediction = self.__class__.binary_prediction
+        self.params = {}
+        self.configure(config_file=config_file, **params)
+
+
+    def configure(self, config_file=None, **params):
+        """
+        Load and validate metric parameters from a YAML configuration file
+        and/or from explicit keyword arguments.
+
+        Parameters:
+            config_file (str, optional):
+                Path to the configuration file. If provided, it will load parameters
+                under the section with the metric's name.
+            \*\*params:
+                Parameters passed directly to the metric instance.
+
+        Raises:
+            ValueError:
+                If a required parameter is not specified or the configuration file is missing.
+            TypeError:
+                If a parameter value does not match the expected type.
+        """
+        if config_file:
+            try:
+                with open(config_file, 'r') as f:
+                    config = yaml.safe_load(f)
+                    file_params = config.get(self.name.lower(), {})
+                    self.params.update(file_params)
+            except FileNotFoundError:
+                raise ValueError(f"Configuration file '{config_file}' not found.")
+
+        self.params.update(params)
+
+        schema = getattr(self.__class__, 'param_schema', {})
+        for key, rules in schema.items():
+            if key not in self.params:
+                if 'default' in rules:
+                    self.params[key] = rules['default']
+                else:
+                    raise ValueError(f"Required parameter '{key}' not specified.")
+
+            if 'type' in rules and key in self.params:
+                expected_type = rules['type']
+                if expected_type is float:
+                    if not isinstance(self.params[key], (float, int)):
+                        raise TypeError(f"Parameter '{key}' must be of type float, got {type(self.params[key]).__name__} instead.")
+                else:
+                    if not isinstance(self.params[key], expected_type):
+                        raise TypeError(f"Parameter '{key}' must be of type {expected_type.__name__}, got {type(self.params[key]).__name__} instead.")
+
+    def _validate_inputs(self, y_true, y_pred):
+        """
+        Validate that y_true and y_pred are valid sequences of the same length.
+
+        If binary_prediction = True:
+            Both y_true and y_pred must be binary (0 or 1).
+        If binary_prediction = False:
+            y_true must be binary (0 or 1), y_pred can be continuous values.
+
+        Raises:
+            ValueError: If lengths differ or values are not valid.
+            TypeError: If inputs are not array-like.
+        """
+        y_true = np.asarray(y_true)
+        y_pred = np.asarray(y_pred)
+
+        if y_true.shape != y_pred.shape:
+            raise ValueError(
+                f"Shape mismatch: y_true has shape {y_true.shape}, y_pred has shape {y_pred.shape}."
+            )
+
+        if y_true.ndim != 1 or y_pred.ndim != 1:
+            raise ValueError("y_true and y_pred must be 1D arrays.")
+
+        if not np.isin(y_true, [0, 1]).all():
+            raise ValueError("y_true must contain only 0 or 1.")
+
+        if self.binary_prediction:
+            if not np.isin(y_pred, [0, 1]).all():
+                raise ValueError("y_pred must contain only 0 or 1 (binary_prediction=True).")
+
+        return y_true, y_pred
+
+    def _compute(self, y_true, y_pred):
+        """
+        Compute the value of the metric (core implementation).
+
+        This method contains the actual logic of the metric and must be
+        implemented by subclasses. It is automatically called by
+        `compute()` after input validation.
+
+        Parameters:
+            y_true (array-like):
+                Ground truth binary labels.
+            y_pred (array-like):
+                Predicted binary labels.
+
+        Returns:
+            float: The value of the metric.
+
+        Raises:
+            NotImplementedError: If the method is not overridden by a subclass.
+        """
+        raise NotImplementedError("Subclasses must implement _compute().")
+
+
+
+    def compute(self, y_true, y_pred):
+        """
+        Compute the value of the metric (wrapper method).
+
+        This method performs input validation and then calls the internal
+        `_compute()` method, which contains the actual metric logic.
+
+        **Important:** Subclasses **should not override** this method.
+        Instead, implement `_compute()` to define the behavior of the metric.
+
+        Parameters
+        ----------
+        y_true : array-like
+            Ground truth binary labels.
+        y_pred : array-like
+            Predicted binary labels.
+
+        Returns
+        -------
+        float
+            The value of the metric.
+
+        Raises
+        ------
+        NotImplementedError
+            If `_compute()` is not implemented by the subclass.
+        """
+        y_true, y_pred = self._validate_inputs(y_true, y_pred)
+        return self._compute(y_true, y_pred)
+
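The new base class fixes the contract for every metric in 1.0.1: a subclass must declare a boolean binary_prediction class attribute, may declare a param_schema whose entries carry a 'type' and an optional 'default', and must implement _compute(); compute() only validates inputs and delegates. Below is a minimal sketch of a hypothetical subclass and the matching YAML section (the DummyRatio metric and its parameters are illustrative only and are not part of tsadmetrics).

```python
# Hypothetical subclass sketch based on the Metric base class shown above.
import numpy as np
from tsadmetrics.base.Metric import Metric

class DummyRatio(Metric):
    # Required by Metric.__init__: this metric expects binary predictions.
    binary_prediction = True

    # Optional schema consumed by Metric.configure(): 'type' is checked,
    # 'default' fills in missing parameters, otherwise a ValueError is raised.
    param_schema = {
        "alpha": {"type": float, "default": 0.5},
        "strict": {"type": bool, "default": False},
    }

    def _compute(self, y_true, y_pred):
        # Toy logic: fraction of matching points, scaled by alpha.
        return self.params["alpha"] * float(np.mean(y_true == y_pred))

# Parameters can come from keyword arguments...
m = DummyRatio(alpha=1.0)
print(m.compute([0, 1, 1, 0], [0, 1, 0, 0]))  # 0.75

# ...or from a YAML file whose top-level key is the metric name
# (the lowercased class name by default):
#
# dummyratio:
#   alpha: 0.8
#   strict: true
#
# m = DummyRatio(config_file="metrics.yaml")
```

Because configure() first loads the YAML section and then applies the keyword arguments, explicit parameters override values read from the configuration file.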
tsadmetrics/evaluation/Report.py
ADDED
@@ -0,0 +1,25 @@
+import pandas as pd
+
+class Report:
+    def __init__(self):
+        pass
+    def generate_report(self, results, output_file):
+        """
+        Generate a report from the evaluation results.
+
+        Parameters:
+            results (dict):
+                Dictionary containing evaluation results.
+            output_file (str):
+                Path to the output file where the report will be saved.
+        """
+
+        if type(results) is dict:
+            df = pd.DataFrame.from_dict(results, orient='index')
+
+            df.index.name = 'dataset'
+            df.reset_index(inplace=True)
+
+            df.to_csv(output_file, index=False, sep=';')
+        else:
+            results.to_csv(output_file, sep=';')