autogluon.timeseries 0.8.3b20231024__tar.gz → 0.8.3b20231025__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of autogluon.timeseries might be problematic; see the advisory details below for more information.
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/PKG-INFO +1 -1
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/evaluator.py +58 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/learner.py +5 -5
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/metrics/__init__.py +58 -0
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/metrics/abstract.py +201 -0
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/metrics/point.py +154 -0
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/metrics/quantile.py +26 -0
- autogluon.timeseries-0.8.3b20231025/src/autogluon/timeseries/metrics/utils.py +18 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +43 -41
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/abstract/model_trial.py +1 -1
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +22 -47
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +27 -15
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +1 -20
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/local/abstract_local_model.py +1 -1
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +4 -2
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/presets.py +2 -1
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/predictor.py +24 -15
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/trainer/abstract_trainer.py +14 -22
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/PKG-INFO +1 -1
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/SOURCES.txt +5 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/requires.txt +3 -3
- autogluon.timeseries-0.8.3b20231024/src/autogluon/timeseries/evaluator.py +0 -281
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/setup.cfg +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/setup.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/configs/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/configs/presets_configs.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/dataset/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/gluonts/torch/models.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/local/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/local/naive.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/local/npts.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/splitter.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/trainer/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/features.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/forecast.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
- {autogluon.timeseries-0.8.3b20231024 → autogluon.timeseries-0.8.3b20231025}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from autogluon.common.utils.deprecated_utils import Deprecated
|
|
4
|
+
from autogluon.timeseries import TimeSeriesDataFrame
|
|
5
|
+
from autogluon.timeseries.metrics import AVAILABLE_METRICS, check_get_evaluation_metric
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@Deprecated(
    min_version_to_warn="1.0",
    min_version_to_error="1.1",
    custom_warning_msg="Please use the metrics defined in autogluon.timeseries.metrics instead.",
)
class TimeSeriesEvaluator:
    """This class has been deprecated in AutoGluon v1.0 and is only provided for backward compatibility!"""

    # Sign (+1 / -1) of each available metric, keyed by metric name (old-API attribute).
    METRIC_COEFFICIENTS = {name: metric_cls().sign for name, metric_cls in AVAILABLE_METRICS.items()}
    # Names of all supported metrics (old-API attribute; shadows the module-level dict on purpose).
    AVAILABLE_METRICS = list(AVAILABLE_METRICS.keys())
    # Name of the metric used when none is specified.
    DEFAULT_METRIC = check_get_evaluation_metric(None).name

    def __init__(
        self,
        eval_metric: str,
        prediction_length: int,
        target_column: str = "target",
        eval_metric_seasonal_period: Optional[int] = None,
    ):
        self.eval_metric = check_get_evaluation_metric(eval_metric)
        self.prediction_length = prediction_length
        self.target_column = target_column
        self.seasonal_period = eval_metric_seasonal_period

    @property
    def coefficient(self) -> int:
        """Multiplier (+1 or -1) converting the internal metric value to higher-is-better format."""
        return self.eval_metric.sign

    @property
    def higher_is_better(self) -> bool:
        """Whether larger raw values of the underlying metric are better."""
        return self.eval_metric.greater_is_better_internal

    @staticmethod
    def check_get_evaluation_metric(
        metric_name: Optional[str] = None,
        raise_if_not_available: bool = True,
    ):
        """Resolve a metric name to a scorer instance (``raise_if_not_available`` is kept for API compatibility only)."""
        return check_get_evaluation_metric(metric_name)

    def __call__(self, data: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame) -> float:
        """Evaluate ``predictions`` against ``data``, returning the raw metric value."""
        # All non-"mean" prediction columns are quantile forecasts named by their level.
        forecast_columns = predictions.columns
        quantile_levels = [float(col) for col in forecast_columns if col != "mean"]
        score = self.eval_metric(
            data=data,
            predictions=predictions,
            prediction_length=self.prediction_length,
            target=self.target_column,
            seasonal_period=self.seasonal_period,
            quantile_levels=quantile_levels,
        )
        # Return raw metric in lower-is-better format to match the old Evaluator API
        return score * self.eval_metric.sign
|
|
@@ -7,7 +7,7 @@ import pandas as pd
|
|
|
7
7
|
|
|
8
8
|
from autogluon.core.learner import AbstractLearner
|
|
9
9
|
from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
|
|
10
|
-
from autogluon.timeseries.
|
|
10
|
+
from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
|
|
11
11
|
from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
|
|
12
12
|
from autogluon.timeseries.splitter import AbstractWindowSplitter
|
|
13
13
|
from autogluon.timeseries.trainer import AbstractTimeSeriesTrainer, AutoTimeSeriesTrainer
|
|
@@ -28,14 +28,14 @@ class TimeSeriesLearner(AbstractLearner):
|
|
|
28
28
|
target: str = "target",
|
|
29
29
|
known_covariates_names: Optional[List[str]] = None,
|
|
30
30
|
trainer_type: Type[AbstractTimeSeriesTrainer] = AutoTimeSeriesTrainer,
|
|
31
|
-
eval_metric:
|
|
31
|
+
eval_metric: Union[str, TimeSeriesScorer, None] = None,
|
|
32
32
|
eval_metric_seasonal_period: Optional[int] = None,
|
|
33
33
|
prediction_length: int = 1,
|
|
34
34
|
cache_predictions: bool = True,
|
|
35
35
|
**kwargs,
|
|
36
36
|
):
|
|
37
37
|
super().__init__(path_context=path_context)
|
|
38
|
-
self.eval_metric:
|
|
38
|
+
self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
|
|
39
39
|
self.eval_metric_seasonal_period = eval_metric_seasonal_period
|
|
40
40
|
self.trainer_type = trainer_type
|
|
41
41
|
self.target = target
|
|
@@ -89,7 +89,7 @@ class TimeSeriesLearner(AbstractLearner):
|
|
|
89
89
|
logger.info(f"AutoGluon will save models to {self.path}")
|
|
90
90
|
|
|
91
91
|
logger.info(f"AutoGluon will gauge predictive performance using evaluation metric: '{self.eval_metric}'")
|
|
92
|
-
if
|
|
92
|
+
if not self.eval_metric.greater_is_better_internal:
|
|
93
93
|
logger.info(
|
|
94
94
|
"\tThis metric's sign has been flipped to adhere to being 'higher is better'. "
|
|
95
95
|
"The reported score can be multiplied by -1 to get the metric value.",
|
|
@@ -185,7 +185,7 @@ class TimeSeriesLearner(AbstractLearner):
|
|
|
185
185
|
self,
|
|
186
186
|
data: TimeSeriesDataFrame,
|
|
187
187
|
model: AbstractTimeSeriesModel = None,
|
|
188
|
-
metric:
|
|
188
|
+
metric: Union[str, TimeSeriesScorer, None] = None,
|
|
189
189
|
use_cache: bool = True,
|
|
190
190
|
) -> float:
|
|
191
191
|
data = self.feature_generator.transform(data)
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Type, Union
|
|
3
|
+
|
|
4
|
+
from .abstract import TimeSeriesScorer
|
|
5
|
+
from .point import MAE, MAPE, MASE, MSE, RMSE, RMSSE, WAPE, sMAPE
|
|
6
|
+
from .quantile import WQL
|
|
7
|
+
|
|
8
|
+
# Public names exported by ``autogluon.timeseries.metrics``.
__all__ = [
    "MAE",
    "MAPE",
    "MASE",
    "sMAPE",
    "MSE",
    "RMSE",
    "RMSSE",
    "WAPE",
    "WQL",
]

# Metric used when the user does not specify ``eval_metric``.
DEFAULT_METRIC_NAME = "WQL"

# Mapping from upper-cased metric name to the scorer class implementing it.
# Lookup in ``check_get_evaluation_metric`` upper-cases user input, so keys must be upper-case
# (note "SMAPE" maps to the ``sMAPE`` class).
AVAILABLE_METRICS = {
    "MASE": MASE,
    "MAPE": MAPE,
    "SMAPE": sMAPE,
    "RMSE": RMSE,
    "RMSSE": RMSSE,
    "WAPE": WAPE,
    "WQL": WQL,
    # Exist for compatibility
    "MSE": MSE,
    "MAE": MAE,
}
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def check_get_evaluation_metric(
    eval_metric: Union[str, TimeSeriesScorer, Type[TimeSeriesScorer], None] = None
) -> TimeSeriesScorer:
    """Convert ``eval_metric`` (name, scorer class, scorer instance, or None) into a ``TimeSeriesScorer`` instance.

    Raises
    ------
    ValueError
        If a string name is not among ``AVAILABLE_METRICS`` or the argument has an unsupported type.
    """
    if isinstance(eval_metric, TimeSeriesScorer):
        # Already an instance - use as-is.
        return eval_metric
    if isinstance(eval_metric, type) and issubclass(eval_metric, TimeSeriesScorer):
        # e.g., user passed `eval_metric=CustomMetric` instead of `eval_metric=CustomMetric()`
        return eval_metric()
    if isinstance(eval_metric, str):
        # Metric names are matched case-insensitively against the registry keys.
        metric_key = eval_metric.upper()
        if metric_key not in AVAILABLE_METRICS:
            raise ValueError(
                f"Time series metric {eval_metric} not supported. Available metrics are:\n"
                f"{json.dumps(list(AVAILABLE_METRICS.keys()), indent=2)}"
            )
        return AVAILABLE_METRICS[metric_key]()
    if eval_metric is None:
        return AVAILABLE_METRICS[DEFAULT_METRIC_NAME]()
    raise ValueError(
        f"eval_metric must be of type str, TimeSeriesScorer or None "
        f"(received eval_metric = {eval_metric} of type {type(eval_metric)})"
    )
|
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
from typing import Optional, Tuple
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import pandas as pd
|
|
5
|
+
|
|
6
|
+
from autogluon.timeseries import TimeSeriesDataFrame
|
|
7
|
+
from autogluon.timeseries.utils.datetime import get_seasonality
|
|
8
|
+
from autogluon.timeseries.utils.warning_filters import warning_filter
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class TimeSeriesScorer:
    """Base class for all evaluation metrics used in AutoGluon-TimeSeries.

    This object always returns the metric in greater-is-better format.

    Follows the design of ``autogluon.core.metrics.Scorer``.

    Attributes
    ----------
    greater_is_better_internal : bool, default = False
        Whether internal method :meth:`~autogluon.timeseries.metrics.TimeSeriesScorer.compute_metric` is
        a loss function (default), meaning low is good, or a score function, meaning high is good.
    optimum : float, default = 0.0
        The best score achievable by the score function, i.e. maximum in case of scorer function and minimum in case of
        loss function.
    optimized_by_median : bool, default = False
        Whether given point forecast metric is optimized by the median (if True) or expected value (if False). If True,
        all models in AutoGluon-TimeSeries will attempt to paste median forecast into the "mean" column.
    needs_quantile : bool, default = False
        Whether the given metric uses the quantile predictions. Some models will modify the training procedure if they
        are trained to optimize a quantile metric.
    equivalent_tabular_regression_metric : str
        Name of an equivalent metric used by AutoGluon-Tabular with ``problem_type="regression"``. Used by models that
        train a TabularPredictor under the hood. This attribute should only be specified by point forecast metrics.
    """

    greater_is_better_internal: bool = False
    optimum: float = 0.0
    optimized_by_median: bool = False
    needs_quantile: bool = False
    equivalent_tabular_regression_metric: Optional[str] = None

    @property
    def sign(self) -> int:
        # +1 when compute_metric returns a score (higher is better), -1 when it returns a loss.
        return 1 if self.greater_is_better_internal else -1

    @property
    def name(self) -> str:
        # Metric name is the class name, e.g. "MASE".
        return f"{self.__class__.__name__}"

    def __repr__(self) -> str:
        return self.name

    def __str__(self) -> str:
        return self.name

    @property
    def name_with_sign(self) -> str:
        # Loss metrics get a "-" prefix since __call__ flips their sign to greater-is-better.
        if self.greater_is_better_internal:
            prefix = ""
        else:
            prefix = "-"
        return f"{prefix}{self.name}"

    def __call__(
        self,
        data: TimeSeriesDataFrame,
        predictions: TimeSeriesDataFrame,
        prediction_length: int = 1,
        target: str = "target",
        seasonal_period: Optional[int] = None,
        **kwargs,
    ) -> float:
        """Compute the metric in greater-is-better format for given data and predictions."""
        # Infer the seasonal period from the data frequency unless explicitly provided.
        seasonal_period = get_seasonality(data.freq) if seasonal_period is None else seasonal_period

        # The last `prediction_length` timesteps of each series form the forecast horizon.
        data_past = data.slice_by_timestep(None, -prediction_length)
        data_future = data.slice_by_timestep(-prediction_length, None)

        assert (predictions.num_timesteps_per_item() == prediction_length).all()
        assert data_future.index.equals(predictions.index), "Prediction and data indices do not match."

        try:
            with warning_filter():
                self.save_past_metrics(
                    data_past=data_past,
                    target=target,
                    seasonal_period=seasonal_period,
                    **kwargs,
                )
                metric_value = self.compute_metric(
                    data_future=data_future,
                    predictions=predictions,
                    target=target,
                    **kwargs,
                )
        finally:
            # Always drop cached past metrics so state does not leak between evaluations.
            self.clear_past_metrics()
        return metric_value * self.sign

    # Alias so the metric can be invoked as `metric.score(...)` as well as `metric(...)`.
    score = __call__

    def compute_metric(
        self,
        data_future: TimeSeriesDataFrame,
        predictions: TimeSeriesDataFrame,
        target: str = "target",
        **kwargs,
    ) -> float:
        """Internal method that computes the metric for given forecast & actual data.

        This method should be implemented by all custom metrics.

        Parameters
        ----------
        data_future : TimeSeriesDataFrame
            Actual values of the time series during the forecast horizon (``prediction_length`` values for each time
            series in the dataset). This data frame is guaranteed to have the same index as ``predictions``.
        predictions : TimeSeriesDataFrame
            Data frame with predictions for the forecast horizon. Contain columns "mean" (point forecast) and the
            columns corresponding to each of the quantile levels.
        target : str, default = "target"
            Name of the column in ``data_future`` that contains the target time series.

        Returns
        -------
        score : float
            Value of the metric for given forecast and data. If self.greater_is_better_internal is True, returns score
            in greater-is-better format, otherwise in lower-is-better format.

        """
        raise NotImplementedError

    def save_past_metrics(
        self,
        data_past: TimeSeriesDataFrame,
        target: str = "target",
        seasonal_period: int = 1,
        **kwargs,
    ) -> None:
        """Compute auxiliary metrics on past data (before forecast horizon), if the chosen metric requires it.

        This method should only be implemented by metrics that rely on historic (in-sample) data, such as Mean Absolute
        Scaled Error (MASE) https://en.wikipedia.org/wiki/Mean_absolute_scaled_error.

        We keep this method separate from :meth:`compute_metric` to avoid redundant computations when fitting ensemble.
        """
        pass

    def clear_past_metrics(self) -> None:
        """Clear auxiliary metrics saved in :meth:`save_past_metrics`.

        This method should only be implemented if :meth:`save_past_metrics` has been implemented.
        """
        pass

    def error(self, *args, **kwargs):
        """Return error in lower-is-better format."""
        return self.optimum - self.score(*args, **kwargs)

    @staticmethod
    def _safemean(series: pd.Series) -> float:
        """Compute mean of an pd.Series, ignoring inf, -inf and nan values."""
        return np.nanmean(series.replace([np.inf, -np.inf], np.nan).values)

    @staticmethod
    def _get_point_forecast_score_inputs(
        data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target"
    ) -> Tuple[pd.Series, pd.Series]:
        """Get inputs necessary to compute point forecast metrics.

        Returns
        -------
        y_true : pd.Series, shape [num_items * prediction_length]
            Target time series values during the forecast horizon.
        y_pred : pd.Series, shape [num_items * prediction_length]
            Predicted time series values during the forecast horizon.
        """
        y_true = data_future[target]
        y_pred = predictions["mean"]
        return y_true, y_pred

    @staticmethod
    def _get_quantile_forecast_score_inputs(
        data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target"
    ) -> Tuple[pd.Series, pd.DataFrame, np.ndarray]:
        """Get inputs necessary to compute quantile forecast metrics.

        Returns
        -------
        y_true : pd.Series, shape [num_items * prediction_length]
            Target time series values during the forecast horizon.
        q_pred : pd.DataFrame, shape [num_items * prediction_length, num_quantiles]
            Quantile forecast for each predicted quantile level. Column order corresponds to ``quantile_levels``.
        quantile_levels : np.ndarray, shape [num_quantiles]
            Quantile levels for which the forecasts are generated (as floats).
        """
        # Every non-"mean" column is a quantile forecast named by its level (e.g. "0.9").
        quantile_columns = [col for col in predictions.columns if col != "mean"]
        y_true = data_future[target]
        q_pred = predictions[quantile_columns]
        quantile_levels = np.array(quantile_columns, dtype=float)
        return y_true, q_pred, quantile_levels
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
import pandas as pd
|
|
6
|
+
|
|
7
|
+
from autogluon.timeseries import TimeSeriesDataFrame
|
|
8
|
+
from autogluon.timeseries.dataset.ts_dataframe import ITEMID
|
|
9
|
+
|
|
10
|
+
from .abstract import TimeSeriesScorer
|
|
11
|
+
from .utils import _in_sample_abs_seasonal_error, _in_sample_squared_seasonal_error
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class RMSE(TimeSeriesScorer):
    """Root mean squared error of the point ("mean") forecast."""

    equivalent_tabular_regression_metric = "root_mean_squared_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        squared_errors = (y_true - y_pred) ** 2
        # _safemean ignores nan/inf entries before averaging.
        return np.sqrt(self._safemean(squared_errors))
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class MSE(TimeSeriesScorer):
    """Mean squared error of the point ("mean") forecast."""

    equivalent_tabular_regression_metric = "mean_squared_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        squared_errors = (y_true - y_pred) ** 2
        # _safemean ignores nan/inf entries before averaging.
        return self._safemean(squared_errors)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class MAE(TimeSeriesScorer):
    """Mean absolute error of the point forecast."""

    # MAE is minimized by the median forecast, not the mean.
    optimized_by_median = True
    equivalent_tabular_regression_metric = "mean_absolute_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        abs_errors = (y_true - y_pred).abs()
        return self._safemean(abs_errors)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class WAPE(TimeSeriesScorer):
    """Weighted absolute percentage error: total absolute error normalized by total absolute target value."""

    # Like MAE, WAPE is minimized by the median forecast.
    optimized_by_median = True
    equivalent_tabular_regression_metric = "mean_absolute_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        total_abs_error = (y_true - y_pred).abs().sum()
        return total_abs_error / y_true.abs().sum()
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class sMAPE(TimeSeriesScorer):
    """Symmetric mean absolute percentage error."""

    optimized_by_median = True
    equivalent_tabular_regression_metric = "symmetric_mean_absolute_percentage_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        # Per-timestep symmetric APE; _safemean drops inf values that arise when both terms are zero.
        symmetric_ape = (y_true - y_pred).abs() / (y_true.abs() + y_pred.abs())
        return self._safemean(2 * symmetric_ape)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class MAPE(TimeSeriesScorer):
    """Mean absolute percentage error."""

    optimized_by_median = True
    equivalent_tabular_regression_metric = "mean_absolute_percentage_error"

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        # Undefined where y_true == 0; the resulting inf values are dropped by _safemean.
        abs_pct_errors = (y_true - y_pred).abs() / y_true.abs()
        return self._safemean(abs_pct_errors)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class MASE(TimeSeriesScorer):
    """Mean absolute scaled error.

    Scales each item's forecast MAE by the in-sample absolute error of a seasonal naive forecast,
    which is precomputed in :meth:`save_past_metrics`.
    """

    optimized_by_median = True
    equivalent_tabular_regression_metric = "mean_absolute_error"

    def __init__(self):
        # Per-item seasonal naive error cached between save_past_metrics and compute_metric.
        self._past_abs_seasonal_error: Optional[pd.Series] = None

    def save_past_metrics(
        self,
        data_past: TimeSeriesDataFrame,
        target: str = "target",
        seasonal_period: int = 1,
        **kwargs,
    ) -> None:
        self._past_abs_seasonal_error = _in_sample_abs_seasonal_error(
            y_past=data_past[target], seasonal_period=seasonal_period
        )

    def clear_past_metrics(self) -> None:
        self._past_abs_seasonal_error = None

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        if self._past_abs_seasonal_error is None:
            raise AssertionError("Call `save_past_metrics` before `compute_metric`")
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        # MAE of the forecast horizon for each item, scaled by that item's in-sample naive error.
        mae_per_item = (y_true - y_pred).abs().groupby(level=ITEMID, sort=False).mean()
        return self._safemean(mae_per_item / self._past_abs_seasonal_error)
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
class RMSSE(TimeSeriesScorer):
    """Root mean squared scaled error.

    Scales each item's forecast MSE by the in-sample squared error of a seasonal naive forecast,
    which is precomputed in :meth:`save_past_metrics`.
    """

    equivalent_tabular_regression_metric = "root_mean_squared_error"

    def __init__(self):
        # Per-item squared seasonal naive error cached between save_past_metrics and compute_metric.
        self._past_squared_seasonal_error: Optional[pd.Series] = None

    def save_past_metrics(
        self,
        data_past: TimeSeriesDataFrame,
        target: str = "target",
        seasonal_period: int = 1,
        **kwargs,
    ) -> None:
        self._past_squared_seasonal_error = _in_sample_squared_seasonal_error(
            y_past=data_past[target], seasonal_period=seasonal_period
        )

    def clear_past_metrics(self) -> None:
        self._past_squared_seasonal_error = None

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        if self._past_squared_seasonal_error is None:
            raise AssertionError("Call `save_past_metrics` before `compute_metric`")
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
        # MSE of the forecast horizon for each item, scaled by that item's in-sample squared naive error.
        mse_per_item = (y_true - y_pred).pow(2.0).groupby(level=ITEMID, sort=False).mean()
        return np.sqrt(self._safemean(mse_per_item / self._past_squared_seasonal_error))
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
from autogluon.timeseries import TimeSeriesDataFrame
|
|
4
|
+
|
|
5
|
+
from .abstract import TimeSeriesScorer
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class WQL(TimeSeriesScorer):
    """Weighted quantile loss.

    Also known as weighted pinball loss.
    """

    needs_quantile = True

    def compute_metric(
        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
    ) -> float:
        y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
        values_true = y_true.values[:, None]  # shape [N, 1]
        values_pred = q_pred.values  # shape [N, len(quantile_levels)]

        # Pinball loss per quantile, summed over all timesteps and normalized by the total absolute target.
        pinball = np.abs((values_true - values_pred) * ((values_true <= values_pred) - quantile_levels))
        return 2 * np.mean(pinball.sum(axis=0) / np.abs(values_true).sum())
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
|
|
3
|
+
from autogluon.timeseries.dataset.ts_dataframe import ITEMID
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _get_seasonal_diffs(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
    """Absolute difference between each observation and the one ``seasonal_period`` steps earlier, per item."""
    lagged_diffs = y_past.groupby(level=ITEMID, sort=False).diff(seasonal_period)
    return lagged_diffs.abs()
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _in_sample_abs_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
    """Compute seasonal naive forecast error (predict value from seasonal_period steps ago) for each time series."""
    seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
    per_item_error = seasonal_diffs.groupby(level=ITEMID, sort=False).mean()
    # Items shorter than the seasonal period yield NaN; fall back to 1.0 so later division stays finite.
    return per_item_error.fillna(1.0)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _in_sample_squared_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
    """Mean squared seasonal naive error per time series (used for scaling RMSSE)."""
    seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
    squared_diffs = seasonal_diffs.pow(2.0)
    # NaN appears for items shorter than the seasonal period; default to 1.0 to keep downstream division finite.
    return squared_diffs.groupby(level=ITEMID, sort=False).mean().fillna(1.0)
|