autogluon.timeseries 1.2.1b20250424__py3-none-any.whl → 1.2.1b20250426__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. autogluon/timeseries/dataset/ts_dataframe.py +9 -2
  2. autogluon/timeseries/learner.py +1 -4
  3. autogluon/timeseries/metrics/__init__.py +36 -8
  4. autogluon/timeseries/metrics/abstract.py +77 -7
  5. autogluon/timeseries/metrics/point.py +136 -47
  6. autogluon/timeseries/metrics/quantile.py +42 -17
  7. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +7 -20
  8. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +106 -66
  9. autogluon/timeseries/models/autogluon_tabular/transforms.py +15 -10
  10. autogluon/timeseries/models/ensemble/greedy.py +8 -7
  11. autogluon/timeseries/models/local/abstract_local_model.py +43 -36
  12. autogluon/timeseries/models/multi_window/multi_window_model.py +1 -1
  13. autogluon/timeseries/models/presets.py +0 -2
  14. autogluon/timeseries/predictor.py +37 -29
  15. autogluon/timeseries/trainer.py +23 -16
  16. autogluon/timeseries/version.py +1 -1
  17. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/METADATA +5 -5
  18. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/RECORD +25 -25
  19. /autogluon.timeseries-1.2.1b20250424-py3.9-nspkg.pth → /autogluon.timeseries-1.2.1b20250426-py3.9-nspkg.pth +0 -0
  20. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/LICENSE +0 -0
  21. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/NOTICE +0 -0
  22. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/WHEEL +0 -0
  23. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/namespace_packages.txt +0 -0
  24. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/top_level.txt +0 -0
  25. {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250426.dist-info}/zip-safe +0 -0
autogluon/timeseries/dataset/ts_dataframe.py

@@ -8,12 +8,12 @@ from collections.abc import Iterable
  from itertools import islice
  from pathlib import Path
  from pprint import pformat
- from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
+ from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, overload

  import pandas as pd
  from joblib.parallel import Parallel, delayed
  from pandas.core.internals import ArrayManager, BlockManager  # type: ignore
- from typing_extensions import Self, overload
+ from typing_extensions import Self

  from autogluon.common.loaders import load_pd
@@ -1044,8 +1044,15 @@ class TimeSeriesDataFrame(pd.DataFrame):
          """Convert `TimeSeriesDataFrame` to a `pandas.DataFrame`"""
          return pd.DataFrame(self)

+     # inline typing stubs for various overridden methods
      if TYPE_CHECKING:

+         def query(  # type: ignore
+             self, expr: str, *, inplace: bool = False, **kwargs
+         ) -> Self: ...
+
+         def reindex(*args, **kwargs) -> Self: ...  # type: ignore
+
          @overload
          def __new__(cls, data: pd.DataFrame, static_features: Optional[pd.DataFrame] = None) -> Self: ...  # type: ignore
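Side note on the `if TYPE_CHECKING:` stubs added above: they exist only for static type checkers and are never executed at runtime. A minimal, hypothetical sketch of the same pattern (the `MyFrame` class and its `value` column are illustrative, not part of the package):

from typing import TYPE_CHECKING

import pandas as pd
from typing_extensions import Self


class MyFrame(pd.DataFrame):
    """Toy DataFrame subclass illustrating inline typing stubs."""

    if TYPE_CHECKING:
        # Stub only: tells the type checker that query() returns MyFrame (Self);
        # at runtime pandas' own implementation is used unchanged.
        def query(self, expr: str, *, inplace: bool = False, **kwargs) -> Self: ...  # type: ignore


df = MyFrame({"value": [1, 2, 3]})
subset = df.query("value > 1")  # type checker sees MyFrame; runtime behavior is plain pandas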
autogluon/timeseries/learner.py

@@ -29,15 +29,13 @@ class TimeSeriesLearner(AbstractLearner):
          known_covariates_names: Optional[List[str]] = None,
          trainer_type: Type[TimeSeriesTrainer] = TimeSeriesTrainer,
          eval_metric: Union[str, TimeSeriesScorer, None] = None,
-         eval_metric_seasonal_period: Optional[int] = None,
          prediction_length: int = 1,
          cache_predictions: bool = True,
          ensemble_model_type: Optional[Type] = None,
          **kwargs,
      ):
          super().__init__(path_context=path_context)
-         self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
-         self.eval_metric_seasonal_period = eval_metric_seasonal_period
+         self.eval_metric = check_get_evaluation_metric(eval_metric, prediction_length=prediction_length)
          self.trainer_type = trainer_type
          self.target = target
          self.known_covariates_names = [] if known_covariates_names is None else known_covariates_names
@@ -82,7 +80,6 @@ class TimeSeriesLearner(AbstractLearner):
              path=self.model_context,
              prediction_length=self.prediction_length,
              eval_metric=self.eval_metric,
-             eval_metric_seasonal_period=self.eval_metric_seasonal_period,
              target=self.target,
              quantile_levels=self.quantile_levels,
              verbosity=kwargs.get("verbosity", 2),
autogluon/timeseries/metrics/__init__.py

@@ -1,5 +1,7 @@
  from pprint import pformat
- from typing import Type, Union
+ from typing import Any, Dict, Optional, Sequence, Type, Union
+
+ import numpy as np

  from .abstract import TimeSeriesScorer
  from .point import MAE, MAPE, MASE, MSE, RMSE, RMSLE, RMSSE, SMAPE, WAPE, WCD
@@ -22,7 +24,7 @@ __all__ = [

  DEFAULT_METRIC_NAME = "WQL"

- AVAILABLE_METRICS = {
+ AVAILABLE_METRICS: Dict[str, Type[TimeSeriesScorer]] = {
      "MASE": MASE,
      "MAPE": MAPE,
      "SMAPE": SMAPE,
@@ -42,33 +44,59 @@ DEPRECATED_METRICS = {
  }

  # Experimental metrics that are not yet user facing
- EXPERIMENTAL_METRICS = {
+ EXPERIMENTAL_METRICS: Dict[str, Type[TimeSeriesScorer]] = {
      "WCD": WCD,
  }


  def check_get_evaluation_metric(
-     eval_metric: Union[str, TimeSeriesScorer, Type[TimeSeriesScorer], None] = None
+     eval_metric: Union[str, TimeSeriesScorer, Type[TimeSeriesScorer], None],
+     prediction_length: int,
+     seasonal_period: Optional[int] = None,
+     horizon_weight: Optional[Sequence[float] | np.ndarray] = None,
  ) -> TimeSeriesScorer:
+     """Factory method for TimeSeriesScorer objects.
+
+     Returns
+     -------
+     scorer :
+         A `TimeSeriesScorer` object based on the provided `eval_metric`.
+
+         `scorer.prediction_length` is always set to the `prediction_length` provided to this method.
+
+         If `seasonal_period` is not `None`, then `scorer.seasonal_period` is set to this value. Otherwise the original
+         value of `seasonal_period` is kept.
+
+         If `horizon_weight` is not `None`, then `scorer.horizon_weight` is set to this value. Otherwise the original
+         value of `horizon_weight` is kept.
+     """
      scorer: TimeSeriesScorer
+     metric_kwargs: Dict[str, Any] = dict(
+         prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+     )
      if isinstance(eval_metric, TimeSeriesScorer):
          scorer = eval_metric
+         scorer.prediction_length = prediction_length
+         if seasonal_period is not None:
+             scorer.seasonal_period = seasonal_period
+         if horizon_weight is not None:
+             scorer.horizon_weight = scorer.check_get_horizon_weight(horizon_weight, prediction_length=prediction_length)
      elif isinstance(eval_metric, type) and issubclass(eval_metric, TimeSeriesScorer):
          # e.g., user passed `eval_metric=CustomMetric` instead of `eval_metric=CustomMetric()`
-         scorer = eval_metric()
+         scorer = eval_metric(**metric_kwargs)
      elif isinstance(eval_metric, str):
          metric_name = DEPRECATED_METRICS.get(eval_metric, eval_metric).upper()
          if metric_name in AVAILABLE_METRICS:
-             scorer = AVAILABLE_METRICS[metric_name]()
+             scorer = AVAILABLE_METRICS[metric_name](**metric_kwargs)
          elif metric_name in EXPERIMENTAL_METRICS:
-             scorer = EXPERIMENTAL_METRICS[metric_name]()
+             scorer = EXPERIMENTAL_METRICS[metric_name](**metric_kwargs)
          else:
              raise ValueError(
                  f"Time series metric {eval_metric} not supported. Available metrics are:\n"
                  f"{pformat(sorted(AVAILABLE_METRICS.keys()))}"
              )
      elif eval_metric is None:
-         scorer = AVAILABLE_METRICS[DEFAULT_METRIC_NAME]()
+         scorer = AVAILABLE_METRICS[DEFAULT_METRIC_NAME](**metric_kwargs)
      else:
          raise ValueError(
              f"eval_metric must be of type str, TimeSeriesScorer or None "
autogluon/timeseries/metrics/abstract.py

@@ -1,6 +1,8 @@
- from typing import Optional, Tuple, Union
+ import warnings
+ from typing import Optional, Sequence, Tuple, Union, overload

  import numpy as np
+ import numpy.typing as npt
  import pandas as pd

  from autogluon.timeseries import TimeSeriesDataFrame
@@ -15,6 +17,18 @@ class TimeSeriesScorer:

      Follows the design of ``autogluon.core.metrics.Scorer``.

+     Parameters
+     ----------
+     prediction_length : int, default = 1
+         The length of the forecast horizon. The predictions provided to the `TimeSeriesScorer` are expected to contain
+         a forecast for this many time steps for each time series.
+     seasonal_period : int or None, default = None
+         Seasonal period used to compute some evaluation metrics such as mean absolute scaled error (MASE). Defaults to
+         `None`, in which case the seasonal period is computed based on the data frequency.
+     horizon_weight : Sequence[float], np.ndarray or None, default = None
+         Weight assigned to each time step in the forecast horizon when computing the metric. If provided, the
+         `horizon_weight` will be stored as a numpy array of shape `[1, prediction_length]`.
+
      Attributes
      ----------
      greater_is_better_internal : bool, default = False
@@ -40,6 +54,18 @@ class TimeSeriesScorer:
      needs_quantile: bool = False
      equivalent_tabular_regression_metric: Optional[str] = None

+     def __init__(
+         self,
+         prediction_length: int = 1,
+         seasonal_period: Optional[int] = None,
+         horizon_weight: Optional[Sequence[float]] = None,
+     ):
+         self.prediction_length = int(prediction_length)
+         if self.prediction_length < 1:
+             raise ValueError(f"prediction_length must be >= 1 (received {prediction_length})")
+         self.seasonal_period = seasonal_period
+         self.horizon_weight = self.check_get_horizon_weight(horizon_weight, prediction_length=prediction_length)
+
      @property
      def sign(self) -> int:
          return 1 if self.greater_is_better_internal else -1
@@ -66,18 +92,25 @@ class TimeSeriesScorer:
          self,
          data: TimeSeriesDataFrame,
          predictions: TimeSeriesDataFrame,
-         prediction_length: int = 1,
          target: str = "target",
-         seasonal_period: Optional[int] = None,
          **kwargs,
      ) -> float:
-         seasonal_period = get_seasonality(data.freq) if seasonal_period is None else seasonal_period
+         seasonal_period = get_seasonality(data.freq) if self.seasonal_period is None else self.seasonal_period
+
+         if "prediction_length" in kwargs:
+             warnings.warn(
+                 "Passing `prediction_length` to `TimeSeriesScorer.__call__` is deprecated and will be removed in v2.0. "
+                 "Please set the `eval_metric.prediction_length` attribute instead.",
+                 category=FutureWarning,
+             )
+             self.prediction_length = kwargs["prediction_length"]
+             self.horizon_weight = self.check_get_horizon_weight(self.horizon_weight, self.prediction_length)

-         data_past = data.slice_by_timestep(None, -prediction_length)
-         data_future = data.slice_by_timestep(-prediction_length, None)
+         data_past = data.slice_by_timestep(None, -self.prediction_length)
+         data_future = data.slice_by_timestep(-self.prediction_length, None)

          assert not predictions.isna().any().any(), "Predictions contain NaN values."
-         assert (predictions.num_timesteps_per_item() == prediction_length).all()
+         assert (predictions.num_timesteps_per_item() == self.prediction_length).all()
          assert data_future.index.equals(predictions.index), "Prediction and data indices do not match."

          try:
@@ -200,3 +233,40 @@ class TimeSeriesScorer:
          q_pred = pd.DataFrame(predictions[quantile_columns])
          quantile_levels = np.array(quantile_columns, dtype=float)
          return y_true, q_pred, quantile_levels
+
+     @overload
+     @staticmethod
+     def check_get_horizon_weight(horizon_weight: None, prediction_length: int) -> None: ...
+     @overload
+     @staticmethod
+     def check_get_horizon_weight(
+         horizon_weight: Sequence[float] | np.ndarray, prediction_length: int
+     ) -> npt.NDArray[np.float64]: ...
+
+     @staticmethod
+     def check_get_horizon_weight(
+         horizon_weight: Sequence[float] | np.ndarray | None, prediction_length: int
+     ) -> Optional[npt.NDArray[np.float64]]:
+         """Convert horizon_weight to a non-negative numpy array that sums up to prediction_length.
+         Raises an exception if horizon_weight has an invalid shape or contains invalid values.
+
+         Returns
+         -------
+         horizon_weight:
+             None if the input is None, otherwise a numpy array of shape [1, prediction_length].
+         """
+         if horizon_weight is None:
+             return None
+         horizon_weight_np = np.ravel(horizon_weight).astype(np.float64)
+         if horizon_weight_np.shape != (prediction_length,):
+             raise ValueError(
+                 f"horizon_weight must have length equal to {prediction_length=} (got {len(horizon_weight)=})"
+             )
+         if not (horizon_weight_np >= 0).all():
+             raise ValueError(f"All values in horizon_weight must be >= 0 (got {horizon_weight})")
+         if not horizon_weight_np.sum() > 0:
+             raise ValueError(f"At least some values in horizon_weight must be > 0 (got {horizon_weight})")
+         if not np.isfinite(horizon_weight_np).all():
+             raise ValueError(f"All horizon_weight values must be finite (got {horizon_weight})")
+         horizon_weight_np = horizon_weight_np * prediction_length / horizon_weight_np.sum()
+         return horizon_weight_np.reshape([1, prediction_length])
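A short sketch of the validation and normalization added above (module path taken from the file list; the example weights are arbitrary): non-negative weights are rescaled to sum to `prediction_length` and reshaped to `[1, prediction_length]`, while `None` passes through untouched.

from autogluon.timeseries.metrics.abstract import TimeSeriesScorer

# [1, 1, 2] sums to 4; each weight is rescaled by prediction_length / sum = 3 / 4.
weights = TimeSeriesScorer.check_get_horizon_weight([1, 1, 2], prediction_length=3)
print(weights)                        # [[0.75 0.75 1.5 ]]
print(weights.shape, weights.sum())   # (1, 3) 3.0

# None means "no weighting" and is returned as-is.
assert TimeSeriesScorer.check_get_horizon_weight(None, prediction_length=3) is None

# Invalid inputs raise ValueError, e.g. wrong length or negative values.
try:
    TimeSeriesScorer.check_get_horizon_weight([1, -1, 1], prediction_length=3)
except ValueError as err:
    print(err)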
autogluon/timeseries/metrics/point.py

@@ -1,6 +1,6 @@
  import logging
  import warnings
- from typing import Optional
+ from typing import Optional, Sequence

  import numpy as np
  import pandas as pd
@@ -38,10 +38,18 @@ class RMSE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "root_mean_squared_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return np.sqrt(self._safemean((y_true - y_pred) ** 2))
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return np.sqrt(self._safemean(errors))


  class MSE(TimeSeriesScorer):
@@ -69,10 +77,18 @@ class MSE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "mean_squared_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return self._safemean((y_true - y_pred) ** 2)
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return self._safemean(errors)


  class MAE(TimeSeriesScorer):
@@ -98,10 +114,18 @@ class MAE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "mean_absolute_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return self._safemean((y_true - y_pred).abs())
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return self._safemean(errors)


  class WAPE(TimeSeriesScorer):
@@ -119,6 +143,7 @@ class WAPE(TimeSeriesScorer):
      - not sensitive to outliers
      - prefers models that accurately estimate the median

+     If `self.horizon_weight` is provided, both the errors and the target time series in the denominator will be re-weighted.

      References
      ----------
@@ -129,10 +154,19 @@ class WAPE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "mean_absolute_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return (y_true - y_pred).abs().sum() / y_true.abs().sum()
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+             y_true = y_true.reshape([-1, self.prediction_length]) * self.horizon_weight
+         return np.nansum(errors) / np.nansum(np.abs(y_true))


  class SMAPE(TimeSeriesScorer):
@@ -158,10 +192,18 @@ class SMAPE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "symmetric_mean_absolute_percentage_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return self._safemean(2 * ((y_true - y_pred).abs() / (y_true.abs() + y_pred.abs())))
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = (np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return 2 * self._safemean(errors)


  class MAPE(TimeSeriesScorer):
@@ -187,10 +229,18 @@ class MAPE(TimeSeriesScorer):
      equivalent_tabular_regression_metric = "mean_absolute_percentage_error"

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-         return self._safemean((y_true - y_pred).abs() / y_true.abs())
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = (np.abs(y_true - y_pred) / np.abs(y_true)).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return self._safemean(errors)


  class MASE(TimeSeriesScorer):
@@ -226,7 +276,15 @@ class MASE(TimeSeriesScorer):
      optimized_by_median = True
      equivalent_tabular_regression_metric = "mean_absolute_error"

-     def __init__(self):
+     def __init__(
+         self,
+         prediction_length: int = 1,
+         seasonal_period: Optional[int] = None,
+         horizon_weight: Optional[Sequence[float]] = None,
+     ):
+         super().__init__(
+             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+         )
          self._past_abs_seasonal_error: Optional[pd.Series] = None

      def save_past_metrics(
@@ -240,16 +298,22 @@ class MASE(TimeSeriesScorer):
          self._past_abs_seasonal_error = None

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
-         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
          if self._past_abs_seasonal_error is None:
              raise AssertionError("Call `save_past_metrics` before `compute_metric`")

-         num_items = len(self._past_abs_seasonal_error)
-         # Reshape abs errors into [num_items, prediction_length] to normalize per item without groupby
-         abs_errors = np.abs(y_true.to_numpy() - y_pred.to_numpy()).reshape([num_items, -1])
-         return self._safemean(abs_errors / self._past_abs_seasonal_error.values[:, None])
+         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+
+         errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])


  class RMSSE(TimeSeriesScorer):
@@ -286,7 +350,15 @@ class RMSSE(TimeSeriesScorer):

      equivalent_tabular_regression_metric = "root_mean_squared_error"

-     def __init__(self):
+     def __init__(
+         self,
+         prediction_length: int = 1,
+         seasonal_period: Optional[int] = None,
+         horizon_weight: Optional[Sequence[float]] = None,
+     ):
+         super().__init__(
+             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+         )
          self._past_squared_seasonal_error: Optional[pd.Series] = None

      def save_past_metrics(
@@ -300,16 +372,21 @@ class RMSSE(TimeSeriesScorer):
          self._past_squared_seasonal_error = None

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
-         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
          if self._past_squared_seasonal_error is None:
              raise AssertionError("Call `save_past_metrics` before `compute_metric`")

-         num_items = len(self._past_squared_seasonal_error)
-         # Reshape squared errors into [num_items, prediction_length] to normalize per item without groupby
-         squared_errors = ((y_true.to_numpy() - y_pred.to_numpy()) ** 2.0).reshape([num_items, -1])
-         return np.sqrt(self._safemean(squared_errors / self._past_squared_seasonal_error.values[:, None]))
+         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+         errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return np.sqrt(self._safemean(errors / self._past_squared_seasonal_error.to_numpy()[:, None]))


  class RMSLE(TimeSeriesScorer):
@@ -336,20 +413,26 @@ class RMSLE(TimeSeriesScorer):
      """

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+         y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
          y_pred = np.clip(y_pred, a_min=0.0, a_max=None)

-         return np.sqrt(np.power(np.log1p(y_pred) - np.log1p(y_true), 2).mean())
+         errors = np.power(np.log1p(y_pred) - np.log1p(y_true), 2).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return np.sqrt(self._safemean(errors))

      def __call__(
          self,
          data: TimeSeriesDataFrame,
          predictions: TimeSeriesDataFrame,
-         prediction_length: int = 1,
          target: str = "target",
-         seasonal_period: Optional[int] = None,
          **kwargs,
      ) -> float:
          if (data[target] < 0).any():
@@ -357,9 +440,7 @@ class RMSLE(TimeSeriesScorer):
          return super().__call__(
              data=data,
              predictions=predictions,
-             prediction_length=prediction_length,
              target=target,
-             seasonal_period=seasonal_period,
              **kwargs,
          )
@@ -382,35 +463,43 @@ class WCD(TimeSeriesScorer):
      Parameters
      ----------
      alpha : float, default = 0.5
-         Values > 0.5 correspond put a stronger penalty on underpredictions (when cumulative forecast is below the
+         Values > 0.5 put a stronger penalty on underpredictions (when cumulative forecast is below the
          cumulative actual value). Values < 0.5 put a stronger penalty on overpredictions.
      """

-     def __init__(self, alpha: float = 0.5):
+     def __init__(
+         self,
+         alpha: float = 0.5,
+         prediction_length: int = 1,
+         seasonal_period: Optional[int] = None,
+         horizon_weight: Optional[Sequence[float]] = None,
+     ):
+         super().__init__(
+             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+         )
          assert 0 < alpha < 1, "alpha must be in (0, 1)"
          self.alpha = alpha
-         self.num_items: Optional[int] = None
          warnings.warn(
              f"{self.name} is an experimental metric. Its behavior may change in the future version of AutoGluon."
          )

-     def save_past_metrics(
-         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
-     ) -> None:
-         self.num_items = data_past.num_items
-
      def _fast_cumsum(self, y: np.ndarray) -> np.ndarray:
-         """Compute the cumulative sum for each consecutive `prediction_length` items in the array."""
-         assert self.num_items is not None, "Make sure to call `save_past_metrics` before `compute_metric`"
-         y = y.reshape(self.num_items, -1)
+         """Compute the cumulative sum for each consecutive `self.prediction_length` items in the array."""
+         y = y.reshape(-1, self.prediction_length)
          return np.nancumsum(y, axis=1).ravel()

      def compute_metric(
-         self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+         self,
+         data_future: TimeSeriesDataFrame,
+         predictions: TimeSeriesDataFrame,
+         target: str = "target",
+         **kwargs,
      ) -> float:
          y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
          cumsum_true = self._fast_cumsum(y_true.to_numpy())
          cumsum_pred = self._fast_cumsum(y_pred.to_numpy())
          diffs = cumsum_pred - cumsum_true
-         error = diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))
-         return 2 * self._safemean(error)
+         errors = (diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))).reshape([-1, self.prediction_length])
+         if self.horizon_weight is not None:
+             errors *= self.horizon_weight
+         return 2 * self._safemean(errors)
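To make the recurring `reshape([-1, prediction_length])` / `horizon_weight` pattern in the point-metric hunks above concrete, here is a standalone numpy sketch of a horizon-weighted MAE; `np.nanmean` stands in for the library's `_safemean` helper, and all values are made up.

import numpy as np

prediction_length = 3
# Two time series (items), flattened the way the metrics receive them: 2 * prediction_length values.
y_true = np.array([10.0, 12.0, 14.0, 5.0, 5.0, 5.0])
y_pred = np.array([11.0, 12.0, 16.0, 4.0, 5.0, 8.0])
# Weights as produced by check_get_horizon_weight: shape [1, prediction_length], summing to 3.
horizon_weight = np.array([[0.5, 1.0, 1.5]])

# Same steps as the weighted MAE above: per-item rows, multiply by the horizon weights, average.
errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])  # shape [num_items, prediction_length]
weighted_errors = errors * horizon_weight                          # broadcasts across items
print(np.nanmean(weighted_errors))  # ~1.417 for these made-up numbers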