autogluon.timeseries 1.2.1b20250224__py3-none-any.whl → 1.4.1b20251215__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of autogluon.timeseries might be problematic.
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +106 -0
- autogluon/timeseries/dataset/ts_dataframe.py +256 -141
- autogluon/timeseries/learner.py +86 -52
- autogluon/timeseries/metrics/__init__.py +42 -8
- autogluon/timeseries/metrics/abstract.py +89 -19
- autogluon/timeseries/metrics/point.py +142 -53
- autogluon/timeseries/metrics/quantile.py +46 -21
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +8 -2
- autogluon/timeseries/models/abstract/__init__.py +2 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +361 -592
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +189 -0
- autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +282 -194
- autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
- autogluon/timeseries/models/autogluon_tabular/transforms.py +25 -18
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +361 -0
- autogluon/timeseries/models/chronos/model.py +219 -138
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +81 -50
- autogluon/timeseries/models/ensemble/__init__.py +37 -2
- autogluon/timeseries/models/ensemble/abstract.py +107 -0
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
- autogluon/timeseries/models/ensemble/weighted/basic.py +91 -0
- autogluon/timeseries/models/ensemble/weighted/greedy.py +62 -0
- autogluon/timeseries/models/gluonts/__init__.py +1 -1
- autogluon/timeseries/models/gluonts/{abstract_gluonts.py → abstract.py} +148 -208
- autogluon/timeseries/models/gluonts/dataset.py +109 -0
- autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +38 -22
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +71 -74
- autogluon/timeseries/models/local/naive.py +13 -9
- autogluon/timeseries/models/local/npts.py +9 -2
- autogluon/timeseries/models/local/statsforecast.py +52 -36
- autogluon/timeseries/models/multi_window/multi_window_model.py +65 -45
- autogluon/timeseries/models/registry.py +64 -0
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
- autogluon/timeseries/models/toto/model.py +249 -0
- autogluon/timeseries/predictor.py +685 -297
- autogluon/timeseries/regressor.py +94 -44
- autogluon/timeseries/splitter.py +8 -32
- autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon/timeseries/trainer/ensemble_composer.py +444 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/{trainer.py → trainer/trainer.py} +387 -390
- autogluon/timeseries/trainer/utils.py +17 -0
- autogluon/timeseries/transforms/__init__.py +2 -13
- autogluon/timeseries/transforms/covariate_scaler.py +34 -40
- autogluon/timeseries/transforms/target_scaler.py +37 -20
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +3 -5
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/datetime/time_features.py +2 -2
- autogluon/timeseries/utils/features.py +70 -47
- autogluon/timeseries/utils/forecast.py +19 -14
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/utils/warning_filters.py +4 -2
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.4.1b20251215-py3.11-nspkg.pth +1 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/METADATA +49 -36
- autogluon_timeseries-1.4.1b20251215.dist-info/RECORD +103 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/WHEEL +1 -1
- autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -11
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -585
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -518
- autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -78
- autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
- autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- autogluon/timeseries/models/presets.py +0 -360
- autogluon.timeseries-1.2.1b20250224-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.2.1b20250224.dist-info/RECORD +0 -68
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/zip-safe +0 -0
autogluon/timeseries/metrics/point.py

@@ -1,6 +1,6 @@
 import logging
 import warnings
-from typing import
+from typing import Sequence

 import numpy as np
 import pandas as pd
@@ -38,10 +38,18 @@ class RMSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "root_mean_squared_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return np.sqrt(self._safemean(errors))


 class MSE(TimeSeriesScorer):
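The same pattern recurs in every point metric below: squared or absolute errors are reshaped to one row per forecast window of length `prediction_length`, optionally multiplied by the new `horizon_weight`, and then reduced with a NaN-aware mean (`_safemean`). A minimal standalone numpy sketch of that weighting step (hypothetical weights and data, not the library's own helper):

    import numpy as np

    prediction_length = 3
    horizon_weight = np.array([[0.5, 1.0, 1.5]])  # one weight per forecast step (hypothetical values)

    y_true = np.array([10.0, 12.0, 14.0, 20.0, 22.0, np.nan])  # two forecast windows, flattened
    y_pred = np.array([11.0, 12.0, 13.0, 19.0, 25.0, 21.0])

    errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])  # [num_windows, prediction_length]
    errors = errors * horizon_weight  # broadcast the per-step weights across windows
    rmse = np.sqrt(np.nanmean(errors))  # NaN-aware mean, analogous to _safemean in the diff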
@@ -69,10 +77,18 @@ class MSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_squared_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return self._safemean(errors)


 class MAE(TimeSeriesScorer):
@@ -98,10 +114,18 @@ class MAE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return self._safemean(errors)


 class WAPE(TimeSeriesScorer):
@@ -119,6 +143,7 @@ class WAPE(TimeSeriesScorer):
     - not sensitive to outliers
     - prefers models that accurately estimate the median

+    If ``self.horizon_weight`` is provided, both the errors and the target time series in the denominator will be re-weighted.

     References
     ----------
@@ -129,10 +154,19 @@ class WAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+            y_true = y_true.reshape([-1, self.prediction_length]) * self.horizon_weight
+        return np.nansum(errors) / np.nansum(np.abs(y_true))


 class SMAPE(TimeSeriesScorer):
@@ -158,10 +192,18 @@ class SMAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "symmetric_mean_absolute_percentage_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = (np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return 2 * self._safemean(errors)


 class MAPE(TimeSeriesScorer):
@@ -187,22 +229,30 @@ class MAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_percentage_error"

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = (np.abs(y_true - y_pred) / np.abs(y_true)).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return self._safemean(errors)


 class MASE(TimeSeriesScorer):
     r"""Mean absolute scaled error.

-    Normalizes the absolute error for each time series by the
+    Normalizes the absolute error for each time series by the historical seasonal error of this time series.

     .. math::

         \operatorname{MASE} = \frac{1}{N} \frac{1}{H} \sum_{i=1}^{N} \frac{1}{a_i} \sum_{t=T+1}^{T+H} |y_{i,t} - f_{i,t}|

-    where :math:`a_i` is the
+    where :math:`a_i` is the historical absolute seasonal error defined as

     .. math::

@@ -226,8 +276,16 @@ class MASE(TimeSeriesScorer):
     optimized_by_median = True
     equivalent_tabular_regression_metric = "mean_absolute_error"

-    def __init__(
-        self
+    def __init__(
+        self,
+        prediction_length: int = 1,
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
+    ):
+        super().__init__(
+            prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+        )
+        self._past_abs_seasonal_error: pd.Series | None = None

     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
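Given the constructor signature added here, a scorer can presumably be built directly with a per-step weight vector; validation or normalization of `horizon_weight` happens in the `TimeSeriesScorer` base class and is not shown in this diff. A sketch, assuming `MASE` is importable from `autogluon.timeseries.metrics`:

    # A sketch only; the constructor arguments match the __init__ added in this diff.
    from autogluon.timeseries.metrics import MASE

    mase_weighted = MASE(
        prediction_length=24,
        seasonal_period=24,
        horizon_weight=[2.0] * 6 + [1.0] * 18,  # emphasize the first 6 forecast steps
    )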
@@ -240,28 +298,34 @@ class MASE(TimeSeriesScorer):
         self._past_abs_seasonal_error = None

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")

-
-
-
-
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+
+        errors = np.abs(y_true - y_pred).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])


 class RMSSE(TimeSeriesScorer):
     r"""Root mean squared scaled error.

-    Normalizes the absolute error for each time series by the
+    Normalizes the absolute error for each time series by the historical seasonal error of this time series.

     .. math::

         \operatorname{RMSSE} = \sqrt{\frac{1}{N} \frac{1}{H} \sum_{i=1}^{N} \frac{1}{s_i} \sum_{t=T+1}^{T+H} (y_{i,t} - f_{i,t})^2}

-    where :math:`s_i` is the
+    where :math:`s_i` is the historical squared seasonal error defined as

     .. math::

@@ -286,8 +350,16 @@ class RMSSE(TimeSeriesScorer):

     equivalent_tabular_regression_metric = "root_mean_squared_error"

-    def __init__(
-        self
+    def __init__(
+        self,
+        prediction_length: int = 1,
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
+    ):
+        super().__init__(
+            prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+        )
+        self._past_squared_seasonal_error: pd.Series | None = None

     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
@@ -300,16 +372,21 @@ class RMSSE(TimeSeriesScorer):
         self._past_squared_seasonal_error = None

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_squared_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")

-
-
-
-
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return np.sqrt(self._safemean(errors / self._past_squared_seasonal_error.to_numpy()[:, None]))


 class RMSLE(TimeSeriesScorer):
@@ -336,20 +413,26 @@ class RMSLE(TimeSeriesScorer):
     """

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
         y_pred = np.clip(y_pred, a_min=0.0, a_max=None)

-
+        errors = np.power(np.log1p(y_pred) - np.log1p(y_true), 2).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return np.sqrt(self._safemean(errors))

     def __call__(
         self,
         data: TimeSeriesDataFrame,
         predictions: TimeSeriesDataFrame,
-        prediction_length: int = 1,
         target: str = "target",
-        seasonal_period: Optional[int] = None,
         **kwargs,
     ) -> float:
         if (data[target] < 0).any():
@@ -357,9 +440,7 @@ class RMSLE(TimeSeriesScorer):
         return super().__call__(
             data=data,
             predictions=predictions,
-            prediction_length=prediction_length,
             target=target,
-            seasonal_period=seasonal_period,
             **kwargs,
         )

@@ -382,35 +463,43 @@ class WCD(TimeSeriesScorer):
     Parameters
     ----------
     alpha : float, default = 0.5
-        Values > 0.5
+        Values > 0.5 put a stronger penalty on underpredictions (when cumulative forecast is below the
         cumulative actual value). Values < 0.5 put a stronger penalty on overpredictions.
     """

-    def __init__(
+    def __init__(
+        self,
+        alpha: float = 0.5,
+        prediction_length: int = 1,
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
+    ):
+        super().__init__(
+            prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+        )
         assert 0 < alpha < 1, "alpha must be in (0, 1)"
         self.alpha = alpha
-        self.num_items: Optional[int] = None
         warnings.warn(
             f"{self.name} is an experimental metric. Its behavior may change in the future version of AutoGluon."
         )

-    def save_past_metrics(
-        self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
-    ) -> None:
-        self.num_items = data_past.num_items
-
     def _fast_cumsum(self, y: np.ndarray) -> np.ndarray:
-        """Compute the cumulative sum for each consecutive `prediction_length` items in the array."""
-
-        y = y.reshape(self.num_items, -1)
+        """Compute the cumulative sum for each consecutive `self.prediction_length` items in the array."""
+        y = y.reshape(-1, self.prediction_length)
         return np.nancumsum(y, axis=1).ravel()

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         cumsum_true = self._fast_cumsum(y_true.to_numpy())
         cumsum_pred = self._fast_cumsum(y_pred.to_numpy())
         diffs = cumsum_pred - cumsum_true
-
-
+        errors = (diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))).reshape([-1, self.prediction_length])
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return 2 * self._safemean(errors)
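Switching the reshape in `_fast_cumsum` from `(self.num_items, -1)` to `(-1, self.prediction_length)` removes the need for `save_past_metrics` while producing the same blocks whenever each item contributes exactly one forecast window of length `prediction_length`. A rough standalone sketch of the new error term (toy data; `horizon_weight` handling omitted as noted in the comment):

    import numpy as np

    prediction_length, alpha = 3, 0.5
    y_true = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0])
    y_pred = np.array([4.0, 6.0, 5.0, 3.0, 1.0, 2.0])

    def fast_cumsum(y):
        # cumulative sum within each consecutive block of `prediction_length` values
        return np.nancumsum(y.reshape(-1, prediction_length), axis=1).ravel()

    diffs = fast_cumsum(y_pred) - fast_cumsum(y_true)
    errors = (diffs * np.where(diffs < 0, -alpha, 1 - alpha)).reshape(-1, prediction_length)
    wcd = 2 * np.nanmean(errors)  # horizon_weight would be applied to `errors` before the mean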
autogluon/timeseries/metrics/quantile.py

@@ -1,9 +1,9 @@
-from typing import
+from typing import Sequence

 import numpy as np
 import pandas as pd

-from autogluon.timeseries.dataset
+from autogluon.timeseries.dataset import TimeSeriesDataFrame

 from .abstract import TimeSeriesScorer
 from .utils import in_sample_abs_seasonal_error
@@ -25,6 +25,7 @@ class WQL(TimeSeriesScorer):
     - scale-dependent (time series with large absolute value contribute more to the loss)
     - equivalent to WAPE if ``quantile_levels = [0.5]``

+    If ``horizon_weight`` is provided, both the errors and the target time series in the denominator will be re-weighted.

     References
     ----------
@@ -34,16 +35,25 @@ class WQL(TimeSeriesScorer):
     needs_quantile = True

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-
-
+        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
+        q_pred = q_pred.to_numpy()  # shape [N, len(quantile_levels)]

-
-        np.
-
+        errors = (
+            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
+            .mean(axis=1)
+            .reshape([-1, self.prediction_length])
         )
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+            y_true = y_true.reshape([-1, self.prediction_length]) * self.horizon_weight
+        return 2 * np.nansum(errors) / np.nansum(np.abs(y_true))


 class SQL(TimeSeriesScorer):
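The expression `np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))` is the usual pinball (quantile) loss in vectorized form. A small self-contained check against the piecewise definition (toy data, independent of the library):

    import numpy as np

    quantile_levels = np.array([0.1, 0.5, 0.9])
    y_true = np.array([[10.0], [20.0]])            # shape [N, 1]
    q_pred = np.array([[8.0, 10.5, 12.0],
                       [19.0, 21.0, 26.0]])        # shape [N, len(quantile_levels)]

    vectorized = np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))

    # Piecewise pinball loss: q * (y - f) if y >= f, else (1 - q) * (f - y)
    piecewise = np.where(
        y_true >= q_pred,
        quantile_levels * (y_true - q_pred),
        (1 - quantile_levels) * (q_pred - y_true),
    )
    assert np.allclose(vectorized, piecewise)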
@@ -51,13 +61,13 @@ class SQL(TimeSeriesScorer):

     Also known as scaled pinball loss.

-    Normalizes the quantile loss for each time series by the
+    Normalizes the quantile loss for each time series by the historical seasonal error of this time series.

     .. math::

         \operatorname{SQL} = \frac{1}{N} \frac{1}{H} \sum_{i=1}^{N} \frac{1}{a_i} \sum_{t=T+1}^{T+H} \sum_{q} \rho_q(y_{i,t}, f^q_{i,t})

-    where :math:`a_i` is the
+    where :math:`a_i` is the historical absolute seasonal error defined as

     .. math::

@@ -79,8 +89,16 @@ class SQL(TimeSeriesScorer):

     needs_quantile = True

-    def __init__(
-        self
+    def __init__(
+        self,
+        prediction_length: int = 1,
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
+    ):
+        super().__init__(
+            prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
+        )
+        self._past_abs_seasonal_error: pd.Series | None = None

     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
@@ -93,17 +111,24 @@ class SQL(TimeSeriesScorer):
         self._past_abs_seasonal_error = None

     def compute_metric(
-        self,
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        **kwargs,
     ) -> float:
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")

         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-        q_pred = q_pred.
-
-
-
-
-
-
-
+        q_pred = q_pred.to_numpy()
+        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
+
+        errors = (
+            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
+            .mean(axis=1)
+            .reshape([-1, self.prediction_length])
+        )
+        if self.horizon_weight is not None:
+            errors *= self.horizon_weight
+        return 2 * self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])
autogluon/timeseries/metrics/utils.py

@@ -1,18 +1,18 @@
 import pandas as pd

-from autogluon.timeseries.dataset
+from autogluon.timeseries.dataset import TimeSeriesDataFrame


 def _get_seasonal_diffs(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
-    return y_past.groupby(level=ITEMID, sort=False).diff(seasonal_period).abs()
+    return y_past.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).diff(seasonal_period).abs()


 def in_sample_abs_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
     """Compute seasonal naive forecast error (predict value from seasonal_period steps ago) for each time series."""
     seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
-    return seasonal_diffs.groupby(level=ITEMID, sort=False).mean().fillna(1.0)
+    return seasonal_diffs.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).mean().fillna(1.0)


 def in_sample_squared_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
     seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
-    return seasonal_diffs.pow(2.0).groupby(level=ITEMID, sort=False).mean().fillna(1.0)
+    return seasonal_diffs.pow(2.0).groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).mean().fillna(1.0)
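These helpers produce the per-item denominator used by MASE, RMSSE, and SQL: the mean absolute (or squared) difference between each observation and the one `seasonal_period` steps earlier. A rough pandas equivalent on a toy series; the real index level name is taken from `TimeSeriesDataFrame.ITEMID`, assumed here to be "item_id":

    import pandas as pd

    # Toy target series with a two-level (item_id, timestamp) index.
    y_past = pd.Series(
        [1.0, 2.0, 4.0, 7.0, 10.0, 10.0, 11.0, 13.0],
        index=pd.MultiIndex.from_product(
            [["A", "B"], pd.date_range("2024-01-01", periods=4, freq="D")],
            names=["item_id", "timestamp"],
        ),
    )

    seasonal_period = 1
    seasonal_diffs = y_past.groupby(level="item_id", sort=False).diff(seasonal_period).abs()
    abs_seasonal_error = seasonal_diffs.groupby(level="item_id", sort=False).mean().fillna(1.0)
    print(abs_seasonal_error)  # per-item scaling factor: A -> 2.0, B -> 1.0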
autogluon/timeseries/models/__init__.py

@@ -1,5 +1,5 @@
-from .autogluon_tabular import DirectTabularModel, RecursiveTabularModel
-from .chronos import ChronosModel
+from .autogluon_tabular import DirectTabularModel, PerStepTabularModel, RecursiveTabularModel
+from .chronos import Chronos2Model, ChronosModel
 from .gluonts import (
     DeepARModel,
     DLinearModel,
@@ -27,6 +27,8 @@ from .local import (
     ThetaModel,
     ZeroModel,
 )
+from .registry import ModelRegistry
+from .toto import TotoModel

 __all__ = [
     "ADIDAModel",
@@ -43,9 +45,12 @@ __all__ = [
     "ETSModel",
     "IMAPAModel",
     "ChronosModel",
+    "Chronos2Model",
+    "ModelRegistry",
     "NPTSModel",
     "NaiveModel",
     "PatchTSTModel",
+    "PerStepTabularModel",
     "RecursiveTabularModel",
     "SeasonalAverageModel",
     "SeasonalNaiveModel",
@@ -53,6 +58,7 @@ __all__ = [
     "TemporalFusionTransformerModel",
     "ThetaModel",
     "TiDEModel",
+    "TotoModel",
     "WaveNetModel",
     "ZeroModel",
 ]
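Based on the expanded `__all__`, the newly added model classes should be importable alongside the existing ones (a sketch of the public imports only; how the trainer presets wire these models up is defined elsewhere, e.g. in the new `models/registry.py`):

    from autogluon.timeseries.models import (
        Chronos2Model,
        ChronosModel,
        ModelRegistry,
        PerStepTabularModel,
        TotoModel,
    )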
autogluon/timeseries/models/abstract/__init__.py

@@ -1,3 +1,3 @@
-from .abstract_timeseries_model import AbstractTimeSeriesModel
+from .abstract_timeseries_model import AbstractTimeSeriesModel, TimeSeriesModelBase

-__all__ = ["AbstractTimeSeriesModel"]
+__all__ = ["AbstractTimeSeriesModel", "TimeSeriesModelBase"]