autogluon.timeseries 1.2.1b20250224-py3-none-any.whl → 1.4.1b20251215-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +106 -0
- autogluon/timeseries/dataset/ts_dataframe.py +256 -141
- autogluon/timeseries/learner.py +86 -52
- autogluon/timeseries/metrics/__init__.py +42 -8
- autogluon/timeseries/metrics/abstract.py +89 -19
- autogluon/timeseries/metrics/point.py +142 -53
- autogluon/timeseries/metrics/quantile.py +46 -21
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +8 -2
- autogluon/timeseries/models/abstract/__init__.py +2 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +361 -592
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +189 -0
- autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +282 -194
- autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
- autogluon/timeseries/models/autogluon_tabular/transforms.py +25 -18
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +361 -0
- autogluon/timeseries/models/chronos/model.py +219 -138
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +81 -50
- autogluon/timeseries/models/ensemble/__init__.py +37 -2
- autogluon/timeseries/models/ensemble/abstract.py +107 -0
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
- autogluon/timeseries/models/ensemble/weighted/basic.py +91 -0
- autogluon/timeseries/models/ensemble/weighted/greedy.py +62 -0
- autogluon/timeseries/models/gluonts/__init__.py +1 -1
- autogluon/timeseries/models/gluonts/{abstract_gluonts.py → abstract.py} +148 -208
- autogluon/timeseries/models/gluonts/dataset.py +109 -0
- autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +38 -22
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +71 -74
- autogluon/timeseries/models/local/naive.py +13 -9
- autogluon/timeseries/models/local/npts.py +9 -2
- autogluon/timeseries/models/local/statsforecast.py +52 -36
- autogluon/timeseries/models/multi_window/multi_window_model.py +65 -45
- autogluon/timeseries/models/registry.py +64 -0
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
- autogluon/timeseries/models/toto/model.py +249 -0
- autogluon/timeseries/predictor.py +685 -297
- autogluon/timeseries/regressor.py +94 -44
- autogluon/timeseries/splitter.py +8 -32
- autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon/timeseries/trainer/ensemble_composer.py +444 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/{trainer.py → trainer/trainer.py} +387 -390
- autogluon/timeseries/trainer/utils.py +17 -0
- autogluon/timeseries/transforms/__init__.py +2 -13
- autogluon/timeseries/transforms/covariate_scaler.py +34 -40
- autogluon/timeseries/transforms/target_scaler.py +37 -20
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +3 -5
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/datetime/time_features.py +2 -2
- autogluon/timeseries/utils/features.py +70 -47
- autogluon/timeseries/utils/forecast.py +19 -14
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/utils/warning_filters.py +4 -2
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.4.1b20251215-py3.11-nspkg.pth +1 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/METADATA +49 -36
- autogluon_timeseries-1.4.1b20251215.dist-info/RECORD +103 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/WHEEL +1 -1
- autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -11
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -585
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -518
- autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -78
- autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
- autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- autogluon/timeseries/models/presets.py +0 -360
- autogluon.timeseries-1.2.1b20250224-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.2.1b20250224.dist-info/RECORD +0 -68
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/ensemble/array_based/models.py
@@ -0,0 +1,185 @@
+from abc import ABC
+from typing import Any, Type
+
+from autogluon.timeseries.dataset import TimeSeriesDataFrame
+
+from .abstract import ArrayBasedTimeSeriesEnsembleModel
+from .regressor import (
+    EnsembleRegressor,
+    LinearStackerEnsembleRegressor,
+    MedianEnsembleRegressor,
+    PerQuantileTabularEnsembleRegressor,
+    TabularEnsembleRegressor,
+)
+
+
+class MedianEnsemble(ArrayBasedTimeSeriesEnsembleModel):
+    """Ensemble that computes predictions as the element-wise median of the base models' mean
+    and quantile forecasts, providing robustness to outlier predictions.
+
+    Other Parameters
+    ----------------
+    isotonization : str, default = "sort"
+        The isotonization method to use (i.e., the algorithm that prevents quantile crossing).
+        Currently only "sort" is supported.
+    detect_and_ignore_failures : bool, default = True
+        Whether to detect and ignore "failed models", defined as models whose loss is larger
+        than 10x the median loss of all models. This can be very important for the regression-based
+        ensembles, as moving the weight of such a "failed model" to zero can require a long
+        training time.
+    """
+
+    def _get_ensemble_regressor(self) -> MedianEnsembleRegressor:
+        return MedianEnsembleRegressor()
+
+
+class BaseTabularEnsemble(ArrayBasedTimeSeriesEnsembleModel, ABC):
+    ensemble_regressor_type: Type[EnsembleRegressor]
+
+    def _get_default_hyperparameters(self) -> dict[str, Any]:
+        default_hps = super()._get_default_hyperparameters()
+        default_hps.update({"model_name": "GBM", "model_hyperparameters": {}})
+        return default_hps
+
+    def _get_ensemble_regressor(self):
+        hyperparameters = self.get_hyperparameters()
+        return self.ensemble_regressor_type(
+            quantile_levels=list(self.quantile_levels),
+            model_name=hyperparameters["model_name"],
+            model_hyperparameters=hyperparameters["model_hyperparameters"],
+        )
+
+
+class TabularEnsemble(BaseTabularEnsemble):
+    """Tabular ensemble that uses a single AutoGluon-Tabular model to learn ensemble combinations.
+
+    This ensemble trains a single tabular model (such as a gradient boosting machine) to predict all
+    quantiles simultaneously from base model predictions. The tabular model learns complex non-linear
+    patterns in how base models should be combined, potentially capturing interactions and conditional
+    dependencies that simple weighted averages cannot represent.
+
+    Other Parameters
+    ----------------
+    model_name : str, default = "GBM"
+        Name of the AutoGluon-Tabular model to use for ensemble learning. The model name must be
+        registered in the AutoGluon-Tabular model registry.
+    model_hyperparameters : dict, default = {}
+        Hyperparameters to pass to the underlying AutoGluon-Tabular model.
+    isotonization : str, default = "sort"
+        The isotonization method to use (i.e., the algorithm that prevents quantile crossing).
+        Currently only "sort" is supported.
+    detect_and_ignore_failures : bool, default = True
+        Whether to detect and ignore "failed models", defined as models whose loss is larger
+        than 10x the median loss of all models. This can be very important for the regression-based
+        ensembles, as moving the weight of such a "failed model" to zero can require a long
+        training time.
+    """
+
+    ensemble_regressor_type = TabularEnsembleRegressor
+
+
+class PerQuantileTabularEnsemble(BaseTabularEnsemble):
+    """Tabular ensemble using separate AutoGluon-Tabular models for each quantile and the mean forecast.
+
+    This ensemble trains dedicated tabular models for each quantile level plus a separate model
+    for the mean prediction. Each model specializes in learning optimal combinations for its
+    specific target, allowing for quantile-specific ensemble strategies that can capture different
+    model behaviors across the prediction distribution.
+
+    Other Parameters
+    ----------------
+    model_name : str, default = "GBM"
+        Name of the AutoGluon-Tabular model to use for ensemble learning. The model name must be
+        registered in the AutoGluon-Tabular model registry.
+    model_hyperparameters : dict, default = {}
+        Hyperparameters to pass to the underlying AutoGluon-Tabular model.
+    isotonization : str, default = "sort"
+        The isotonization method to use (i.e., the algorithm that prevents quantile crossing).
+        Currently only "sort" is supported.
+    detect_and_ignore_failures : bool, default = True
+        Whether to detect and ignore "failed models", defined as models whose loss is larger
+        than 10x the median loss of all models. This can be very important for the regression-based
+        ensembles, as moving the weight of such a "failed model" to zero can require a long
+        training time.
+    """
+
+    ensemble_regressor_type = PerQuantileTabularEnsembleRegressor
+
+
+class LinearStackerEnsemble(ArrayBasedTimeSeriesEnsembleModel):
+    """Linear stacking ensemble that learns optimal linear combination weights through gradient-based
+    optimization.
+
+    Weights can be learned per model, or per model-quantile, model-horizon, or model-quantile-horizon
+    combination. These choices are controlled by the ``weights_per`` hyperparameter.
+
+    The optimization process uses gradient descent with configurable learning rates and convergence
+    criteria, allowing for flexible training dynamics. Weight pruning can be applied to remove
+    models with negligible contributions, resulting in sparse and interpretable ensembles.
+
+    Other Parameters
+    ----------------
+    weights_per : str, default = "m"
+        Granularity of weight learning.
+
+        - "m": single weight per model
+        - "mq": single weight for each model-quantile combination
+        - "mt": single weight for each model-time step combination, where time steps run across
+          the prediction horizon
+        - "mtq": single weight for each model-quantile-time step combination
+    lr : float, default = 0.1
+        Learning rate for the PyTorch optimizer during weight training.
+    max_epochs : int, default = 10000
+        Maximum number of training epochs for weight optimization.
+    relative_tolerance : float, default = 1e-7
+        Relative tolerance for convergence detection during training.
+    prune_below : float, default = 0.0
+        Threshold below which weights are pruned to zero for sparsity. The weights are redistributed
+        across the remaining models after pruning.
+    isotonization : str, default = "sort"
+        The isotonization method to use (i.e., the algorithm that prevents quantile crossing).
+        Currently only "sort" is supported.
+    detect_and_ignore_failures : bool, default = True
+        Whether to detect and ignore "failed models", defined as models whose loss is larger
+        than 10x the median loss of all models. This can be very important for the regression-based
+        ensembles, as moving the weight of such a "failed model" to zero can require a long
+        training time.
+    """
+
+    def _get_default_hyperparameters(self) -> dict[str, Any]:
+        default_hps = super()._get_default_hyperparameters()
+        default_hps.update(
+            {
+                "weights_per": "m",
+                "lr": 0.1,
+                "max_epochs": 10000,
+                "relative_tolerance": 1e-7,
+                "prune_below": 0.0,
+            }
+        )
+        return default_hps
+
+    def _get_ensemble_regressor(self) -> LinearStackerEnsembleRegressor:
+        hps = self.get_hyperparameters()
+        return LinearStackerEnsembleRegressor(
+            quantile_levels=list(self.quantile_levels),
+            weights_per=hps["weights_per"],
+            lr=hps["lr"],
+            max_epochs=hps["max_epochs"],
+            relative_tolerance=hps["relative_tolerance"],
+            prune_below=hps["prune_below"],
+        )
+
+    def _fit(
+        self,
+        predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+        data_per_window: list[TimeSeriesDataFrame],
+        model_scores: dict[str, float] | None = None,
+        time_limit: float | None = None,
+    ) -> None:
+        super()._fit(predictions_per_window, data_per_window, model_scores, time_limit)
+
+        assert isinstance(self.ensemble_regressor, LinearStackerEnsembleRegressor)
+
+        if self.ensemble_regressor.kept_indices is not None:
+            original_names = self._model_names
+            self._model_names = [original_names[i] for i in self.ensemble_regressor.kept_indices]
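The ``isotonization="sort"`` option documented above is not implemented in this hunk; presumably it enforces non-crossing quantiles by sorting each forecast along the quantile axis. A minimal numpy sketch of that idea (the helper name is hypothetical, and the quantile axis is assumed to be the last one):

import numpy as np

def sort_isotonization(quantile_predictions: np.ndarray) -> np.ndarray:
    # Assumed sketch: restore monotonicity across quantile levels by sorting
    # along the quantile axis, so values for increasing levels are non-decreasing.
    return np.sort(quantile_predictions, axis=-1)

# A crossing triple for levels (0.1, 0.5, 0.9) gets reordered:
preds = np.array([[0.8, 0.5, 0.2]])
print(sort_isotonization(preds))  # [[0.2 0.5 0.8]]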
autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py
@@ -0,0 +1,12 @@
+from .abstract import EnsembleRegressor, MedianEnsembleRegressor
+from .linear_stacker import LinearStackerEnsembleRegressor
+from .per_quantile_tabular import PerQuantileTabularEnsembleRegressor
+from .tabular import TabularEnsembleRegressor
+
+__all__ = [
+    "EnsembleRegressor",
+    "LinearStackerEnsembleRegressor",
+    "MedianEnsembleRegressor",
+    "PerQuantileTabularEnsembleRegressor",
+    "TabularEnsembleRegressor",
+]
autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py
@@ -0,0 +1,88 @@
+from abc import ABC, abstractmethod
+
+import numpy as np
+from typing_extensions import Self
+
+
+class EnsembleRegressor(ABC):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @abstractmethod
+    def fit(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+        labels: np.ndarray,
+        time_limit: float | None = None,
+    ) -> Self:
+        """
+        Parameters
+        ----------
+        base_model_mean_predictions
+            Mean (point) predictions of base models. Array of shape
+            (num_windows, num_items, prediction_length, 1, num_models)
+
+        base_model_quantile_predictions
+            Quantile predictions of base models. Array of shape
+            (num_windows, num_items, prediction_length, num_quantiles, num_models)
+
+        labels
+            Ground truth array of shape
+            (num_windows, num_items, prediction_length, 1)
+
+        time_limit
+            Approximately how long ``fit`` will run (wall-clock time in seconds). If
+            not specified, training time will not be limited.
+        """
+        pass
+
+    @abstractmethod
+    def predict(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+    ) -> tuple[np.ndarray, np.ndarray]:
+        """Predict with the fitted ensemble regressor for a single window.
+        The items do not have to refer to the same item indices used when fitting
+        the model.
+
+        Parameters
+        ----------
+        base_model_mean_predictions
+            Mean (point) predictions of base models. Array of shape
+            (1, num_items, prediction_length, 1, num_models)
+
+        base_model_quantile_predictions
+            Quantile predictions of base models. Array of shape
+            (1, num_items, prediction_length, num_quantiles, num_models)
+
+        Returns
+        -------
+        ensemble_mean_predictions
+            Array of shape (1, num_items, prediction_length, 1)
+        ensemble_quantile_predictions
+            Array of shape (1, num_items, prediction_length, num_quantiles)
+        """
+        pass
+
+
+class MedianEnsembleRegressor(EnsembleRegressor):
+    def fit(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+        labels: np.ndarray,
+        time_limit: float | None = None,
+    ) -> Self:
+        return self
+
+    def predict(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+    ) -> tuple[np.ndarray, np.ndarray]:
+        return (
+            np.nanmedian(base_model_mean_predictions, axis=-1),
+            np.nanmedian(base_model_quantile_predictions, axis=-1),
+        )
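To make the array contract above concrete, here is a small sketch that drives ``MedianEnsembleRegressor`` with synthetic arrays of the documented shapes (all sizes arbitrary; the import path follows the package layout in the file listing):

import numpy as np

from autogluon.timeseries.models.ensemble.array_based.regressor import MedianEnsembleRegressor

num_windows, num_items, prediction_length = 2, 3, 4
num_quantiles, num_models = 3, 5

mean_preds = np.random.rand(num_windows, num_items, prediction_length, 1, num_models)
quantile_preds = np.random.rand(num_windows, num_items, prediction_length, num_quantiles, num_models)
labels = np.random.rand(num_windows, num_items, prediction_length, 1)

reg = MedianEnsembleRegressor().fit(mean_preds, quantile_preds, labels)

# predict always receives a single window (leading axis of size 1); the median
# reduces away the trailing models axis.
ens_mean, ens_quantiles = reg.predict(mean_preds[:1], quantile_preds[:1])
print(ens_mean.shape)       # (1, 3, 4, 1)
print(ens_quantiles.shape)  # (1, 3, 4, 3)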
autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py
@@ -0,0 +1,186 @@
+from typing import Literal
+
+import numpy as np
+from typing_extensions import Self
+
+from autogluon.timeseries.utils.timer import Timer
+
+from .abstract import EnsembleRegressor
+
+
+class LinearStackerEnsembleRegressor(EnsembleRegressor):
+    """Linear stacker ensemble regressor using PyTorch optimization with softmax weights.
+
+    Implements weighted averaging of base model predictions with learnable weights optimized
+    via gradient descent. Uses PyTorch during training for optimization, then stores weights
+    as numpy arrays for efficient prediction.
+
+    Parameters
+    ----------
+    quantile_levels
+        List of quantile levels for quantile predictions (e.g., [0.1, 0.5, 0.9]).
+    weights_per
+        Weight configuration specifying which dimensions to learn weights for. Defaults to "m":
+
+        - "m": Per-model weights (shape: num_models)
+        - "mt": Per-model and per-time weights (shape: prediction_length, num_models)
+        - "mq": Per-model and per-model-output (quantiles and mean) weights
+          (shape: num_quantiles+1, num_models)
+        - "mtq": Per-model, per-time, and per-quantile weights
+          (shape: prediction_length, num_quantiles+1, num_models)
+    lr
+        Learning rate for the Adam optimizer. Defaults to 0.1.
+    max_epochs
+        Maximum number of training epochs. Defaults to 10000.
+    relative_tolerance
+        Convergence tolerance for the relative loss change between epochs. Defaults to 1e-7.
+    prune_below
+        Importance threshold for model sparsification. Models with importance below this
+        threshold are dropped after weight optimization. Set to 0.0 to disable sparsification.
+        Defaults to 0.0.
+    """
+
+    def __init__(
+        self,
+        quantile_levels: list[float],
+        weights_per: Literal["m", "mt", "mq", "mtq"] = "m",
+        lr: float = 0.1,
+        max_epochs: int = 10_000,
+        relative_tolerance: float = 1e-7,
+        prune_below: float = 0.0,
+    ):
+        super().__init__()
+        self.quantile_levels = quantile_levels
+        self.weights_per = weights_per
+        self.lr = lr
+        self.max_epochs = max_epochs
+        self.relative_tolerance = relative_tolerance
+        self.prune_below = prune_below
+
+        self.weights: np.ndarray | None = None
+        self.kept_indices: list[int] | None = None
+
+    def _compute_weight_shape(self, base_model_predictions_shape: tuple) -> tuple:
+        """Compute weight tensor shape based on weights_per configuration."""
+        _, _, prediction_length, num_outputs, num_models = base_model_predictions_shape
+
+        shapes = {
+            "m": (1, 1, num_models),
+            "mt": (prediction_length, 1, num_models),
+            "mq": (1, num_outputs, num_models),
+            "mtq": (prediction_length, num_outputs, num_models),
+        }
+        try:
+            return (1, 1) + shapes[self.weights_per]
+        except KeyError:
+            raise ValueError(f"Unsupported weights_per: {self.weights_per}")
+
+    def make_weighted_average_module(self, base_model_predictions_shape: tuple):
+        import torch
+
+        class WeightedAverage(torch.nn.Module):
+            def __init__(self, shape):
+                super().__init__()
+                self.raw_weights = torch.nn.Parameter(torch.zeros(*shape, dtype=torch.float32))
+
+            def get_normalized_weights(self):
+                return torch.softmax(self.raw_weights, dim=-1)  # softmax over models
+
+            def forward(self, base_model_predictions: torch.Tensor):
+                return torch.sum(self.get_normalized_weights() * base_model_predictions, dim=-1)
+
+        return WeightedAverage(self._compute_weight_shape(base_model_predictions_shape))
+
+    def fit(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+        labels: np.ndarray,
+        time_limit: float | None = None,
+    ) -> Self:
+        import torch
+
+        def _ql(
+            labels_tensor: torch.Tensor,
+            ensemble_predictions: torch.Tensor,
+        ) -> torch.Tensor:
+            """Compute the weighted quantile loss on predictions and ground truth (labels).
+            Since the first slice along the output dimension holds the mean, mean predictions
+            are treated on the same footing as median (0.5) predictions when contributing
+            to the overall weighted quantile loss.
+            """
+            quantile_levels = torch.tensor([0.5] + self.quantile_levels, dtype=torch.float32)
+            error = labels_tensor - ensemble_predictions  # (num_windows, num_items, num_time, num_outputs)
+            quantile_loss = torch.maximum(quantile_levels * error, (quantile_levels - 1) * error)
+            return torch.mean(quantile_loss)
+
+        timer = Timer(time_limit).start()
+
+        base_model_predictions = torch.tensor(
+            np.concatenate(
+                [base_model_mean_predictions, base_model_quantile_predictions],
+                axis=3,
+            ),
+            dtype=torch.float32,
+        )
+        labels_tensor = torch.tensor(labels, dtype=torch.float32)
+
+        weighted_average = self.make_weighted_average_module(base_model_predictions.shape)
+
+        optimizer = torch.optim.Adam(weighted_average.parameters(), lr=self.lr)
+
+        prev_loss = float("inf")
+        for _ in range(self.max_epochs):
+            optimizer.zero_grad()
+
+            ensemble_predictions = weighted_average(base_model_predictions)
+
+            loss = _ql(labels_tensor, ensemble_predictions)
+            loss.backward()
+            optimizer.step()
+
+            loss_change = abs(prev_loss - loss.item()) / (loss.item() + 1e-8)
+            if loss_change < self.relative_tolerance:
+                break
+            prev_loss = loss.item()
+
+            if timer.timed_out():
+                break
+
+        with torch.no_grad():
+            self.weights = weighted_average.get_normalized_weights().detach().numpy()
+
+        assert self.weights is not None
+        if self.prune_below > 0.0:
+            importances = self.weights.mean(axis=tuple(range(self.weights.ndim - 1)))  # shape (num_models,)
+
+            mask = importances >= self.prune_below
+            if not mask.any():
+                mask[importances.argmax()] = True
+
+            if not mask.all():
+                self.kept_indices = np.where(mask)[0].tolist()
+                self.weights = self.weights[..., mask]
+                self.weights = self.weights / self.weights.sum(axis=-1, keepdims=True)
+
+        return self
+
+    def predict(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+    ) -> tuple[np.ndarray, np.ndarray]:
+        if self.weights is None:
+            raise ValueError("Model must be fitted before prediction")
+
+        all_predictions = np.concatenate([base_model_mean_predictions, base_model_quantile_predictions], axis=3)
+
+        if self.kept_indices is not None:
+            assert all_predictions.shape[-1] == len(self.kept_indices)
+
+        ensemble_pred = np.sum(self.weights * all_predictions, axis=-1)
+
+        mean_predictions = ensemble_pred[:, :, :, :1]
+        quantile_predictions = ensemble_pred[:, :, :, 1:]
+
+        return mean_predictions, quantile_predictions
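A usage sketch for the stacker on synthetic data (shapes follow the abstract interface above; torch must be installed, and with ``weights_per="m"`` the learned weight vector has one entry per model):

import numpy as np

rng = np.random.default_rng(0)
windows, items, horizon, n_models = 4, 8, 6, 3
quantile_levels = [0.1, 0.5, 0.9]

labels = rng.normal(size=(windows, items, horizon, 1))
# Model 0 is nearly exact; models 1 and 2 are noisy copies of the labels.
noise = rng.normal(size=(windows, items, horizon, 1, n_models)) * np.array([0.05, 1.0, 1.0])
mean_preds = labels[..., None] + noise
quantile_preds = np.repeat(mean_preds, len(quantile_levels), axis=3)

reg = LinearStackerEnsembleRegressor(quantile_levels, weights_per="m", max_epochs=500)
reg.fit(mean_preds, quantile_preds, labels)
print(reg.weights.squeeze())  # most weight should land on model 0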
autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py
@@ -0,0 +1,94 @@
+import logging
+
+import numpy as np
+import pandas as pd
+from typing_extensions import Self
+
+from autogluon.tabular.registry import ag_model_registry as tabular_ag_model_registry
+from autogluon.timeseries.utils.timer import SplitTimer
+
+from .abstract import EnsembleRegressor
+
+logger = logging.getLogger(__name__)
+
+
+class PerQuantileTabularEnsembleRegressor(EnsembleRegressor):
+    """Ensemble regressor using separate models per quantile plus a dedicated mean model."""
+
+    def __init__(
+        self,
+        quantile_levels: list[float],
+        model_name: str,
+        model_hyperparameters: dict | None = None,
+    ):
+        super().__init__()
+        self.quantile_levels = quantile_levels
+        model_type = tabular_ag_model_registry.key_to_cls(model_name)
+        model_hyperparameters = model_hyperparameters or {}
+        self.mean_model = model_type(
+            problem_type="regression",
+            hyperparameters=model_hyperparameters,
+            path="",
+            name=f"{model_name}_mean",
+        )
+        self.quantile_models = [
+            model_type(
+                problem_type="quantile",
+                hyperparameters=model_hyperparameters | {"ag.quantile_levels": [quantile]},
+                path="",
+                name=f"{model_name}_q{quantile}",
+            )
+            for quantile in quantile_levels
+        ]
+
+    def fit(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+        labels: np.ndarray,
+        time_limit: float | None = None,
+    ) -> Self:
+        num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+        y = pd.Series(labels.reshape(num_windows * num_items * prediction_length))
+
+        total_rounds = 1 + len(self.quantile_levels)
+        timer = SplitTimer(time_limit, rounds=total_rounds).start()
+
+        # Fit mean model
+        X_mean = self._get_feature_df(base_model_mean_predictions, 0)
+        self.mean_model.fit(X=X_mean, y=y, time_limit=timer.round_time_remaining())
+        timer.next_round()
+
+        # Fit quantile models
+        for i, model in enumerate(self.quantile_models):
+            X_q = self._get_feature_df(base_model_quantile_predictions, i)
+            model.fit(X=X_q, y=y, time_limit=timer.round_time_remaining())
+            timer.next_round()
+
+        return self
+
+    def _get_feature_df(self, predictions: np.ndarray, index: int) -> pd.DataFrame:
+        num_windows, num_items, prediction_length, _, num_models = predictions.shape
+        num_tabular_items = num_windows * num_items * prediction_length
+        return pd.DataFrame(
+            predictions[:, :, :, index].reshape(num_tabular_items, num_models),
+            columns=[f"model_{mi}" for mi in range(num_models)],
+        )
+
+    def predict(
+        self, base_model_mean_predictions: np.ndarray, base_model_quantile_predictions: np.ndarray
+    ) -> tuple[np.ndarray, np.ndarray]:
+        assert self.mean_model.is_fit()
+        num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+        assert num_windows == 1, "Prediction expects a single window to be provided"
+
+        X_mean = self._get_feature_df(base_model_mean_predictions, 0)
+        mean_predictions = self.mean_model.predict(X_mean).reshape(num_windows, num_items, prediction_length, 1)
+
+        quantile_predictions_list = []
+        for i, model in enumerate(self.quantile_models):
+            X_q = self._get_feature_df(base_model_quantile_predictions, i)
+            quantile_predictions_list.append(model.predict(X_q).reshape(num_windows, num_items, prediction_length))
+        quantile_predictions = np.stack(quantile_predictions_list, axis=-1)
+
+        return mean_predictions, quantile_predictions
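The feature construction in ``_get_feature_df`` flattens the window, item, and horizon axes into tabular rows, with one column per base model. A standalone numpy/pandas sketch of the same reshape:

import numpy as np
import pandas as pd

windows, items, horizon, n_quantiles, n_models = 2, 3, 4, 3, 5
preds = np.random.rand(windows, items, horizon, n_quantiles, n_models)

index = 1  # slice out the second quantile level
rows = windows * items * horizon
X = pd.DataFrame(
    preds[:, :, :, index].reshape(rows, n_models),
    columns=[f"model_{mi}" for mi in range(n_models)],
)
print(X.shape)  # (24, 5): one row per (window, item, step), one column per model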
autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py
@@ -0,0 +1,107 @@
+import logging
+
+import numpy as np
+import pandas as pd
+from typing_extensions import Self
+
+from autogluon.tabular.registry import ag_model_registry as tabular_ag_model_registry
+
+from .abstract import EnsembleRegressor
+
+logger = logging.getLogger(__name__)
+
+
+class TabularEnsembleRegressor(EnsembleRegressor):
+    """Ensemble regressor based on a single model from AutoGluon-Tabular that predicts all quantiles simultaneously."""
+
+    def __init__(
+        self,
+        quantile_levels: list[float],
+        model_name: str,
+        model_hyperparameters: dict | None = None,
+    ):
+        super().__init__()
+        self.quantile_levels = quantile_levels
+        model_type = tabular_ag_model_registry.key_to_cls(model_name)
+        model_hyperparameters = model_hyperparameters or {}
+        self.model = model_type(
+            problem_type="quantile",
+            hyperparameters=model_hyperparameters | {"ag.quantile_levels": quantile_levels},
+            path="",
+            name=model_name,
+        )
+
+    def fit(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+        labels: np.ndarray,
+        time_limit: float | None = None,
+    ) -> Self:
+        X = self._get_feature_df(base_model_mean_predictions, base_model_quantile_predictions)
+        num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+        y = pd.Series(labels.reshape(num_windows * num_items * prediction_length))
+        self.model.fit(X=X, y=y, time_limit=time_limit)
+        return self
+
+    def predict(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+    ) -> tuple[np.ndarray, np.ndarray]:
+        assert self.model.is_fit()
+        num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+        assert num_windows == 1, "Prediction expects a single window to be provided"
+
+        X = self._get_feature_df(base_model_mean_predictions, base_model_quantile_predictions)
+
+        pred = self.model.predict(X)
+
+        # Reshape back to (num_windows, num_items, prediction_length, num_quantiles)
+        pred = pred.reshape(num_windows, num_items, prediction_length, len(self.quantile_levels))
+
+        # Use median quantile as mean prediction
+        median_idx = self._get_median_quantile_index()
+        mean_pred = pred[:, :, :, median_idx : median_idx + 1]
+        quantile_pred = pred
+
+        return mean_pred, quantile_pred
+
+    def _get_feature_df(
+        self,
+        base_model_mean_predictions: np.ndarray,
+        base_model_quantile_predictions: np.ndarray,
+    ) -> pd.DataFrame:
+        num_windows, num_items, prediction_length, _, num_models = base_model_mean_predictions.shape
+        num_tabular_items = num_windows * num_items * prediction_length
+        features_array = np.hstack(
+            [
+                base_model_mean_predictions.reshape(num_tabular_items, -1),
+                base_model_quantile_predictions.reshape(num_tabular_items, -1),
+            ]
+        )
+        return pd.DataFrame(features_array, columns=self._get_feature_names(num_models))
+
+    def _get_feature_names(self, num_models: int) -> list[str]:
+        feature_names = []
+        for mi in range(num_models):
+            feature_names.append(f"model_{mi}_mean")
+        for quantile in self.quantile_levels:
+            for mi in range(num_models):
+                feature_names.append(f"model_{mi}_q{quantile}")
+
+        return feature_names
+
+    def _get_median_quantile_index(self):
+        """Get quantile index closest to 0.5"""
+        quantile_array = np.array(self.quantile_levels)
+        median_idx = int(np.argmin(np.abs(quantile_array - 0.5)))
+        selected_quantile = quantile_array[median_idx]
+
+        if selected_quantile != 0.5:
+            logger.warning(
+                f"Selected quantile {selected_quantile} is not exactly 0.5. "
+                f"Using closest available quantile for median prediction."
+            )
+
+        return median_idx
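The fallback in ``_get_median_quantile_index`` reduces to this one-liner; when 0.5 is not among ``quantile_levels``, the nearest level stands in for the mean forecast:

import numpy as np

quantile_levels = [0.1, 0.4, 0.9]
median_idx = int(np.argmin(np.abs(np.array(quantile_levels) - 0.5)))
print(median_idx, quantile_levels[median_idx])  # 1 0.4 -- nearest available to 0.5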