autogluon.timeseries 1.3.2b20250712__py3-none-any.whl → 1.4.1b20251116__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. autogluon/timeseries/configs/__init__.py +3 -2
  2. autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
  3. autogluon/timeseries/configs/predictor_presets.py +84 -0
  4. autogluon/timeseries/dataset/ts_dataframe.py +98 -72
  5. autogluon/timeseries/learner.py +19 -18
  6. autogluon/timeseries/metrics/__init__.py +5 -5
  7. autogluon/timeseries/metrics/abstract.py +17 -17
  8. autogluon/timeseries/metrics/point.py +1 -1
  9. autogluon/timeseries/metrics/quantile.py +2 -2
  10. autogluon/timeseries/metrics/utils.py +4 -4
  11. autogluon/timeseries/models/__init__.py +4 -0
  12. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -75
  13. autogluon/timeseries/models/abstract/tunable.py +6 -6
  14. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +72 -76
  15. autogluon/timeseries/models/autogluon_tabular/per_step.py +104 -46
  16. autogluon/timeseries/models/autogluon_tabular/transforms.py +9 -7
  17. autogluon/timeseries/models/chronos/model.py +115 -78
  18. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +76 -44
  19. autogluon/timeseries/models/ensemble/__init__.py +29 -2
  20. autogluon/timeseries/models/ensemble/abstract.py +16 -52
  21. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  22. autogluon/timeseries/models/ensemble/array_based/abstract.py +247 -0
  23. autogluon/timeseries/models/ensemble/array_based/models.py +50 -0
  24. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +10 -0
  25. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +87 -0
  26. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +133 -0
  27. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +141 -0
  28. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  29. autogluon/timeseries/models/ensemble/weighted/abstract.py +41 -0
  30. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +8 -18
  31. autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py} +13 -13
  32. autogluon/timeseries/models/gluonts/abstract.py +26 -26
  33. autogluon/timeseries/models/gluonts/dataset.py +4 -4
  34. autogluon/timeseries/models/gluonts/models.py +27 -12
  35. autogluon/timeseries/models/local/abstract_local_model.py +14 -14
  36. autogluon/timeseries/models/local/naive.py +4 -0
  37. autogluon/timeseries/models/local/npts.py +1 -0
  38. autogluon/timeseries/models/local/statsforecast.py +30 -14
  39. autogluon/timeseries/models/multi_window/multi_window_model.py +34 -23
  40. autogluon/timeseries/models/registry.py +65 -0
  41. autogluon/timeseries/models/toto/__init__.py +3 -0
  42. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  43. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  44. autogluon/timeseries/models/toto/_internal/backbone/attention.py +197 -0
  45. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  46. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  47. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  48. autogluon/timeseries/models/toto/_internal/backbone/rope.py +94 -0
  49. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +306 -0
  50. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  51. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  52. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  53. autogluon/timeseries/models/toto/dataloader.py +108 -0
  54. autogluon/timeseries/models/toto/hf_pretrained_model.py +119 -0
  55. autogluon/timeseries/models/toto/model.py +236 -0
  56. autogluon/timeseries/predictor.py +94 -107
  57. autogluon/timeseries/regressor.py +31 -27
  58. autogluon/timeseries/splitter.py +7 -31
  59. autogluon/timeseries/trainer/__init__.py +3 -0
  60. autogluon/timeseries/trainer/ensemble_composer.py +250 -0
  61. autogluon/timeseries/trainer/model_set_builder.py +256 -0
  62. autogluon/timeseries/trainer/prediction_cache.py +149 -0
  63. autogluon/timeseries/{trainer.py → trainer/trainer.py} +182 -307
  64. autogluon/timeseries/trainer/utils.py +18 -0
  65. autogluon/timeseries/transforms/covariate_scaler.py +4 -4
  66. autogluon/timeseries/transforms/target_scaler.py +14 -14
  67. autogluon/timeseries/utils/datetime/lags.py +2 -2
  68. autogluon/timeseries/utils/datetime/time_features.py +2 -2
  69. autogluon/timeseries/utils/features.py +41 -37
  70. autogluon/timeseries/utils/forecast.py +5 -5
  71. autogluon/timeseries/utils/warning_filters.py +3 -1
  72. autogluon/timeseries/version.py +1 -1
  73. autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth +1 -0
  74. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/METADATA +32 -17
  75. autogluon_timeseries-1.4.1b20251116.dist-info/RECORD +96 -0
  76. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/WHEEL +1 -1
  77. autogluon/timeseries/configs/presets_configs.py +0 -79
  78. autogluon/timeseries/evaluator.py +0 -6
  79. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  80. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  81. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  82. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -530
  83. autogluon/timeseries/models/presets.py +0 -358
  84. autogluon.timeseries-1.3.2b20250712-py3.9-nspkg.pth +0 -1
  85. autogluon.timeseries-1.3.2b20250712.dist-info/RECORD +0 -71
  86. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/LICENSE +0 -0
  87. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/NOTICE +0 -0
  88. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/namespace_packages.txt +0 -0
  89. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/top_level.txt +0 -0
  90. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py
@@ -0,0 +1,141 @@
+ import logging
+ from typing import Optional
+
+ import numpy as np
+ import pandas as pd
+ from typing_extensions import Self
+
+ from autogluon.tabular import TabularPredictor
+
+ from .abstract import EnsembleRegressor
+
+ logger = logging.getLogger(__name__)
+
+
+ class TabularEnsembleRegressor(EnsembleRegressor):
+     """TabularPredictor ensemble regressor using AutoGluon-Tabular as a single
+     quantile regressor for the target.
+     """
+
+     def __init__(
+         self,
+         path: str,
+         quantile_levels: list[float],
+         tabular_hyperparameters: Optional[dict] = None,
+     ):
+         super().__init__()
+         self.path = path
+         self.quantile_levels = quantile_levels
+         self.tabular_hyperparameters = tabular_hyperparameters or {}
+         self.predictor: Optional[TabularPredictor] = None
+
+     def set_path(self, path: str) -> None:
+         self.path = path
+
+     def fit(
+         self,
+         base_model_mean_predictions: np.ndarray,
+         base_model_quantile_predictions: np.ndarray,
+         labels: np.ndarray,
+         time_limit: Optional[int] = None,
+         **kwargs,
+     ) -> Self:
+         self.predictor = TabularPredictor(
+             path=self.path,
+             label="target",
+             problem_type="quantile",
+             quantile_levels=self.quantile_levels,
+             verbosity=1,
+         )
+
+         # get features
+         df = self._get_feature_df(base_model_mean_predictions, base_model_quantile_predictions)
+
+         # get labels
+         num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+         label_series = labels.reshape(num_windows * num_items * prediction_length)
+         df["target"] = label_series
+
+         self.predictor.fit(
+             df,
+             hyperparameters=self.tabular_hyperparameters,
+             time_limit=time_limit,  # type: ignore
+         )
+
+         return self
+
+     def predict(
+         self,
+         base_model_mean_predictions: np.ndarray,
+         base_model_quantile_predictions: np.ndarray,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         if self.predictor is None:
+             try:
+                 self.predictor = TabularPredictor.load(self.path)
+             except FileNotFoundError:
+                 raise ValueError("Model must be fitted before prediction")
+
+         num_windows, num_items, prediction_length = base_model_mean_predictions.shape[:3]
+         assert num_windows == 1, "Prediction expects a single window to be provided"
+
+         df = self._get_feature_df(base_model_mean_predictions, base_model_quantile_predictions)
+
+         pred = self.predictor.predict(df, as_pandas=False)
+
+         # Reshape back to (num_windows, num_items, prediction_length, num_quantiles)
+         pred = pred.reshape(num_windows, num_items, prediction_length, len(self.quantile_levels))
+
+         # Use median quantile as mean prediction
+         median_idx = self._get_median_quantile_index()
+         mean_pred = pred[:, :, :, median_idx : median_idx + 1]
+         quantile_pred = pred
+
+         return mean_pred, quantile_pred
+
+     def _get_feature_df(
+         self,
+         base_model_mean_predictions: np.ndarray,
+         base_model_quantile_predictions: np.ndarray,
+     ) -> pd.DataFrame:
+         num_windows, num_items, prediction_length, _, num_models = base_model_mean_predictions.shape
+         num_tabular_items = num_windows * num_items * prediction_length
+
+         X = np.hstack(
+             [
+                 base_model_mean_predictions.reshape(num_tabular_items, -1),
+                 base_model_quantile_predictions.reshape(num_tabular_items, -1),
+             ]
+         )
+
+         df = pd.DataFrame(X, columns=self._get_feature_names(num_models))
+         return df
+
+     def _get_feature_names(self, num_models: int) -> list[str]:
+         feature_names = []
+         for mi in range(num_models):
+             feature_names.append(f"model_{mi}_mean")
+         for quantile in self.quantile_levels:
+             for mi in range(num_models):
+                 feature_names.append(f"model_{mi}_q{quantile}")
+
+         return feature_names
+
+     def _get_median_quantile_index(self):
+         """Get quantile index closest to 0.5"""
+         quantile_array = np.array(self.quantile_levels)
+         median_idx = int(np.argmin(np.abs(quantile_array - 0.5)))
+         selected_quantile = quantile_array[median_idx]
+
+         if selected_quantile != 0.5:
+             logger.warning(
+                 f"Selected quantile {selected_quantile} is not exactly 0.5. "
+                 f"Using closest available quantile for median prediction."
+             )
+
+         return median_idx
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         # Remove the predictor to avoid pickling heavy TabularPredictor objects
+         state["predictor"] = None
+         return state
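Note on the feature layout in the new file above: each (window, item, horizon-step) triple becomes one tabular row, with one mean column per base model followed by one column per (quantile, model) pair, in the same order produced by _get_feature_names. A minimal sketch with made-up sizes (the real arrays come from the ensemble's backtest windows):

import numpy as np

# Hypothetical sizes: 2 windows, 3 items, horizon 4, 2 base models, 3 quantiles.
num_windows, num_items, prediction_length, num_models = 2, 3, 4, 2
quantile_levels = [0.1, 0.5, 0.9]

mean_preds = np.random.rand(num_windows, num_items, prediction_length, 1, num_models)
quantile_preds = np.random.rand(num_windows, num_items, prediction_length, len(quantile_levels), num_models)

# Same reshape as _get_feature_df: one row per (window, item, step) triple;
# row-major flattening keeps the (quantile, model) column order of _get_feature_names.
num_rows = num_windows * num_items * prediction_length
X = np.hstack([mean_preds.reshape(num_rows, -1), quantile_preds.reshape(num_rows, -1)])
print(X.shape)  # (24, 8): 2 mean columns + 3 quantiles * 2 models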

autogluon/timeseries/models/ensemble/weighted/__init__.py
@@ -0,0 +1,8 @@
+ from .basic import PerformanceWeightedEnsemble, SimpleAverageEnsemble
+ from .greedy import GreedyEnsemble
+
+ __all__ = [
+     "SimpleAverageEnsemble",
+     "PerformanceWeightedEnsemble",
+     "GreedyEnsemble",
+ ]

autogluon/timeseries/models/ensemble/weighted/abstract.py
@@ -0,0 +1,41 @@
+ import functools
+ from abc import ABC
+ from typing import Optional
+
+ import numpy as np
+
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
+
+ from ..abstract import AbstractTimeSeriesEnsembleModel
+
+
+ class AbstractWeightedTimeSeriesEnsembleModel(AbstractTimeSeriesEnsembleModel, ABC):
+     """Abstract class for weighted ensembles which assign one (global) weight per model."""
+
+     def __init__(self, name: Optional[str] = None, **kwargs):
+         super().__init__(name=name, **kwargs)
+         self.model_to_weight: dict[str, float] = {}
+
+     @property
+     def model_names(self) -> list[str]:
+         return list(self.model_to_weight.keys())
+
+     @property
+     def model_weights(self) -> np.ndarray:
+         return np.array(list(self.model_to_weight.values()), dtype=np.float64)
+
+     def _predict(self, data: dict[str, TimeSeriesDataFrame], **kwargs) -> TimeSeriesDataFrame:
+         weighted_predictions = [data[model_name] * weight for model_name, weight in self.model_to_weight.items()]
+         return functools.reduce(lambda x, y: x + y, weighted_predictions)
+
+     def get_info(self) -> dict:
+         info = super().get_info()
+         info["model_weights"] = self.model_to_weight.copy()
+         return info
+
+     def remap_base_models(self, model_refit_map: dict[str, str]) -> None:
+         updated_weights = {}
+         for model, weight in self.model_to_weight.items():
+             model_full_name = model_refit_map.get(model, model)
+             updated_weights[model_full_name] = weight
+         self.model_to_weight = updated_weights
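The _predict above is just an element-wise weighted sum of the base models' prediction frames. A toy sketch with plain pandas frames standing in for TimeSeriesDataFrame (a pandas DataFrame subclass, so the same arithmetic applies); model names and values here are invented:

import functools

import pandas as pd

preds = {
    "DeepAR": pd.DataFrame({"mean": [1.0, 2.0], "0.9": [1.5, 2.5]}),
    "TFT": pd.DataFrame({"mean": [3.0, 4.0], "0.9": [3.5, 4.5]}),
}
model_to_weight = {"DeepAR": 0.25, "TFT": 0.75}

# Scale each model's frame by its weight, then sum element-wise.
weighted = [preds[name] * w for name, w in model_to_weight.items()]
print(functools.reduce(lambda x, y: x + y, weighted))
#    mean  0.9
# 0   2.5  3.0
# 1   3.5  4.0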

autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py}
@@ -1,4 +1,4 @@
- from typing import Dict, List, Optional
+ from typing import Any, Optional
 
  import numpy as np
 
@@ -10,16 +10,11 @@ from .abstract import AbstractWeightedTimeSeriesEnsembleModel
  class SimpleAverageEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
      """Constructs a weighted ensemble using a simple average of the constituent models' predictions."""
 
-     def __init__(self, name: Optional[str] = None, **kwargs):
-         if name is None:
-             name = "SimpleAverageEnsemble"
-         super().__init__(name=name, **kwargs)
-
      def _fit(
          self,
-         predictions_per_window: Dict[str, List[TimeSeriesDataFrame]],
-         data_per_window: List[TimeSeriesDataFrame],
-         model_scores: Optional[Dict[str, float]] = None,
+         predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+         data_per_window: list[TimeSeriesDataFrame],
+         model_scores: Optional[dict[str, float]] = None,
          time_limit: Optional[float] = None,
      ):
          self.model_to_weight = {}
@@ -47,19 +42,14 @@ class PerformanceWeightedEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
      36.1 (2020): 93-97.
      """
 
-     def __init__(self, name: Optional[str] = None, **kwargs):
-         if name is None:
-             name = "PerformanceWeightedEnsemble"
-         super().__init__(name=name, **kwargs)
-
-     def _get_default_hyperparameters(self) -> Dict:
+     def _get_default_hyperparameters(self) -> dict[str, Any]:
          return {"weight_scheme": "sqrt"}
 
      def _fit(
          self,
-         predictions_per_window: Dict[str, List[TimeSeriesDataFrame]],
-         data_per_window: List[TimeSeriesDataFrame],
-         model_scores: Optional[Dict[str, float]] = None,
+         predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+         data_per_window: list[TimeSeriesDataFrame],
+         model_scores: Optional[dict[str, float]] = None,
          time_limit: Optional[float] = None,
      ):
          assert model_scores is not None
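These hunks only show that PerformanceWeightedEnsemble defaults to weight_scheme="sqrt" and requires model_scores; the weighting formula itself lies outside this diff. One plausible reading, as a hedged sketch only (inverse square-root of each model's validation error, normalized to sum to one):

import numpy as np

# Hypothetical validation errors per model (lower is better). AutoGluon scores
# are higher-is-better, so the real code would first convert scores to errors.
errors = {"DeepAR": 0.16, "TFT": 0.04, "SeasonalNaive": 0.64}
raw = {name: 1.0 / np.sqrt(err) for name, err in errors.items()}
total = sum(raw.values())
print({name: round(w / total, 3) for name, w in raw.items()})
# {'DeepAR': 0.286, 'TFT': 0.571, 'SeasonalNaive': 0.143}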

autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py}
@@ -1,7 +1,7 @@
  import copy
  import logging
  import pprint
- from typing import Dict, List, Optional
+ from typing import Any, Optional
 
  import numpy as np
 
@@ -47,14 +47,14 @@ class TimeSeriesEnsembleSelection(EnsembleSelection):
          self.dummy_pred_per_window = []
          self.scorer_per_window = []
 
-         self.dummy_pred_per_window: Optional[List[TimeSeriesDataFrame]]
-         self.scorer_per_window: Optional[List[TimeSeriesScorer]]
-         self.data_future_per_window: Optional[List[TimeSeriesDataFrame]]
+         self.dummy_pred_per_window: Optional[list[TimeSeriesDataFrame]]
+         self.scorer_per_window: Optional[list[TimeSeriesScorer]]
+         self.data_future_per_window: Optional[list[TimeSeriesDataFrame]]
 
      def fit(  # type: ignore
          self,
-         predictions: List[List[TimeSeriesDataFrame]],
-         labels: List[TimeSeriesDataFrame],
+         predictions: list[list[TimeSeriesDataFrame]],
+         labels: list[TimeSeriesDataFrame],
          time_limit: Optional[float] = None,
      ):
          return super().fit(
@@ -65,10 +65,10 @@ class TimeSeriesEnsembleSelection(EnsembleSelection):
 
      def _fit(  # type: ignore
          self,
-         predictions: List[List[TimeSeriesDataFrame]],
-         labels: List[TimeSeriesDataFrame],
+         predictions: list[list[TimeSeriesDataFrame]],
+         labels: list[TimeSeriesDataFrame],
          time_limit: Optional[float] = None,
-         sample_weight: Optional[List[float]] = None,
+         sample_weight: Optional[list[float]] = None,
      ):
          # Stack predictions for each model into a 3d tensor of shape [num_val_windows, num_rows, num_cols]
          stacked_predictions = [np.stack(preds) for preds in predictions]
@@ -157,14 +157,14 @@ class GreedyEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
              name = "WeightedEnsemble"
          super().__init__(name=name, **kwargs)
 
-     def _get_default_hyperparameters(self) -> Dict:
+     def _get_default_hyperparameters(self) -> dict[str, Any]:
          return {"ensemble_size": 100}
 
      def _fit(
          self,
-         predictions_per_window: Dict[str, List[TimeSeriesDataFrame]],
-         data_per_window: List[TimeSeriesDataFrame],
-         model_scores: Optional[Dict[str, float]] = None,
+         predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+         data_per_window: list[TimeSeriesDataFrame],
+         model_scores: Optional[dict[str, float]] = None,
          time_limit: Optional[float] = None,
      ):
          ensemble_selection = TimeSeriesEnsembleSelection(
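GreedyEnsemble delegates to TimeSeriesEnsembleSelection, a Caruana-style forward selection with replacement (ensemble_size=100 by default). A toy version of the selection loop, using plain MSE on flat arrays where the real class scores candidate ensembles with a TimeSeriesScorer per validation window:

import numpy as np

def greedy_weights(preds: list[np.ndarray], y: np.ndarray, ensemble_size: int = 100) -> np.ndarray:
    counts = np.zeros(len(preds))
    running_sum = np.zeros_like(y)
    for step in range(1, ensemble_size + 1):
        # Try appending each model; keep the one that minimizes ensemble MSE.
        losses = [np.mean(((running_sum + p) / step - y) ** 2) for p in preds]
        best = int(np.argmin(losses))
        counts[best] += 1
        running_sum += preds[best]
    return counts / counts.sum()  # selection counts normalize into model weights

rng = np.random.default_rng(0)
y = rng.normal(size=100)
preds = [y + rng.normal(scale=s, size=100) for s in (0.1, 0.5, 1.0)]
print(greedy_weights(preds, y))  # most weight goes to the least-noisy model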

autogluon/timeseries/models/gluonts/abstract.py
@@ -3,7 +3,7 @@ import os
  import shutil
  from datetime import timedelta
  from pathlib import Path
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union, cast, overload
+ from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, cast, overload
 
  import gluonts
  import gluonts.core.settings
@@ -21,7 +21,7 @@ from autogluon.core.hpo.constants import RAY_BACKEND
  from autogluon.tabular.models.tabular_nn.utils.categorical_encoders import (
      OneHotMergeRaresHandleUnknownEncoder as OneHotEncoder,
  )
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
  from autogluon.timeseries.utils.warning_filters import disable_root_logger, warning_filter
 
@@ -42,20 +42,20 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
 
      Parameters
      ----------
-     path: str
+     path
          directory to store model artifacts.
-     freq: str
+     freq
          string representation (compatible with GluonTS frequency strings) for the data provided.
          For example, "1D" for daily data, "1H" for hourly data, etc.
-     prediction_length: int
+     prediction_length
          Number of time steps ahead (length of the forecast horizon) the model will be optimized
          to predict. At inference time, this will be the number of time steps the model will
          predict.
-     name: str
+     name
          Name of the model. Also, name of subdirectory inside path where model will be saved.
-     eval_metric: str
+     eval_metric
          objective function the model intends to optimize, will use WQL by default.
-     hyperparameters:
+     hyperparameters
          various hyperparameters that will be used by model (can be search spaces instead of
          fixed values). See *Other Parameters* in each inheriting model's documentation for
          possible values.
@@ -77,7 +77,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          path: Optional[str] = None,
          name: Optional[str] = None,
          eval_metric: Optional[str] = None,
-         hyperparameters: Optional[Dict[str, Any]] = None,
+         hyperparameters: Optional[dict[str, Any]] = None,
          **kwargs,  # noqa
      ):
          super().__init__(
@@ -100,9 +100,9 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          self.num_feat_dynamic_real = 0
          self.num_past_feat_dynamic_cat = 0
          self.num_past_feat_dynamic_real = 0
-         self.feat_static_cat_cardinality: List[int] = []
-         self.feat_dynamic_cat_cardinality: List[int] = []
-         self.past_feat_dynamic_cat_cardinality: List[int] = []
+         self.feat_static_cat_cardinality: list[int] = []
+         self.feat_dynamic_cat_cardinality: list[int] = []
+         self.past_feat_dynamic_cat_cardinality: list[int] = []
          self.negative_data = True
 
      def save(self, path: Optional[str] = None, verbose: bool = True) -> str:
@@ -234,7 +234,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
 
          return self._get_default_hyperparameters() | init_args
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          """Get GluonTS specific constructor arguments for estimator objects, an alias to `self.get_hyperparameters`
          for better readability."""
          return self.get_hyperparameters()
@@ -277,8 +277,8 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
 
          return torch.cuda.is_available()
 
-     def get_minimum_resources(self, is_gpu_available: bool = False) -> Dict[str, Union[int, float]]:
-         minimum_resources: Dict[str, Union[int, float]] = {"num_cpus": 1}
+     def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, Union[int, float]]:
+         minimum_resources: dict[str, Union[int, float]] = {"num_cpus": 1}
          # if GPU is available, we train with 1 GPU per trial
          if is_gpu_available:
              minimum_resources["num_gpus"] = 1
@@ -440,7 +440,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          self,
          time_limit: Optional[float],
          early_stopping_patience: Optional[int] = None,
-     ) -> List[Callable]:
+     ) -> list[Callable]:
          """Retrieve a list of callback objects for the GluonTS trainer"""
          from lightning.pytorch.callbacks import EarlyStopping, Timer
 
@@ -473,7 +473,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          data: TimeSeriesDataFrame,
          known_covariates: Optional[TimeSeriesDataFrame] = None,
          num_samples: Optional[int] = None,
-     ) -> List[Forecast]:
+     ) -> list[Forecast]:
          assert self.gts_predictor is not None, "GluonTS models must be fit before predicting."
          gts_data = self._to_gluonts_dataset(data, known_covariates=known_covariates)
          return list(
@@ -483,7 +483,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
              )
          )
 
-     def _stack_quantile_forecasts(self, forecasts: List[QuantileForecast], item_ids: pd.Index) -> pd.DataFrame:
+     def _stack_quantile_forecasts(self, forecasts: list[QuantileForecast], item_ids: pd.Index) -> pd.DataFrame:
          # GluonTS always saves item_id as a string
          item_id_to_forecast = {str(f.item_id): f for f in forecasts}
          result_dfs = []
@@ -496,7 +496,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          columns_order = ["mean"] + [str(q) for q in self.quantile_levels]
          return forecast_df[columns_order]
 
-     def _stack_sample_forecasts(self, forecasts: List[SampleForecast], item_ids: pd.Index) -> pd.DataFrame:
+     def _stack_sample_forecasts(self, forecasts: list[SampleForecast], item_ids: pd.Index) -> pd.DataFrame:
          item_id_to_forecast = {str(f.item_id): f for f in forecasts}
          samples_per_item = []
          for item_id in item_ids:
@@ -509,7 +509,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          return pd.DataFrame(forecast_array, columns=["mean"] + [str(q) for q in self.quantile_levels])
 
      def _stack_distribution_forecasts(
-         self, forecasts: List["DistributionForecast"], item_ids: pd.Index
+         self, forecasts: list["DistributionForecast"], item_ids: pd.Index
      ) -> pd.DataFrame:
          import torch
          from gluonts.torch.distributions import AffineTransformed
@@ -523,7 +523,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
              "Expected forecast.distribution to be an instance of AffineTransformed"
          )
 
-         def stack_distributions(distributions: List[Distribution]) -> Distribution:
+         def stack_distributions(distributions: list[Distribution]) -> Distribution:
              """Stack multiple torch.Distribution objects into a single distribution"""
              last_dist: Distribution = distributions[-1]
 
@@ -561,18 +561,18 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
 
      def _gluonts_forecasts_to_data_frame(
          self,
-         forecasts: List[Forecast],
+         forecasts: list[Forecast],
          forecast_index: pd.MultiIndex,
      ) -> TimeSeriesDataFrame:
          from gluonts.torch.model.forecast import DistributionForecast
 
-         item_ids = forecast_index.unique(level=ITEMID)
+         item_ids = forecast_index.unique(level=TimeSeriesDataFrame.ITEMID)
          if isinstance(forecasts[0], SampleForecast):
-             forecast_df = self._stack_sample_forecasts(cast(List[SampleForecast], forecasts), item_ids)
+             forecast_df = self._stack_sample_forecasts(cast(list[SampleForecast], forecasts), item_ids)
          elif isinstance(forecasts[0], QuantileForecast):
-             forecast_df = self._stack_quantile_forecasts(cast(List[QuantileForecast], forecasts), item_ids)
+             forecast_df = self._stack_quantile_forecasts(cast(list[QuantileForecast], forecasts), item_ids)
          elif isinstance(forecasts[0], DistributionForecast):
-             forecast_df = self._stack_distribution_forecasts(cast(List[DistributionForecast], forecasts), item_ids)
+             forecast_df = self._stack_distribution_forecasts(cast(list[DistributionForecast], forecasts), item_ids)
          else:
              raise ValueError(f"Unrecognized forecast type {type(forecasts[0])}")
 

autogluon/timeseries/models/gluonts/dataset.py
@@ -1,11 +1,11 @@
- from typing import Any, Dict, Iterator, Optional, Type
+ from typing import Any, Iterator, Optional, Type
 
  import numpy as np
  import pandas as pd
  from gluonts.dataset.common import Dataset as GluonTSDataset
  from gluonts.dataset.field_names import FieldName
 
- from autogluon.timeseries.dataset.ts_dataframe import TIMESTAMP, TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
  from autogluon.timeseries.utils.datetime import norm_freq_str
 
 
@@ -44,7 +44,7 @@ class SimpleGluonTSDataset(GluonTSDataset):
          # Replace inefficient groupby ITEMID with indptr that stores start:end of each time series
          self.item_ids = target_df.item_ids
          self.indptr = target_df.get_indptr()
-         self.start_timestamps = target_df.index[self.indptr[:-1]].to_frame(index=False)[TIMESTAMP]
+         self.start_timestamps = target_df.index[self.indptr[:-1]].to_frame(index=False)[TimeSeriesDataFrame.TIMESTAMP]
          assert len(self.item_ids) == len(self.start_timestamps)
 
      @staticmethod
@@ -76,7 +76,7 @@ class SimpleGluonTSDataset(GluonTSDataset):
      def __len__(self):
          return len(self.indptr) - 1  # noqa
 
-     def __iter__(self) -> Iterator[Dict[str, Any]]:
+     def __iter__(self) -> Iterator[dict[str, Any]]:
          for j in range(len(self.indptr) - 1):
              start_idx = self.indptr[j]
              end_idx = self.indptr[j + 1]
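The indptr mentioned in the comment above is the CSR-style offsets trick: instead of a pandas groupby per item, the dataset stores the start:end offsets of each series within one flat array. A minimal illustration with invented values:

import numpy as np

target = np.array([1.0, 2.0, 3.0, 10.0, 20.0])  # two series, concatenated
indptr = np.array([0, 3, 5])  # series 0 -> rows 0:3, series 1 -> rows 3:5

for j in range(len(indptr) - 1):
    print(target[indptr[j] : indptr[j + 1]])
# [1. 2. 3.]
# [10. 20.]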

autogluon/timeseries/models/gluonts/models.py
@@ -3,7 +3,7 @@ Module including wrappers for PyTorch implementations of models in GluonTS
  """
 
  import logging
- from typing import Any, Dict, Type
+ from typing import Any, Type
 
  from gluonts.model.estimator import Estimator as GluonTSEstimator
 
@@ -60,7 +60,7 @@ class DeepARModel(AbstractGluonTSModel):
          Distribution output object that defines how the model output is converted to a forecast, and how the loss is computed.
      scaling: bool, default = True
          If True, mean absolute scaling will be applied to each *context window* during training & prediction.
-         Note that this is different from the `target_scaler` that is applied to the *entire time series*.
+         Note that this is different from the ``target_scaler`` that is applied to the *entire time series*.
      max_epochs : int, default = 100
          Number of epochs the model will be trained for
      batch_size : int, default = 64
@@ -81,6 +81,8 @@ class DeepARModel(AbstractGluonTSModel):
 
      # TODO: Replace "scaling: bool" with "window_scaler": {"mean_abs", None} for consistency?
 
+     ag_priority = 40
+
      _supports_known_covariates = True
      _supports_static_features = True
 
@@ -89,7 +91,7 @@ class DeepARModel(AbstractGluonTSModel):
 
          return DeepAREstimator
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          init_kwargs = super()._get_estimator_init_args()
          init_kwargs["num_feat_static_cat"] = self.num_feat_static_cat
          init_kwargs["num_feat_static_real"] = self.num_feat_static_real
@@ -111,7 +113,7 @@ class SimpleFeedForwardModel(AbstractGluonTSModel):
      ----------------
      context_length : int, default = max(10, 2 * prediction_length)
          Number of time units that condition the predictions
-     hidden_dimensions: List[int], default = [20, 20]
+     hidden_dimensions: list[int], default = [20, 20]
          Size of hidden layers in the feedforward network
      distr_output : gluonts.torch.distributions.Output, default = StudentTOutput()
          Distribution output object that defines how the model output is converted to a forecast, and how the loss is computed.
@@ -119,7 +121,7 @@ class SimpleFeedForwardModel(AbstractGluonTSModel):
          Whether to use batch normalization
      mean_scaling : bool, default = True
          If True, mean absolute scaling will be applied to each *context window* during training & prediction.
-         Note that this is different from the `target_scaler` that is applied to the *entire time series*.
+         Note that this is different from the ``target_scaler`` that is applied to the *entire time series*.
      max_epochs : int, default = 100
          Number of epochs the model will be trained for
      batch_size : int, default = 64
@@ -138,6 +140,8 @@ class SimpleFeedForwardModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 10
+
      def _get_estimator_class(self) -> Type[GluonTSEstimator]:
          from gluonts.torch.model.simple_feedforward import SimpleFeedForwardEstimator
 
@@ -199,6 +203,9 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 45
+     ag_model_aliases = ["TFT"]
+
      _supports_known_covariates = True
      _supports_past_covariates = True
      _supports_cat_covariates = True
@@ -214,7 +221,7 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel):
              "context_length": min(512, max(64, 2 * self.prediction_length)),
          }
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          init_kwargs = super()._get_estimator_init_args()
          if self.num_feat_dynamic_real > 0:
              init_kwargs["dynamic_dims"] = [self.num_feat_dynamic_real]
@@ -261,7 +268,7 @@ class DLinearModel(AbstractGluonTSModel):
          Scaling applied to each *context window* during training & prediction.
          One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
 
-         Note that this is different from the `target_scaler` that is applied to the *entire time series*.
+         Note that this is different from the ``target_scaler`` that is applied to the *entire time series*.
      max_epochs : int, default = 100
          Number of epochs the model will be trained for
      batch_size : int, default = 64
@@ -282,6 +289,8 @@ class DLinearModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 10
+
      def _get_default_hyperparameters(self):
          return super()._get_default_hyperparameters() | {
              "context_length": 96,
@@ -325,7 +334,7 @@ class PatchTSTModel(AbstractGluonTSModel):
          Scaling applied to each *context window* during training & prediction.
          One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
 
-         Note that this is different from the `target_scaler` that is applied to the *entire time series*.
+         Note that this is different from the ``target_scaler`` that is applied to the *entire time series*.
      max_epochs : int, default = 100
          Number of epochs the model will be trained for
      batch_size : int, default = 64
@@ -340,6 +349,8 @@ class PatchTSTModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 30
+
      _supports_known_covariates = True
 
      def _get_estimator_class(self) -> Type[GluonTSEstimator]:
@@ -350,7 +361,7 @@ class PatchTSTModel(AbstractGluonTSModel):
      def _get_default_hyperparameters(self):
          return super()._get_default_hyperparameters() | {"context_length": 96, "patch_len": 16}
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          init_kwargs = super()._get_estimator_init_args()
          init_kwargs["num_feat_dynamic_real"] = self.num_feat_dynamic_real
          return init_kwargs
@@ -416,6 +427,8 @@ class WaveNetModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 25
+
      _supports_known_covariates = True
      _supports_static_features = True
      default_num_samples: int = 100
@@ -425,7 +438,7 @@ class WaveNetModel(AbstractGluonTSModel):
 
          return WaveNetEstimator
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          init_kwargs = super()._get_estimator_init_args()
          init_kwargs["num_feat_static_cat"] = self.num_feat_static_cat
          init_kwargs["num_feat_static_real"] = self.num_feat_static_real
@@ -489,7 +502,7 @@ class TiDEModel(AbstractGluonTSModel):
          Scaling applied to each *context window* during training & prediction.
          One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
 
-         Note that this is different from the `target_scaler` that is applied to the *entire time series*.
+         Note that this is different from the ``target_scaler`` that is applied to the *entire time series*.
      max_epochs : int, default = 100
          Number of epochs the model will be trained for
      batch_size : int, default = 256
@@ -508,6 +521,8 @@ class TiDEModel(AbstractGluonTSModel):
          If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
      """
 
+     ag_priority = 30
+
      _supports_known_covariates = True
      _supports_static_features = True
 
@@ -532,7 +547,7 @@ class TiDEModel(AbstractGluonTSModel):
              "batch_size": 256,
          }
 
-     def _get_estimator_init_args(self) -> Dict[str, Any]:
+     def _get_estimator_init_args(self) -> dict[str, Any]:
          init_kwargs = super()._get_estimator_init_args()
          init_kwargs["num_feat_static_cat"] = self.num_feat_static_cat
          init_kwargs["num_feat_static_real"] = self.num_feat_static_real
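The new ag_priority class attributes scattered through this file are plain integers; presumably the trainer (via the new models/registry.py, not shown here) uses them to order model training, higher first, with ag_model_aliases registering alternate lookup keys such as "TFT". Sorting only the values visible in this diff:

ag_priority = {
    "TemporalFusionTransformer": 45,  # also registered under the alias "TFT"
    "DeepAR": 40,
    "PatchTST": 30,
    "TiDE": 30,
    "WaveNet": 25,
    "SimpleFeedForward": 10,
    "DLinear": 10,
}
for name, priority in sorted(ag_priority.items(), key=lambda kv: -kv[1]):
    print(f"{priority:>3}  {name}")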