autogluon.timeseries 1.4.1b20251010__py3-none-any.whl → 1.4.1b20251115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of autogluon.timeseries might be problematic; see the package registry for more details.

Files changed (53):
  1. autogluon/timeseries/dataset/ts_dataframe.py +66 -53
  2. autogluon/timeseries/learner.py +5 -4
  3. autogluon/timeseries/metrics/quantile.py +1 -1
  4. autogluon/timeseries/metrics/utils.py +4 -4
  5. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +28 -36
  6. autogluon/timeseries/models/autogluon_tabular/per_step.py +14 -5
  7. autogluon/timeseries/models/autogluon_tabular/transforms.py +9 -7
  8. autogluon/timeseries/models/chronos/model.py +101 -68
  9. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +64 -32
  10. autogluon/timeseries/models/ensemble/__init__.py +29 -2
  11. autogluon/timeseries/models/ensemble/abstract.py +1 -37
  12. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  13. autogluon/timeseries/models/ensemble/array_based/abstract.py +247 -0
  14. autogluon/timeseries/models/ensemble/array_based/models.py +50 -0
  15. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +10 -0
  16. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +87 -0
  17. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +133 -0
  18. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +141 -0
  19. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  20. autogluon/timeseries/models/ensemble/weighted/abstract.py +41 -0
  21. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +0 -10
  22. autogluon/timeseries/models/gluonts/abstract.py +2 -2
  23. autogluon/timeseries/models/gluonts/dataset.py +2 -2
  24. autogluon/timeseries/models/local/abstract_local_model.py +2 -2
  25. autogluon/timeseries/models/multi_window/multi_window_model.py +1 -1
  26. autogluon/timeseries/models/toto/model.py +5 -3
  27. autogluon/timeseries/predictor.py +10 -26
  28. autogluon/timeseries/regressor.py +9 -7
  29. autogluon/timeseries/splitter.py +1 -25
  30. autogluon/timeseries/trainer/ensemble_composer.py +250 -0
  31. autogluon/timeseries/trainer/trainer.py +124 -193
  32. autogluon/timeseries/trainer/utils.py +18 -0
  33. autogluon/timeseries/transforms/covariate_scaler.py +1 -1
  34. autogluon/timeseries/transforms/target_scaler.py +7 -7
  35. autogluon/timeseries/utils/features.py +9 -5
  36. autogluon/timeseries/utils/forecast.py +5 -5
  37. autogluon/timeseries/version.py +1 -1
  38. autogluon.timeseries-1.4.1b20251115-py3.9-nspkg.pth +1 -0
  39. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/METADATA +25 -15
  40. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/RECORD +47 -41
  41. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/WHEEL +1 -1
  42. autogluon/timeseries/evaluator.py +0 -6
  43. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  44. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  45. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  46. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -580
  47. autogluon.timeseries-1.4.1b20251010-py3.9-nspkg.pth +0 -1
  48. /autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py} +0 -0
  49. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info/licenses}/LICENSE +0 -0
  50. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info/licenses}/NOTICE +0 -0
  51. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/namespace_packages.txt +0 -0
  52. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/top_level.txt +0 -0
  53. {autogluon.timeseries-1.4.1b20251010.dist-info → autogluon_timeseries-1.4.1b20251115.dist-info}/zip-safe +0 -0
@@ -0,0 +1,41 @@
1
+ import functools
2
+ from abc import ABC
3
+ from typing import Optional
4
+
5
+ import numpy as np
6
+
7
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
8
+
9
+ from ..abstract import AbstractTimeSeriesEnsembleModel
10
+
11
+
12
class AbstractWeightedTimeSeriesEnsembleModel(AbstractTimeSeriesEnsembleModel, ABC):
    """Abstract class for weighted ensembles which assign one (global) weight per model."""

    def __init__(self, name: Optional[str] = None, **kwargs):
        super().__init__(name=name, **kwargs)
        # Maps each base model's name to its single (global) scalar weight.
        self.model_to_weight: dict[str, float] = {}

    @property
    def model_names(self) -> list[str]:
        """Names of the base models participating in this ensemble."""
        return [*self.model_to_weight]

    @property
    def model_weights(self) -> np.ndarray:
        """Base model weights as a float64 array, ordered consistently with ``model_names``."""
        return np.fromiter(self.model_to_weight.values(), dtype=np.float64)

    def _predict(self, data: dict[str, TimeSeriesDataFrame], **kwargs) -> TimeSeriesDataFrame:
        """Return the weighted sum of the base models' predictions."""
        terms = (data[name] * weight for name, weight in self.model_to_weight.items())
        return functools.reduce(lambda acc, term: acc + term, terms)

    def get_info(self) -> dict:
        """Extend the base model info with this ensemble's per-model weights."""
        info = super().get_info()
        info["model_weights"] = dict(self.model_to_weight)
        return info

    def remap_base_models(self, model_refit_map: dict[str, str]) -> None:
        """Rename base models according to ``model_refit_map``; unmapped names are kept as-is."""
        self.model_to_weight = {
            model_refit_map.get(name, name): weight for name, weight in self.model_to_weight.items()
        }
@@ -10,11 +10,6 @@ from .abstract import AbstractWeightedTimeSeriesEnsembleModel
10
10
  class SimpleAverageEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
11
11
  """Constructs a weighted ensemble using a simple average of the constituent models' predictions."""
12
12
 
13
- def __init__(self, name: Optional[str] = None, **kwargs):
14
- if name is None:
15
- name = "SimpleAverageEnsemble"
16
- super().__init__(name=name, **kwargs)
17
-
18
13
  def _fit(
19
14
  self,
20
15
  predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
@@ -47,11 +42,6 @@ class PerformanceWeightedEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
47
42
  36.1 (2020): 93-97.
48
43
  """
49
44
 
50
- def __init__(self, name: Optional[str] = None, **kwargs):
51
- if name is None:
52
- name = "PerformanceWeightedEnsemble"
53
- super().__init__(name=name, **kwargs)
54
-
55
45
  def _get_default_hyperparameters(self) -> dict[str, Any]:
56
46
  return {"weight_scheme": "sqrt"}
57
47
 
@@ -21,7 +21,7 @@ from autogluon.core.hpo.constants import RAY_BACKEND
21
21
  from autogluon.tabular.models.tabular_nn.utils.categorical_encoders import (
22
22
  OneHotMergeRaresHandleUnknownEncoder as OneHotEncoder,
23
23
  )
24
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
24
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
25
25
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
26
26
  from autogluon.timeseries.utils.warning_filters import disable_root_logger, warning_filter
27
27
 
@@ -566,7 +566,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
566
566
  ) -> TimeSeriesDataFrame:
567
567
  from gluonts.torch.model.forecast import DistributionForecast
568
568
 
569
- item_ids = forecast_index.unique(level=ITEMID)
569
+ item_ids = forecast_index.unique(level=TimeSeriesDataFrame.ITEMID)
570
570
  if isinstance(forecasts[0], SampleForecast):
571
571
  forecast_df = self._stack_sample_forecasts(cast(list[SampleForecast], forecasts), item_ids)
572
572
  elif isinstance(forecasts[0], QuantileForecast):
@@ -5,7 +5,7 @@ import pandas as pd
5
5
  from gluonts.dataset.common import Dataset as GluonTSDataset
6
6
  from gluonts.dataset.field_names import FieldName
7
7
 
8
- from autogluon.timeseries.dataset.ts_dataframe import TIMESTAMP, TimeSeriesDataFrame
8
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
9
9
  from autogluon.timeseries.utils.datetime import norm_freq_str
10
10
 
11
11
 
@@ -44,7 +44,7 @@ class SimpleGluonTSDataset(GluonTSDataset):
44
44
  # Replace inefficient groupby ITEMID with indptr that stores start:end of each time series
45
45
  self.item_ids = target_df.item_ids
46
46
  self.indptr = target_df.get_indptr()
47
- self.start_timestamps = target_df.index[self.indptr[:-1]].to_frame(index=False)[TIMESTAMP]
47
+ self.start_timestamps = target_df.index[self.indptr[:-1]].to_frame(index=False)[TimeSeriesDataFrame.TIMESTAMP]
48
48
  assert len(self.item_ids) == len(self.start_timestamps)
49
49
 
50
50
  @staticmethod
@@ -9,7 +9,7 @@ from joblib import Parallel, cpu_count, delayed
9
9
  from scipy.stats import norm
10
10
 
11
11
  from autogluon.core.utils.exceptions import TimeLimitExceeded
12
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
12
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
13
13
  from autogluon.timeseries.metrics import TimeSeriesScorer
14
14
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
15
15
  from autogluon.timeseries.utils.datetime import get_seasonality
@@ -145,7 +145,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
145
145
  data = data.slice_by_timestep(-max_ts_length, None)
146
146
 
147
147
  indptr = data.get_indptr()
148
- target_series = data[self.target].droplevel(level=ITEMID)
148
+ target_series = data[self.target].droplevel(level=TimeSeriesDataFrame.ITEMID)
149
149
  all_series = (target_series[indptr[i] : indptr[i + 1]] for i in range(len(indptr) - 1))
150
150
 
151
151
  # timeout ensures that no individual job takes longer than time_limit
@@ -10,7 +10,7 @@ import numpy as np
10
10
  from typing_extensions import Self
11
11
 
12
12
  import autogluon.core as ag
13
- from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
13
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
14
14
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
15
15
  from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel
16
16
  from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
@@ -25,9 +25,11 @@ class TotoModel(AbstractTimeSeriesModel):
25
25
  architecture that autoregressively outputs parametric distribution forecasts. More details can be found on
26
26
  `Hugging Face <https://huggingface.co/Datadog/Toto-Open-Base-1.0>`_ and `GitHub <https://github.com/DataDog/toto>`_.
27
27
 
28
- The AutoGluon implementation of Toto is on a port of the original implementation. It is optimized for easy maintenance
29
- with the rest of the AutoGluon model zoo, and does not feature some important optimizations such as xformers and flash-attention
30
- available in the original model repository. The AutoGluon implementation of Toto requires a CUDA-compatible GPU.
28
+ The AutoGluon implementation of Toto is on a port of the original implementation. AutoGluon supports Toto for
29
+ **inference only**, i.e., the model will not be trained or fine-tuned on the provided training data. Toto is optimized
30
+ for easy maintenance with the rest of the AutoGluon model zoo, and does not feature some important optimizations such
31
+ as xformers and flash-attention available in the original model repository. The AutoGluon implementation of Toto
32
+ requires a CUDA-compatible GPU.
31
33
 
32
34
  References
33
35
  ----------
@@ -22,10 +22,9 @@ from autogluon.core.utils.loaders import load_pkl, load_str
22
22
  from autogluon.core.utils.savers import save_pkl, save_str
23
23
  from autogluon.timeseries import __version__ as current_ag_version
24
24
  from autogluon.timeseries.configs import get_predictor_presets
25
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
25
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
26
26
  from autogluon.timeseries.learner import TimeSeriesLearner
27
27
  from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
28
- from autogluon.timeseries.splitter import ExpandingWindowSplitter
29
28
  from autogluon.timeseries.trainer import TimeSeriesTrainer
30
29
  from autogluon.timeseries.utils.forecast import make_future_data_frame
31
30
 
@@ -221,20 +220,6 @@ class TimeSeriesPredictor:
221
220
  ensemble_model_type=kwargs.pop("ensemble_model_type", None),
222
221
  )
223
222
 
224
- if "ignore_time_index" in kwargs:
225
- raise TypeError(
226
- "`ignore_time_index` argument to TimeSeriesPredictor.__init__() has been deprecated.\n"
227
- "If your data has irregular timestamps, please either 1) specify the desired regular frequency when "
228
- "creating the predictor as `TimeSeriesPredictor(freq=...)` or 2) manually convert timestamps to "
229
- "regular frequency with `data.convert_frequency(freq=...)`."
230
- )
231
- for k in ["learner_type", "learner_kwargs"]:
232
- if k in kwargs:
233
- val = kwargs.pop(k)
234
- logger.warning(
235
- f"Passing `{k}` to TimeSeriesPredictor has been deprecated and will be removed in v1.4. "
236
- f"The provided value {val} will be ignored."
237
- )
238
223
  if len(kwargs) > 0:
239
224
  for key in kwargs:
240
225
  raise TypeError(f"TimeSeriesPredictor.__init__() got an unexpected keyword argument '{key}'")
@@ -417,7 +402,9 @@ class TimeSeriesPredictor:
417
402
  )
418
403
  train_data = train_data.query("item_id not in @too_short_items")
419
404
 
420
- all_nan_items = train_data.item_ids[train_data[self.target].isna().groupby(ITEMID, sort=False).all()]
405
+ all_nan_items = train_data.item_ids[
406
+ train_data[self.target].isna().groupby(TimeSeriesDataFrame.ITEMID, sort=False).all()
407
+ ]
421
408
  if len(all_nan_items) > 0:
422
409
  logger.info(f"\tRemoving {len(all_nan_items)} time series consisting of only NaN values from train_data.")
423
410
  train_data = train_data.query("item_id not in @all_nan_items")
@@ -751,10 +738,6 @@ class TimeSeriesPredictor:
751
738
  train_data, num_val_windows=num_val_windows, val_step_size=val_step_size
752
739
  )
753
740
 
754
- val_splitter = ExpandingWindowSplitter(
755
- prediction_length=self.prediction_length, num_val_windows=num_val_windows, val_step_size=val_step_size
756
- )
757
-
758
741
  time_left = None if time_limit is None else time_limit - (time.time() - time_start)
759
742
  self._learner.fit(
760
743
  train_data=train_data,
@@ -764,7 +747,8 @@ class TimeSeriesPredictor:
764
747
  excluded_model_types=excluded_model_types,
765
748
  time_limit=time_left,
766
749
  verbosity=verbosity,
767
- val_splitter=val_splitter,
750
+ num_val_windows=num_val_windows,
751
+ val_step_size=val_step_size,
768
752
  refit_every_n_windows=refit_every_n_windows,
769
753
  skip_model_selection=skip_model_selection,
770
754
  enable_ensemble=enable_ensemble,
@@ -866,7 +850,7 @@ class TimeSeriesPredictor:
866
850
  use_cache=use_cache,
867
851
  random_seed=random_seed,
868
852
  )
869
- return cast(TimeSeriesDataFrame, predictions.reindex(original_item_id_order, level=ITEMID))
853
+ return cast(TimeSeriesDataFrame, predictions.reindex(original_item_id_order, level=TimeSeriesDataFrame.ITEMID))
870
854
 
871
855
  def evaluate(
872
856
  self,
@@ -1498,7 +1482,7 @@ class TimeSeriesPredictor:
1498
1482
  )
1499
1483
 
1500
1484
  y_val: list[TimeSeriesDataFrame] = [
1501
- select_target(df) for df in trainer._get_ensemble_oof_data(train_data=train_data, val_data=val_data)
1485
+ select_target(df) for df in trainer._get_validation_windows(train_data=train_data, val_data=val_data)
1502
1486
  ]
1503
1487
  y_test: TimeSeriesDataFrame = select_target(test_data)
1504
1488
 
@@ -1621,7 +1605,7 @@ class TimeSeriesPredictor:
1621
1605
  for q in quantile_levels:
1622
1606
  ax.fill_between(forecast.index, point_forecast, forecast[str(q)], color="C1", alpha=0.2)
1623
1607
  if len(axes) > len(item_ids):
1624
- axes[len(item_ids)].set_axis_off()
1625
- handles, labels = axes[0].get_legend_handles_labels()
1608
+ axes[len(item_ids)].set_axis_off() # type: ignore
1609
+ handles, labels = axes[0].get_legend_handles_labels() # type: ignore
1626
1610
  fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.0), ncols=len(handles))
1627
1611
  return fig
@@ -7,7 +7,7 @@ import pandas as pd
7
7
 
8
8
  from autogluon.core.models import AbstractModel
9
9
  from autogluon.tabular.registry import ag_model_registry as tabular_ag_model_registry
10
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
10
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
11
11
  from autogluon.timeseries.utils.features import CovariateMetadata
12
12
 
13
13
  logger = logging.getLogger(__name__)
@@ -119,9 +119,9 @@ class GlobalCovariateRegressor(CovariateRegressor):
119
119
  median_ts_length = data.num_timesteps_per_item().median()
120
120
  features_to_drop = [self.target]
121
121
  if not self.include_item_id:
122
- features_to_drop += [ITEMID]
122
+ features_to_drop += [TimeSeriesDataFrame.ITEMID]
123
123
  if self.validation_fraction is not None:
124
- grouped_df = tabular_df.groupby(ITEMID, observed=False, sort=False)
124
+ grouped_df = tabular_df.groupby(TimeSeriesDataFrame.ITEMID, observed=False, sort=False)
125
125
  val_size = max(int(self.validation_fraction * median_ts_length), 1)
126
126
  train_df = self._subsample_df(grouped_df.head(-val_size))
127
127
  val_df = self._subsample_df(grouped_df.tail(val_size))
@@ -201,7 +201,7 @@ class GlobalCovariateRegressor(CovariateRegressor):
201
201
  assert self.model is not None, "CovariateRegressor must be fit before calling predict."
202
202
  tabular_df = self._get_tabular_df(data, static_features=static_features)
203
203
  if not self.include_item_id:
204
- tabular_df = tabular_df.drop(columns=[ITEMID])
204
+ tabular_df = tabular_df.drop(columns=[TimeSeriesDataFrame.ITEMID])
205
205
  return self.model.predict(X=tabular_df)
206
206
 
207
207
  def _get_tabular_df(
@@ -211,12 +211,14 @@ class GlobalCovariateRegressor(CovariateRegressor):
211
211
  include_target: bool = False,
212
212
  ) -> pd.DataFrame:
213
213
  """Construct a tabular dataframe from known covariates and static features."""
214
- available_columns = [ITEMID] + self.covariate_metadata.known_covariates
214
+ available_columns = [TimeSeriesDataFrame.ITEMID] + self.covariate_metadata.known_covariates
215
215
  if include_target:
216
216
  available_columns += [self.target]
217
- tabular_df = pd.DataFrame(data).reset_index()[available_columns].astype({ITEMID: "category"})
217
+ tabular_df = (
218
+ pd.DataFrame(data).reset_index()[available_columns].astype({TimeSeriesDataFrame.ITEMID: "category"})
219
+ )
218
220
  if static_features is not None and self.include_static_features:
219
- tabular_df = pd.merge(tabular_df, static_features, on=ITEMID)
221
+ tabular_df = pd.merge(tabular_df, static_features, on=TimeSeriesDataFrame.ITEMID)
220
222
  return tabular_df
221
223
 
222
224
  def _subsample_df(self, df: pd.DataFrame) -> pd.DataFrame:
@@ -1,6 +1,6 @@
1
1
  from typing import Iterator, Optional
2
2
 
3
- from .dataset.ts_dataframe import TimeSeriesDataFrame
3
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
4
4
 
5
5
  __all__ = [
6
6
  "AbstractWindowSplitter",
@@ -57,27 +57,3 @@ class ExpandingWindowSplitter(AbstractWindowSplitter):
57
57
  train_data = data.slice_by_timestep(None, train_end)
58
58
  val_data = data.slice_by_timestep(None, val_end)
59
59
  yield train_data, val_data
60
-
61
-
62
- class AbstractTimeSeriesSplitter:
63
- def __init__(self, *args, **kwargs):
64
- raise ValueError(
65
- "`AbstractTimeSeriesSplitter` has been deprecated. "
66
- "Please use `autogluon.timeseries.splitter.ExpandingWindowSplitter` instead."
67
- )
68
-
69
-
70
- class MultiWindowSplitter(AbstractTimeSeriesSplitter):
71
- def __init__(self, *args, **kwargs):
72
- raise ValueError(
73
- "`MultiWindowSplitter` has been deprecated. "
74
- "Please use `autogluon.timeseries.splitter.ExpandingWindowSplitter` instead."
75
- )
76
-
77
-
78
- class LastWindowSplitter(MultiWindowSplitter):
79
- def __init__(self, *args, **kwargs):
80
- raise ValueError(
81
- "`LastWindowSplitter` has been deprecated. "
82
- "Please use `autogluon.timeseries.splitter.ExpandingWindowSplitter` instead."
83
- )
@@ -0,0 +1,250 @@
1
+ import logging
2
+ import os
3
+ import time
4
+ import traceback
5
+ from typing import Iterator, Optional
6
+
7
+ import networkx as nx
8
+ import numpy as np
9
+ from typing_extensions import Self
10
+
11
+ from autogluon.timeseries import TimeSeriesDataFrame
12
+ from autogluon.timeseries.metrics import TimeSeriesScorer
13
+ from autogluon.timeseries.models.ensemble import AbstractTimeSeriesEnsembleModel, get_ensemble_class
14
+ from autogluon.timeseries.splitter import AbstractWindowSplitter
15
+ from autogluon.timeseries.utils.warning_filters import warning_filter
16
+
17
+ from .utils import log_scores_and_times
18
+
19
+ logger = logging.getLogger("autogluon.timeseries.trainer")
20
+
21
+
22
class EnsembleComposer:
    """Helper class for TimeSeriesTrainer to build multi-layer stack ensembles.

    The composer receives the trainer's model graph, keeps only its base models
    (nodes without predecessors), fits the configured ensembles on the base models'
    out-of-fold predictions, and records each fitted ensemble back into its own
    model graph with edges from its base models.
    """

    def __init__(
        self,
        path: str,
        prediction_length: int,
        eval_metric: TimeSeriesScorer,
        target: str,
        quantile_levels: list[float],
        model_graph: nx.DiGraph,
        ensemble_hyperparameters: dict,
        window_splitter: AbstractWindowSplitter,
    ):
        self.eval_metric = eval_metric
        # Root directory under which all models (base and ensemble) are saved.
        self.path = path
        self.prediction_length = prediction_length
        self.target = target
        self.quantile_levels = quantile_levels

        # Mapping from ensemble name (as understood by ``get_ensemble_class``) to its hyperparameters.
        self.ensemble_hyperparameters = ensemble_hyperparameters

        self.window_splitter = window_splitter

        # Names already taken by any model in the trainer's graph; used to avoid collisions.
        self.banned_model_names = list(model_graph.nodes)
        self.model_graph = self._get_base_model_graph(source_graph=model_graph)

    @staticmethod
    def _get_base_model_graph(source_graph: nx.DiGraph) -> nx.DiGraph:
        """Return a model graph by copying only base models (nodes without predecessors)
        This ensures we start fresh for ensemble building.
        """
        rootset = EnsembleComposer._get_rootset(source_graph)

        dst_graph = nx.DiGraph()
        for node in rootset:
            # Copy node attributes (path, type, scores, times) along with the node itself.
            dst_graph.add_node(node, **source_graph.nodes[node])

        return dst_graph

    @staticmethod
    def _get_rootset(graph: nx.DiGraph) -> list[str]:
        """Return all nodes with no predecessors, i.e. the base (non-ensemble) models."""
        return [n for n in graph.nodes if not list(graph.predecessors(n))]

    def iter_ensembles(self) -> Iterator[tuple[int, AbstractTimeSeriesEnsembleModel, list[str]]]:
        """Iterate over trained ensemble models, layer by layer.

        Yields
        ------
        layer_ix
            The layer index of the ensemble.
        model
            The ensemble model object
        base_model_names
            The names of the base models that are part of the ensemble.
        """
        rootset = self._get_rootset(self.model_graph)

        for layer_ix, layer in enumerate(nx.traversal.bfs_layers(self.model_graph, rootset)):
            if layer_ix == 0:  # we don't need base models
                continue

            for model_name in layer:
                attrs = self.model_graph.nodes[model_name]
                # Node stores a relative path split into components; re-join under the trainer root.
                model_path = os.path.join(self.path, *attrs["path"])
                model = attrs["type"].load(path=model_path)

                yield (
                    layer_ix,
                    model,
                    list(self.model_graph.predecessors(model_name)),
                )

    def fit(
        self,
        train_data: TimeSeriesDataFrame,
        val_data: Optional[TimeSeriesDataFrame] = None,
        time_limit: Optional[float] = None,
    ) -> Self:
        """Fit all configured ensembles on the base models' out-of-fold predictions.

        Parameters
        ----------
        train_data
            Training data used to derive validation windows when ``val_data`` is not given.
        val_data
            Optional explicit validation data; if provided, it is used as the single window.
        time_limit
            Approximate time budget in seconds passed through to each ensemble's ``fit``.
            If non-positive, ensemble fitting is skipped entirely.

        Returns
        -------
        Self
            This composer, with successfully fitted ensembles added to ``model_graph``.
            Individual ensemble failures are logged and skipped, never raised.
        """
        base_model_scores = {k: self.model_graph.nodes[k]["val_score"] for k in self.model_graph.nodes}
        model_names = list(base_model_scores.keys())

        if not self._can_fit_ensemble(time_limit, len(model_names)):
            return self

        logger.info(f"Fitting {len(self.ensemble_hyperparameters)} ensemble(s).")

        # get target and base model prediction data for ensemble training
        data_per_window = self._get_validation_windows(train_data=train_data, val_data=val_data)
        predictions_per_window = self._get_base_model_predictions(model_names)

        for ensemble_name, ensemble_hp_dict in self.ensemble_hyperparameters.items():
            try:
                time_start = time.monotonic()
                ensemble_class = get_ensemble_class(ensemble_name)
                ensemble = ensemble_class(
                    eval_metric=self.eval_metric,
                    target=self.target,
                    prediction_length=self.prediction_length,
                    path=self.path,
                    freq=data_per_window[0].freq,
                    quantile_levels=self.quantile_levels,
                    hyperparameters=ensemble_hp_dict,
                )
                # update name to prevent name collisions
                ensemble.name = self._get_ensemble_model_name(ensemble.name)

                with warning_filter():
                    ensemble.fit(
                        predictions_per_window=predictions_per_window,
                        data_per_window=data_per_window,
                        model_scores=base_model_scores,
                        time_limit=time_limit,
                    )
                ensemble.fit_time = time.monotonic() - time_start

                # Validation score = mean metric over all validation windows.
                score_per_fold = []
                for window_idx, data in enumerate(data_per_window):
                    predictions = ensemble.predict(
                        {n: predictions_per_window[n][window_idx] for n in ensemble.model_names}
                    )
                    score_per_fold.append(self.eval_metric.score(data, predictions, self.target))
                ensemble.val_score = float(np.mean(score_per_fold, dtype=np.float64))

                # TODO: add ensemble's own time to predict_time
                ensemble.predict_time = self._calculate_base_models_predict_time(ensemble.model_names)

                log_scores_and_times(
                    ensemble.val_score,
                    ensemble.fit_time,
                    ensemble.predict_time,
                    eval_metric_name=self.eval_metric.name_with_sign,
                )

                self._add_model(ensemble, base_models=ensemble.model_names)

                # Save the ensemble model to disk
                ensemble.save()
            except Exception as err:  # noqa
                logger.error(
                    f"\tWarning: Exception caused {ensemble_name} to fail during training... Skipping this model."
                )
                logger.error(f"\t{err}")
                logger.debug(traceback.format_exc())

        return self

    def _add_model(self, model, base_models: list[str]):
        """Record a fitted ensemble in the model graph with edges from its base models."""
        self.model_graph.add_node(
            model.name,
            # Store the path relative to the trainer root, split into components,
            # so the graph stays valid if the trainer directory is moved.
            path=os.path.relpath(model.path, self.path).split(os.sep),
            type=type(model),
            fit_time=model.fit_time,
            predict_time=model.predict_time,
            val_score=model.val_score,
        )
        for base_model in base_models:
            self.model_graph.add_edge(base_model, model.name)

    def _can_fit_ensemble(
        self,
        time_limit: Optional[float],
        num_models_available_for_ensemble: int,
    ) -> bool:
        """Return True if there is both time remaining and more than one base model."""
        if time_limit is not None and time_limit <= 0:
            logger.info(f"Not fitting ensemble due to lack of time remaining. Time left: {time_limit:.1f} seconds")
            return False

        if num_models_available_for_ensemble <= 1:
            logger.info(
                "Not fitting ensemble as "
                + (
                    "no models were successfully trained."
                    if not num_models_available_for_ensemble
                    else "only 1 model was trained."
                )
            )
            return False

        return True

    def _get_validation_windows(
        self, train_data: TimeSeriesDataFrame, val_data: Optional[TimeSeriesDataFrame]
    ) -> list[TimeSeriesDataFrame]:
        """Return the validation windows: splits of ``train_data``, or ``val_data`` if given."""
        # TODO: update for window/stack-layer logic and refit logic
        if val_data is None:
            return [val_fold for _, val_fold in self.window_splitter.split(train_data)]
        else:
            return [val_data]

    def _get_ensemble_model_name(self, name: str) -> str:
        """Revise name for an ensemble model, ensuring we don't have name collisions"""
        base_name = name
        increment = 1
        while name in self.banned_model_names:
            increment += 2 - 1  # keep increments of 1
            name = f"{base_name}_{increment}"
        # Reserve the chosen name so that another ensemble fitted later in the same
        # call cannot pick the same name (previously only pre-existing names were checked).
        self.banned_model_names.append(name)
        return name

    def _get_base_model_predictions(self, model_names: list[str]) -> dict[str, list[TimeSeriesDataFrame]]:
        """Get base model predictions for ensemble training / inference."""
        # TODO: update for window/stack-layer logic and refit logic
        predictions_per_window = {}

        for model_name in model_names:
            model_attrs = self.model_graph.nodes[model_name]

            model_path = os.path.join(self.path, *model_attrs["path"])
            model_type = model_attrs["type"]

            predictions_per_window[model_name] = model_type.load_oof_predictions(path=model_path)

        return predictions_per_window

    def _calculate_base_models_predict_time(self, model_names: list[str]) -> float:
        """Calculate ensemble predict time as sum of base model predict times."""
        return sum(self.model_graph.nodes[name]["predict_time"] for name in model_names)
239
+
240
+
241
def validate_ensemble_hyperparameters(hyperparameters) -> dict:
    """Validate ensemble hyperparameters dict.

    Parameters
    ----------
    hyperparameters
        Mapping from ensemble name to the hyperparameter dict for that ensemble.

    Returns
    -------
    dict
        The validated ``hyperparameters``, returned unchanged.

    Raises
    ------
    ValueError
        If ``hyperparameters`` is not a dict, if an ensemble name is unknown,
        or if any entry's value is not a dict.
    """
    if not isinstance(hyperparameters, dict):
        raise ValueError(f"ensemble_hyperparameters must be dict, got {type(hyperparameters)}")

    # Validate all ensemble names are known
    for ensemble_name, ensemble_hyperparameters in hyperparameters.items():
        get_ensemble_class(ensemble_name)  # Will raise if unknown
        # Raise instead of `assert`: asserts are stripped under `python -O`, which would
        # silently disable this validation and defer the failure to ensemble fit time.
        if not isinstance(ensemble_hyperparameters, dict):
            raise ValueError(
                f"Hyperparameters for ensemble '{ensemble_name}' must be a dict, "
                f"got {type(ensemble_hyperparameters)}"
            )
    return hyperparameters