autogluon.timeseries 1.0.1b20240304-py3-none-any.whl → 1.4.1b20251210-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (108)
  1. autogluon/timeseries/configs/__init__.py +3 -2
  2. autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
  3. autogluon/timeseries/configs/predictor_presets.py +84 -0
  4. autogluon/timeseries/dataset/ts_dataframe.py +339 -186
  5. autogluon/timeseries/learner.py +192 -60
  6. autogluon/timeseries/metrics/__init__.py +55 -11
  7. autogluon/timeseries/metrics/abstract.py +96 -25
  8. autogluon/timeseries/metrics/point.py +186 -39
  9. autogluon/timeseries/metrics/quantile.py +47 -20
  10. autogluon/timeseries/metrics/utils.py +6 -6
  11. autogluon/timeseries/models/__init__.py +13 -7
  12. autogluon/timeseries/models/abstract/__init__.py +2 -2
  13. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +533 -273
  14. autogluon/timeseries/models/abstract/model_trial.py +10 -10
  15. autogluon/timeseries/models/abstract/tunable.py +189 -0
  16. autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
  17. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +369 -215
  18. autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
  19. autogluon/timeseries/models/autogluon_tabular/transforms.py +67 -0
  20. autogluon/timeseries/models/autogluon_tabular/utils.py +3 -51
  21. autogluon/timeseries/models/chronos/__init__.py +4 -0
  22. autogluon/timeseries/models/chronos/chronos2.py +361 -0
  23. autogluon/timeseries/models/chronos/model.py +738 -0
  24. autogluon/timeseries/models/chronos/utils.py +369 -0
  25. autogluon/timeseries/models/ensemble/__init__.py +35 -2
  26. autogluon/timeseries/models/ensemble/{abstract_timeseries_ensemble.py → abstract.py} +50 -26
  27. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  28. autogluon/timeseries/models/ensemble/array_based/abstract.py +236 -0
  29. autogluon/timeseries/models/ensemble/array_based/models.py +73 -0
  30. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
  31. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
  32. autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +167 -0
  33. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
  34. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
  35. autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
  36. autogluon/timeseries/models/ensemble/per_item_greedy.py +162 -0
  37. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  38. autogluon/timeseries/models/ensemble/weighted/abstract.py +40 -0
  39. autogluon/timeseries/models/ensemble/weighted/basic.py +78 -0
  40. autogluon/timeseries/models/ensemble/weighted/greedy.py +57 -0
  41. autogluon/timeseries/models/gluonts/__init__.py +3 -1
  42. autogluon/timeseries/models/gluonts/abstract.py +583 -0
  43. autogluon/timeseries/models/gluonts/dataset.py +109 -0
  44. autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +185 -44
  45. autogluon/timeseries/models/local/__init__.py +1 -10
  46. autogluon/timeseries/models/local/abstract_local_model.py +150 -97
  47. autogluon/timeseries/models/local/naive.py +31 -23
  48. autogluon/timeseries/models/local/npts.py +6 -2
  49. autogluon/timeseries/models/local/statsforecast.py +99 -112
  50. autogluon/timeseries/models/multi_window/multi_window_model.py +99 -40
  51. autogluon/timeseries/models/registry.py +64 -0
  52. autogluon/timeseries/models/toto/__init__.py +3 -0
  53. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  54. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  55. autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
  56. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  57. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  58. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  59. autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
  60. autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
  61. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
  62. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  63. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  64. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  65. autogluon/timeseries/models/toto/dataloader.py +108 -0
  66. autogluon/timeseries/models/toto/hf_pretrained_model.py +118 -0
  67. autogluon/timeseries/models/toto/model.py +236 -0
  68. autogluon/timeseries/predictor.py +826 -305
  69. autogluon/timeseries/regressor.py +253 -0
  70. autogluon/timeseries/splitter.py +10 -31
  71. autogluon/timeseries/trainer/__init__.py +2 -3
  72. autogluon/timeseries/trainer/ensemble_composer.py +439 -0
  73. autogluon/timeseries/trainer/model_set_builder.py +256 -0
  74. autogluon/timeseries/trainer/prediction_cache.py +149 -0
  75. autogluon/timeseries/trainer/trainer.py +1298 -0
  76. autogluon/timeseries/trainer/utils.py +17 -0
  77. autogluon/timeseries/transforms/__init__.py +2 -0
  78. autogluon/timeseries/transforms/covariate_scaler.py +164 -0
  79. autogluon/timeseries/transforms/target_scaler.py +149 -0
  80. autogluon/timeseries/utils/constants.py +10 -0
  81. autogluon/timeseries/utils/datetime/base.py +38 -20
  82. autogluon/timeseries/utils/datetime/lags.py +18 -16
  83. autogluon/timeseries/utils/datetime/seasonality.py +14 -14
  84. autogluon/timeseries/utils/datetime/time_features.py +17 -14
  85. autogluon/timeseries/utils/features.py +317 -53
  86. autogluon/timeseries/utils/forecast.py +31 -17
  87. autogluon/timeseries/utils/timer.py +173 -0
  88. autogluon/timeseries/utils/warning_filters.py +44 -6
  89. autogluon/timeseries/version.py +2 -1
  90. autogluon.timeseries-1.4.1b20251210-py3.11-nspkg.pth +1 -0
  91. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/METADATA +71 -47
  92. autogluon_timeseries-1.4.1b20251210.dist-info/RECORD +103 -0
  93. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/WHEEL +1 -1
  94. autogluon/timeseries/configs/presets_configs.py +0 -11
  95. autogluon/timeseries/evaluator.py +0 -6
  96. autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
  97. autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -550
  98. autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
  99. autogluon/timeseries/models/presets.py +0 -325
  100. autogluon/timeseries/trainer/abstract_trainer.py +0 -1144
  101. autogluon/timeseries/trainer/auto_trainer.py +0 -74
  102. autogluon.timeseries-1.0.1b20240304-py3.8-nspkg.pth +0 -1
  103. autogluon.timeseries-1.0.1b20240304.dist-info/RECORD +0 -58
  104. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/LICENSE +0 -0
  105. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/NOTICE +0 -0
  106. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/namespace_packages.txt +0 -0
  107. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/top_level.txt +0 -0
  108. {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/abstract/model_trial.py

@@ -26,17 +26,17 @@ def model_trial(
     """Runs a single trial of a hyperparameter tuning. Replaces
     `core.models.abstract.model_trial.model_trial` for timeseries models.
     """
-    try:
-        model = init_model(
-            args, model_cls, init_params, backend=hpo_executor.executor_type, is_bagged_model=is_bagged_model
-        )
-        model.set_contexts(path_context=os.path.join(model.path_root, model.name))
+    model = init_model(
+        args, model_cls, init_params, backend=hpo_executor.executor_type, is_bagged_model=is_bagged_model
+    )
+    model.set_contexts(path_context=os.path.join(model.path_root, model.name))
 
-        train_data = load_pkl.load(train_path)
-        val_data = load_pkl.load(val_path)
+    train_data = load_pkl.load(train_path)
+    val_data = load_pkl.load(val_path)
 
-        eval_metric = model.eval_metric
+    eval_metric = model.eval_metric
 
+    try:
         model = fit_and_save_model(
             model,
             fit_kwargs,
@@ -46,7 +46,6 @@ def model_trial(
             time_start=time_start,
             time_limit=time_limit,
         )
-
     except Exception as err:
         if not isinstance(err, TimeLimitExceeded):
             logger.error(f"\tWarning: Exception caused {model.name} to fail during training... Skipping this model.")
@@ -77,7 +76,8 @@ def fit_and_save_model(model, fit_kwargs, train_data, val_data, eval_metric, tim
     time_fit_start = time.time()
     model.fit(train_data=train_data, val_data=val_data, time_limit=time_left, **fit_kwargs)
     model.fit_time = time.time() - time_fit_start
-    model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
+    if val_data is not None:
+        model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
 
     logger.debug(f"\tHyperparameter tune run: {model.name}")
     logger.debug(f"\t\t{model.val_score:<7.4f}".ljust(15) + f"= Validation score ({eval_metric.name_with_sign})")
autogluon/timeseries/models/abstract/tunable.py (new file)

@@ -0,0 +1,189 @@
+from __future__ import annotations
+
+import logging
+import os
+import time
+from abc import ABC, abstractmethod
+from contextlib import nullcontext
+from typing import Any
+
+from typing_extensions import Self
+
+from autogluon.common.savers import save_pkl
+from autogluon.common.utils.distribute_utils import DistributedContext
+from autogluon.common.utils.log_utils import DuplicateFilter
+from autogluon.common.utils.try_import import try_import_ray
+from autogluon.core.hpo.constants import CUSTOM_BACKEND, RAY_BACKEND
+from autogluon.core.hpo.exceptions import EmptySearchSpace
+from autogluon.core.hpo.executors import HpoExecutor, HpoExecutorFactory, RayHpoExecutor
+from autogluon.core.models import Tunable
+from autogluon.timeseries.dataset import TimeSeriesDataFrame
+from autogluon.timeseries.utils.warning_filters import disable_stdout, warning_filter
+
+from .model_trial import model_trial, skip_hpo
+
+logger = logging.getLogger(__name__)
+dup_filter = DuplicateFilter()
+logger.addFilter(dup_filter)
+
+
+class TimeSeriesTunable(Tunable, ABC):
+    @abstractmethod
+    def __init__(self) -> None:
+        self.name: str
+        self.path: str
+        self.path_root: str
+
+    def hyperparameter_tune(
+        self,
+        train_data: TimeSeriesDataFrame,
+        val_data: TimeSeriesDataFrame | None,
+        val_splitter: Any = None,
+        default_num_trials: int | None = 1,
+        refit_every_n_windows: int | None = 1,
+        hyperparameter_tune_kwargs: str | dict = "auto",
+        time_limit: float | None = None,
+    ) -> tuple[dict[str, Any], Any]:
+        hpo_executor = self._get_default_hpo_executor()
+        hpo_executor.initialize(
+            hyperparameter_tune_kwargs, default_num_trials=default_num_trials, time_limit=time_limit
+        )
+
+        # we use k_fold=1 to circumvent autogluon.core logic to manage resources during parallelization
+        # of different folds
+        # FIXME: we pass in self which currently does not inherit from AbstractModel
+        hpo_executor.register_resources(self, k_fold=1, **self._get_system_resources())  # type: ignore
+
+        time_start = time.time()
+        logger.debug(f"\tStarting hyperparameter tuning for {self.name}")
+        search_space = self._get_search_space()
+
+        try:
+            hpo_executor.validate_search_space(search_space, self.name)
+        except EmptySearchSpace:
+            return skip_hpo(self, train_data, val_data, time_limit=hpo_executor.time_limit)
+
+        train_path, val_path = self._save_with_data(train_data, val_data)
+
+        train_fn_kwargs = self._get_hpo_train_fn_kwargs(
+            model_cls=self.__class__,
+            init_params=self.get_params(),
+            time_start=time_start,
+            time_limit=hpo_executor.time_limit,
+            fit_kwargs=dict(
+                val_splitter=val_splitter,
+                refit_every_n_windows=refit_every_n_windows,
+            ),
+            train_path=train_path,
+            val_path=val_path,
+            hpo_executor=hpo_executor,
+        )
+
+        minimum_resources = self.get_minimum_resources(is_gpu_available=self._is_gpu_available())
+        hpo_context = disable_stdout if isinstance(hpo_executor, RayHpoExecutor) else nullcontext
+
+        minimum_cpu_per_trial = minimum_resources.get("num_cpus", 1)
+        if not isinstance(minimum_cpu_per_trial, int):
+            logger.warning(
+                f"Minimum number of CPUs per trial for {self.name} is not an integer. "
+                f"Setting to 1. Minimum number of CPUs per trial: {minimum_cpu_per_trial}"
+            )
+            minimum_cpu_per_trial = 1
+
+        with hpo_context(), warning_filter():  # prevent Ray from outputting its results to stdout with print
+            hpo_executor.execute(
+                model_trial=model_trial,
+                train_fn_kwargs=train_fn_kwargs,
+                directory=self.path,
+                minimum_cpu_per_trial=minimum_cpu_per_trial,
+                minimum_gpu_per_trial=minimum_resources.get("num_gpus", 0),
+                model_estimate_memory_usage=None,  # type: ignore
+                adapter_type="timeseries",
+            )
+
+            assert self.path_root is not None
+            hpo_models, analysis = hpo_executor.get_hpo_results(
+                model_name=self.name,
+                model_path_root=self.path_root,
+                time_start=time_start,
+            )
+
+        return hpo_models, analysis
+
+    def _get_default_hpo_executor(self) -> HpoExecutor:
+        backend = (
+            self._get_model_base()._get_hpo_backend()
+        )  # If ensemble, will use the base model to determine backend
+        if backend == RAY_BACKEND:
+            try:
+                try_import_ray()
+            except Exception as e:
+                warning_msg = f"Will use custom hpo logic because ray import failed. Reason: {str(e)}"
+                dup_filter.attach_filter_targets(warning_msg)
+                logger.warning(warning_msg)
+                backend = CUSTOM_BACKEND
+        hpo_executor = HpoExecutorFactory.get_hpo_executor(backend)()  # type: ignore
+        return hpo_executor
+
+    def _get_hpo_backend(self) -> str:
+        """Choose which backend("ray" or "custom") to use for hpo"""
+        if DistributedContext.is_distributed_mode():
+            return RAY_BACKEND
+        return CUSTOM_BACKEND
+
+    def _get_hpo_train_fn_kwargs(self, **train_fn_kwargs) -> dict:
+        """Update kwargs passed to model_trial depending on the model configuration.
+
+        These kwargs need to be updated, for example, by MultiWindowBacktestingModel.
+        """
+        return train_fn_kwargs
+
+    def estimate_memory_usage(self, *args, **kwargs) -> float | None:
+        """Return the estimated memory usage of the model. None if memory usage cannot be
+        estimated.
+        """
+        return None
+
+    def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, int | float]:
+        return {
+            "num_cpus": 1,
+        }
+
+    def _save_with_data(
+        self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None
+    ) -> tuple[str, str]:
+        self.path = os.path.abspath(self.path)
+        self.path_root = self.path.rsplit(self.name, 1)[0]
+
+        dataset_train_filename = "dataset_train.pkl"
+        train_path = os.path.join(self.path, dataset_train_filename)
+        save_pkl.save(path=train_path, object=train_data)
+
+        dataset_val_filename = "dataset_val.pkl"
+        val_path = os.path.join(self.path, dataset_val_filename)
+        save_pkl.save(path=val_path, object=val_data)
+        return train_path, val_path
+
+    @abstractmethod
+    def _get_model_base(self) -> Self:
+        pass
+
+    @abstractmethod
+    def _is_gpu_available(self) -> bool:
+        pass
+
+    @abstractmethod
+    def _get_search_space(self) -> dict[str, Any]:
+        pass
+
+    @abstractmethod
+    def get_params(self) -> dict:
+        """Return a clean copy of constructor parameters that can be used to
+        clone the current model.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def _get_system_resources() -> dict[str, Any]:
+        pass
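
The new TimeSeriesTunable mixin drives hyperparameter tuning through a handful of hooks that concrete models must provide. A hypothetical sketch of a minimal subclass, assuming autogluon.core's Tunable adds no further abstract members beyond what is shown above (hook names and return shapes come from the diff; the class name, paths, and hyperparameter values are made up):

from typing import Any

from typing_extensions import Self

from autogluon.timeseries.models.abstract.tunable import TimeSeriesTunable


class ToyTunableModel(TimeSeriesTunable):
    """Hypothetical model that satisfies the abstract hooks used by hyperparameter_tune."""

    def __init__(self, name: str = "ToyTunableModel", path: str = "AutogluonModels/ToyTunableModel") -> None:
        self.name = name
        self.path = path
        self.path_root = path.rsplit(name, 1)[0]
        self._search_space: dict[str, Any] = {"max_epochs": 10}  # made-up hyperparameter

    def _get_model_base(self) -> Self:
        return self  # a standalone model is its own base; wrapper models would return the inner model

    def _is_gpu_available(self) -> bool:
        return False

    def _get_search_space(self) -> dict[str, Any]:
        return self._search_space

    def get_params(self) -> dict:
        return {"name": self.name, "path": self.path}

    @staticmethod
    def _get_system_resources() -> dict[str, Any]:
        return {"num_cpus": 4, "num_gpus": 0}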
autogluon/timeseries/models/autogluon_tabular/__init__.py

@@ -1,6 +1,8 @@
 from .mlforecast import DirectTabularModel, RecursiveTabularModel
+from .per_step import PerStepTabularModel
 
 __all__ = [
     "DirectTabularModel",
     "RecursiveTabularModel",
+    "PerStepTabularModel",
 ]
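
With PerStepTabularModel exported from the autogluon_tabular subpackage (and presumably registered in the new models/registry.py), it should be usable like any other tabular forecasting model. A hypothetical end-to-end sketch; the "PerStepTabular" hyperparameters key follows AutoGluon's usual model-naming convention and is assumed rather than shown in this diff:

import pandas as pd

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor
from autogluon.timeseries.models.autogluon_tabular import PerStepTabularModel  # new export from this diff

# Tiny synthetic dataset: a single hourly series.
df = pd.DataFrame(
    {
        "item_id": ["A"] * 200,
        "timestamp": pd.date_range("2024-01-01", periods=200, freq="h"),
        "target": range(200),
    }
)
train_data = TimeSeriesDataFrame.from_data_frame(df)

predictor = TimeSeriesPredictor(prediction_length=24)
# "PerStepTabular" is the assumed hyperparameters key for PerStepTabularModel.
predictor.fit(train_data, hyperparameters={"PerStepTabular": {}})
predictions = predictor.predict(train_data)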