autogluon.timeseries 1.2.1b20250224__py3-none-any.whl → 1.4.1b20251215__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of autogluon.timeseries might be problematic.
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +106 -0
- autogluon/timeseries/dataset/ts_dataframe.py +256 -141
- autogluon/timeseries/learner.py +86 -52
- autogluon/timeseries/metrics/__init__.py +42 -8
- autogluon/timeseries/metrics/abstract.py +89 -19
- autogluon/timeseries/metrics/point.py +142 -53
- autogluon/timeseries/metrics/quantile.py +46 -21
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +8 -2
- autogluon/timeseries/models/abstract/__init__.py +2 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +361 -592
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +189 -0
- autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +282 -194
- autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
- autogluon/timeseries/models/autogluon_tabular/transforms.py +25 -18
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +361 -0
- autogluon/timeseries/models/chronos/model.py +219 -138
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +81 -50
- autogluon/timeseries/models/ensemble/__init__.py +37 -2
- autogluon/timeseries/models/ensemble/abstract.py +107 -0
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
- autogluon/timeseries/models/ensemble/weighted/basic.py +91 -0
- autogluon/timeseries/models/ensemble/weighted/greedy.py +62 -0
- autogluon/timeseries/models/gluonts/__init__.py +1 -1
- autogluon/timeseries/models/gluonts/{abstract_gluonts.py → abstract.py} +148 -208
- autogluon/timeseries/models/gluonts/dataset.py +109 -0
- autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +38 -22
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +71 -74
- autogluon/timeseries/models/local/naive.py +13 -9
- autogluon/timeseries/models/local/npts.py +9 -2
- autogluon/timeseries/models/local/statsforecast.py +52 -36
- autogluon/timeseries/models/multi_window/multi_window_model.py +65 -45
- autogluon/timeseries/models/registry.py +64 -0
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
- autogluon/timeseries/models/toto/model.py +249 -0
- autogluon/timeseries/predictor.py +685 -297
- autogluon/timeseries/regressor.py +94 -44
- autogluon/timeseries/splitter.py +8 -32
- autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon/timeseries/trainer/ensemble_composer.py +444 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/{trainer.py → trainer/trainer.py} +387 -390
- autogluon/timeseries/trainer/utils.py +17 -0
- autogluon/timeseries/transforms/__init__.py +2 -13
- autogluon/timeseries/transforms/covariate_scaler.py +34 -40
- autogluon/timeseries/transforms/target_scaler.py +37 -20
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +3 -5
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/datetime/time_features.py +2 -2
- autogluon/timeseries/utils/features.py +70 -47
- autogluon/timeseries/utils/forecast.py +19 -14
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/utils/warning_filters.py +4 -2
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.4.1b20251215-py3.11-nspkg.pth +1 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/METADATA +49 -36
- autogluon_timeseries-1.4.1b20251215.dist-info/RECORD +103 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/WHEEL +1 -1
- autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -11
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -585
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -518
- autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -78
- autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
- autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- autogluon/timeseries/models/presets.py +0 -360
- autogluon.timeseries-1.2.1b20250224-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.2.1b20250224.dist-info/RECORD +0 -68
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/zip-safe +0 -0
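For orientation, the modules listed above back the package's public entry points (TimeSeriesPredictor and TimeSeriesDataFrame); the preset definitions formerly in configs/presets_configs.py now live in configs/predictor_presets.py and configs/hyperparameter_presets.py. The sketch below is a minimal, illustrative use of that public API; the file name and column names are assumptions, not taken from this diff.

import pandas as pd

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Long-format data with one row per (item_id, timestamp) pair (hypothetical file).
df = pd.read_csv("train.csv")
train_data = TimeSeriesDataFrame.from_data_frame(df, id_column="item_id", timestamp_column="timestamp")

# "medium_quality" is one of the named presets resolved through the configs module.
predictor = TimeSeriesPredictor(prediction_length=48, target="target")
predictor.fit(train_data, presets="medium_quality")
predictions = predictor.predict(train_data)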
autogluon/timeseries/models/abstract/model_trial.py

@@ -76,7 +76,8 @@ def fit_and_save_model(model, fit_kwargs, train_data, val_data, eval_metric, tim
     time_fit_start = time.time()
     model.fit(train_data=train_data, val_data=val_data, time_limit=time_left, **fit_kwargs)
     model.fit_time = time.time() - time_fit_start
-    model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
+    if val_data is not None:
+        model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
 
     logger.debug(f"\tHyperparameter tune run: {model.name}")
     logger.debug(f"\t\t{model.val_score:<7.4f}".ljust(15) + f"= Validation score ({eval_metric.name_with_sign})")
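Read as plain Python, the changed region of fit_and_save_model now looks like the following (reconstructed from the hunk above; the rest of the function is unchanged and elided). Out-of-fold scoring is skipped entirely when no validation data is provided.

time_fit_start = time.time()
model.fit(train_data=train_data, val_data=val_data, time_limit=time_left, **fit_kwargs)
model.fit_time = time.time() - time_fit_start
if val_data is not None:
    model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)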
autogluon/timeseries/models/abstract/tunable.py

@@ -0,0 +1,189 @@
+from __future__ import annotations
+
+import logging
+import os
+import time
+from abc import ABC, abstractmethod
+from contextlib import nullcontext
+from typing import Any
+
+from typing_extensions import Self
+
+from autogluon.common.savers import save_pkl
+from autogluon.common.utils.distribute_utils import DistributedContext
+from autogluon.common.utils.log_utils import DuplicateFilter
+from autogluon.common.utils.try_import import try_import_ray
+from autogluon.core.hpo.constants import CUSTOM_BACKEND, RAY_BACKEND
+from autogluon.core.hpo.exceptions import EmptySearchSpace
+from autogluon.core.hpo.executors import HpoExecutor, HpoExecutorFactory, RayHpoExecutor
+from autogluon.core.models import Tunable
+from autogluon.timeseries.dataset import TimeSeriesDataFrame
+from autogluon.timeseries.utils.warning_filters import disable_stdout, warning_filter
+
+from .model_trial import model_trial, skip_hpo
+
+logger = logging.getLogger(__name__)
+dup_filter = DuplicateFilter()
+logger.addFilter(dup_filter)
+
+
+class TimeSeriesTunable(Tunable, ABC):
+    @abstractmethod
+    def __init__(self) -> None:
+        self.name: str
+        self.path: str
+        self.path_root: str
+
+    def hyperparameter_tune(
+        self,
+        train_data: TimeSeriesDataFrame,
+        val_data: TimeSeriesDataFrame | None,
+        val_splitter: Any = None,
+        default_num_trials: int | None = 1,
+        refit_every_n_windows: int | None = 1,
+        hyperparameter_tune_kwargs: str | dict = "auto",
+        time_limit: float | None = None,
+    ) -> tuple[dict[str, Any], Any]:
+        hpo_executor = self._get_default_hpo_executor()
+        hpo_executor.initialize(
+            hyperparameter_tune_kwargs, default_num_trials=default_num_trials, time_limit=time_limit
+        )
+
+        # we use k_fold=1 to circumvent autogluon.core logic to manage resources during parallelization
+        # of different folds
+        # FIXME: we pass in self which currently does not inherit from AbstractModel
+        hpo_executor.register_resources(self, k_fold=1, **self._get_system_resources())  # type: ignore
+
+        time_start = time.time()
+        logger.debug(f"\tStarting hyperparameter tuning for {self.name}")
+        search_space = self._get_search_space()
+
+        try:
+            hpo_executor.validate_search_space(search_space, self.name)
+        except EmptySearchSpace:
+            return skip_hpo(self, train_data, val_data, time_limit=hpo_executor.time_limit)
+
+        train_path, val_path = self._save_with_data(train_data, val_data)
+
+        train_fn_kwargs = self._get_hpo_train_fn_kwargs(
+            model_cls=self.__class__,
+            init_params=self.get_params(),
+            time_start=time_start,
+            time_limit=hpo_executor.time_limit,
+            fit_kwargs=dict(
+                val_splitter=val_splitter,
+                refit_every_n_windows=refit_every_n_windows,
+            ),
+            train_path=train_path,
+            val_path=val_path,
+            hpo_executor=hpo_executor,
+        )
+
+        minimum_resources = self.get_minimum_resources(is_gpu_available=self._is_gpu_available())
+        hpo_context = disable_stdout if isinstance(hpo_executor, RayHpoExecutor) else nullcontext
+
+        minimum_cpu_per_trial = minimum_resources.get("num_cpus", 1)
+        if not isinstance(minimum_cpu_per_trial, int):
+            logger.warning(
+                f"Minimum number of CPUs per trial for {self.name} is not an integer. "
+                f"Setting to 1. Minimum number of CPUs per trial: {minimum_cpu_per_trial}"
+            )
+            minimum_cpu_per_trial = 1
+
+        with hpo_context(), warning_filter():  # prevent Ray from outputting its results to stdout with print
+            hpo_executor.execute(
+                model_trial=model_trial,
+                train_fn_kwargs=train_fn_kwargs,
+                directory=self.path,
+                minimum_cpu_per_trial=minimum_cpu_per_trial,
+                minimum_gpu_per_trial=minimum_resources.get("num_gpus", 0),
+                model_estimate_memory_usage=None,  # type: ignore
+                adapter_type="timeseries",
+            )
+
+            assert self.path_root is not None
+            hpo_models, analysis = hpo_executor.get_hpo_results(
+                model_name=self.name,
+                model_path_root=self.path_root,
+                time_start=time_start,
+            )
+
+        return hpo_models, analysis
+
+    def _get_default_hpo_executor(self) -> HpoExecutor:
+        backend = (
+            self._get_model_base()._get_hpo_backend()
+        )  # If ensemble, will use the base model to determine backend
+        if backend == RAY_BACKEND:
+            try:
+                try_import_ray()
+            except Exception as e:
+                warning_msg = f"Will use custom hpo logic because ray import failed. Reason: {str(e)}"
+                dup_filter.attach_filter_targets(warning_msg)
+                logger.warning(warning_msg)
+                backend = CUSTOM_BACKEND
+        hpo_executor = HpoExecutorFactory.get_hpo_executor(backend)()  # type: ignore
+        return hpo_executor
+
+    def _get_hpo_backend(self) -> str:
+        """Choose which backend("ray" or "custom") to use for hpo"""
+        if DistributedContext.is_distributed_mode():
+            return RAY_BACKEND
+        return CUSTOM_BACKEND
+
+    def _get_hpo_train_fn_kwargs(self, **train_fn_kwargs) -> dict:
+        """Update kwargs passed to model_trial depending on the model configuration.
+
+        These kwargs need to be updated, for example, by MultiWindowBacktestingModel.
+        """
+        return train_fn_kwargs
+
+    def estimate_memory_usage(self, *args, **kwargs) -> float | None:
+        """Return the estimated memory usage of the model. None if memory usage cannot be
+        estimated.
+        """
+        return None
+
+    def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, int | float]:
+        return {
+            "num_cpus": 1,
+        }
+
+    def _save_with_data(
+        self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None
+    ) -> tuple[str, str]:
+        self.path = os.path.abspath(self.path)
+        self.path_root = self.path.rsplit(self.name, 1)[0]
+
+        dataset_train_filename = "dataset_train.pkl"
+        train_path = os.path.join(self.path, dataset_train_filename)
+        save_pkl.save(path=train_path, object=train_data)
+
+        dataset_val_filename = "dataset_val.pkl"
+        val_path = os.path.join(self.path, dataset_val_filename)
+        save_pkl.save(path=val_path, object=val_data)
+        return train_path, val_path
+
+    @abstractmethod
+    def _get_model_base(self) -> Self:
+        pass
+
+    @abstractmethod
+    def _is_gpu_available(self) -> bool:
+        pass
+
+    @abstractmethod
+    def _get_search_space(self) -> dict[str, Any]:
+        pass
+
+    @abstractmethod
+    def get_params(self) -> dict:
+        """Return a clean copy of constructor parameters that can be used to
+        clone the current model.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def _get_system_resources() -> dict[str, Any]:
+        pass