autogluon.timeseries 1.4.1b20251215__py3-none-any.whl → 1.5.1b20260122__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- autogluon/timeseries/configs/hyperparameter_presets.py +11 -26
- autogluon/timeseries/configs/predictor_presets.py +1 -39
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -11
- autogluon/timeseries/models/autogluon_tabular/per_step.py +1 -1
- autogluon/timeseries/models/chronos/chronos2.py +43 -9
- autogluon/timeseries/models/chronos/model.py +2 -2
- autogluon/timeseries/models/chronos/utils.py +1 -1
- autogluon/timeseries/models/ensemble/array_based/models.py +2 -2
- autogluon/timeseries/models/ensemble/per_item_greedy.py +1 -1
- autogluon/timeseries/models/ensemble/weighted/greedy.py +2 -0
- autogluon/timeseries/models/gluonts/models.py +0 -7
- autogluon/timeseries/models/local/statsforecast.py +1 -1
- autogluon/timeseries/predictor.py +84 -24
- autogluon/timeseries/trainer/trainer.py +0 -4
- autogluon/timeseries/version.py +1 -1
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/METADATA +8 -8
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/RECORD +24 -24
- /autogluon.timeseries-1.4.1b20251215-py3.11-nspkg.pth → /autogluon.timeseries-1.5.1b20260122-py3.11-nspkg.pth +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/WHEEL +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/licenses/LICENSE +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/licenses/NOTICE +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/namespace_packages.txt +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/top_level.txt +0 -0
- {autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/zip-safe +0 -0
autogluon/timeseries/configs/hyperparameter_presets.py
CHANGED
@@ -12,51 +12,36 @@ def get_hyperparameter_presets() -> dict[str, dict[str, dict[str, Any] | list[di
             "DirectTabular": {"max_num_samples": 100_000},
         },
         "light": {
-            "Naive": {},
             "SeasonalNaive": {},
             "ETS": {},
             "Theta": {},
             "RecursiveTabular": {},
             "DirectTabular": {},
             "TemporalFusionTransformer": {},
-            "
-        },
-        "light_inference": {
-            "SeasonalNaive": {},
-            "DirectTabular": {},
-            "RecursiveTabular": {},
-            "TemporalFusionTransformer": {},
-            "PatchTST": {},
+            "Chronos2": {"model_path": "autogluon/chronos-2-small"},
         },
         "default": {
             "SeasonalNaive": {},
             "AutoETS": {},
-            "NPTS": {},
             "DynamicOptimizedTheta": {},
             "RecursiveTabular": {},
             "DirectTabular": {},
             "TemporalFusionTransformer": {},
-            "PatchTST": {},
             "DeepAR": {},
-            "
-            {
-                "ag_args": {"name_suffix": "ZeroShot"},
-                "model_path": "bolt_base",
-            },
+            "Chronos2": [
+                {},
             {
-                "ag_args": {"name_suffix": "
-                "model_path": "
+                "ag_args": {"name_suffix": "SmallFineTuned"},
+                "model_path": "autogluon/chronos-2-small",
                 "fine_tune": True,
-                "
-                "covariate_regressor": {"model_name": "CAT", "model_hyperparameters": {"iterations": 1_000}},
+                "eval_during_fine_tune": True,
             },
         ],
-            "
-            "
-            "
-            "
-            "
-            "lr": 1e-4,
+            "Chronos": {
+                "ag_args": {"name_suffix": "WithRegressor"},
+                "model_path": "bolt_small",
+                "target_scaler": "standard",
+                "covariate_regressor": {"model_name": "CAT", "model_hyperparameters": {"iterations": 1000}},
             },
         },
     }
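For context, a hyperparameters mapping of this shape is what ``TimeSeriesPredictor.fit`` consumes; a list value (as for ``Chronos2`` above) trains one model variant per config. A minimal sketch, assuming a prepared ``TimeSeriesDataFrame`` (the dataset path is illustrative):

    from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

    # A hyperparameters dict shaped like the new "default" preset (abridged).
    hyperparameters = {
        "SeasonalNaive": {},
        "Chronos2": [
            {},  # zero-shot variant with default settings
            {
                "ag_args": {"name_suffix": "SmallFineTuned"},
                "model_path": "autogluon/chronos-2-small",
                "fine_tune": True,
            },
        ],
    }

    train_data = TimeSeriesDataFrame.from_path("train.csv")  # illustrative path
    predictor = TimeSeriesPredictor(prediction_length=24)
    predictor.fit(train_data, hyperparameters=hyperparameters)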
autogluon/timeseries/configs/predictor_presets.py
CHANGED
@@ -2,10 +2,7 @@
 
 from typing import Any
 
-from . import get_hyperparameter_presets
-
 TIMESERIES_PRESETS_ALIASES = dict(
-    chronos="chronos_small",
     best="best_quality",
     high="high_quality",
     medium="medium_quality",
@@ -16,10 +13,8 @@ TIMESERIES_PRESETS_ALIASES = dict(
 
 
 def get_predictor_presets() -> dict[str, Any]:
-    hp_presets = get_hyperparameter_presets()
-
     predictor_presets = dict(
-        best_quality={"hyperparameters": "default", "num_val_windows":
+        best_quality={"hyperparameters": "default", "num_val_windows": "auto", "refit_every_n_windows": "auto"},
         high_quality={"hyperparameters": "default"},
         medium_quality={"hyperparameters": "light"},
         fast_training={"hyperparameters": "very_light"},
@@ -62,39 +57,6 @@ def get_predictor_presets() -> dict[str, Any]:
             "hyperparameters": {"Chronos": {"model_path": "bolt_base"}},
             "skip_model_selection": True,
         },
-        # Original Chronos models
-        chronos_tiny={
-            "hyperparameters": {"Chronos": {"model_path": "tiny"}},
-            "skip_model_selection": True,
-        },
-        chronos_mini={
-            "hyperparameters": {"Chronos": {"model_path": "mini"}},
-            "skip_model_selection": True,
-        },
-        chronos_small={
-            "hyperparameters": {"Chronos": {"model_path": "small"}},
-            "skip_model_selection": True,
-        },
-        chronos_base={
-            "hyperparameters": {"Chronos": {"model_path": "base"}},
-            "skip_model_selection": True,
-        },
-        chronos_large={
-            "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
-            "skip_model_selection": True,
-        },
-        chronos_ensemble={
-            "hyperparameters": {
-                "Chronos": {"model_path": "small"},
-                **hp_presets["light_inference"],
-            }
-        },
-        chronos_large_ensemble={
-            "hyperparameters": {
-                "Chronos": {"model_path": "large", "batch_size": 8},
-                **hp_presets["light_inference"],
-            }
-        },
     )
 
     # update with aliases
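Because the ``chronos_tiny`` through ``chronos_large_ensemble`` presets are deleted here, a user who relied on them would need to spell the configuration out. A sketch of what the removed ``chronos_small`` preset amounted to, based solely on the deleted lines above (``train_data`` assumed to be defined elsewhere):

    from autogluon.timeseries import TimeSeriesPredictor

    # Roughly equivalent to the removed `chronos_small` preset:
    # a single zero-shot Chronos model, no model selection.
    predictor = TimeSeriesPredictor(prediction_length=48)
    predictor.fit(
        train_data,  # assumes a TimeSeriesDataFrame defined elsewhere
        hyperparameters={"Chronos": {"model_path": "small"}},
        skip_model_selection=True,
    )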
autogluon/timeseries/models/abstract/abstract_timeseries_model.py
CHANGED
@@ -668,7 +668,6 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, metaclass=
         pass
 
     def _preprocess_time_limit(self, time_limit: float) -> float:
-        original_time_limit = time_limit
         max_time_limit_ratio = self._extra_ag_args.get("max_time_limit_ratio", self.default_max_time_limit_ratio)
         max_time_limit = self._extra_ag_args.get("max_time_limit")
 
@@ -677,16 +676,6 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, metaclass=
         if max_time_limit is not None:
             time_limit = min(time_limit, max_time_limit)
 
-        if original_time_limit != time_limit:
-            time_limit_og_str = f"{original_time_limit:.2f}s" if original_time_limit is not None else "None"
-            time_limit_str = f"{time_limit:.2f}s" if time_limit is not None else "None"
-            logger.debug(
-                f"\tTime limit adjusted due to model hyperparameters: "
-                f"{time_limit_og_str} -> {time_limit_str} "
-                f"(ag.max_time_limit={max_time_limit}, "
-                f"ag.max_time_limit_ratio={max_time_limit_ratio}"
-            )
-
         return time_limit
 
     def _get_search_space(self):
autogluon/timeseries/models/chronos/chronos2.py
CHANGED
@@ -76,9 +76,14 @@ class Chronos2Model(AbstractTimeSeriesModel):
         Extra keyword arguments passed to ``transformers.TrainingArguments``
     revision : str, default = None
         Model revision to use (branch name or commit hash). If None, the default branch (usually "main") is used.
+    disable_known_covariates : bool, default = False
+        If True, known covariates won't be used by the model even if they are present in the dataset.
+    disable_past_covariates : bool, default = False
+        If True, past covariates won't be used by the model even if they are present in the dataset.
     """
 
     ag_model_aliases = ["Chronos-2"]
+    ag_priority = 75
     fine_tuned_ckpt_name: str = "fine-tuned-ckpt"
 
     _supports_known_covariates = True
@@ -142,7 +147,7 @@ class Chronos2Model(AbstractTimeSeriesModel):
         self.load_model_pipeline()
 
         # NOTE: This must be placed after load_model_pipeline to ensure that the loggers are available in loggerDict
-        self._update_transformers_loggers(logging.ERROR if verbosity <= 3 else logging.
+        self._update_transformers_loggers(logging.ERROR if verbosity <= 3 else logging.WARNING)
 
         if self.get_hyperparameter("fine_tune"):
             self._fine_tune(train_data, val_data, time_limit=time_limit, verbosity=verbosity)
@@ -175,6 +180,8 @@ class Chronos2Model(AbstractTimeSeriesModel):
             "fine_tune_eval_max_items": 256,
             "fine_tune_lora_config": None,
             "revision": None,
+            "disable_known_covariates": False,
+            "disable_past_covariates": False,
         }
 
     @property
@@ -196,14 +203,34 @@ class Chronos2Model(AbstractTimeSeriesModel):
             "fine_tune_lora_config",
             "fine_tune_trainer_kwargs",
             "revision",
+            "disable_known_covariates",
+            "disable_past_covariates",
         ]
 
+    def _remove_disabled_covariates(
+        self, past_df: pd.DataFrame, future_df: pd.DataFrame | None
+    ) -> tuple[pd.DataFrame, pd.DataFrame | None]:
+        """Remove covariates from dataframes based on disable flags."""
+        cols_to_remove = []
+        if self.get_hyperparameter("disable_past_covariates"):
+            cols_to_remove.extend(self.covariate_metadata.past_covariates)
+        if self.get_hyperparameter("disable_known_covariates"):
+            cols_to_remove.extend(self.covariate_metadata.known_covariates)
+            future_df = None
+
+        if cols_to_remove:
+            past_df = past_df.drop(columns=cols_to_remove)
+
+        return past_df, future_df
+
     def _predict(
         self,
         data: TimeSeriesDataFrame,
         known_covariates: TimeSeriesDataFrame | None = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
+        from .utils import timeout_callback
+
         if self._model_pipeline is None:
             self.load_model_pipeline()
         assert self._model_pipeline is not None
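The new flags are ordinary model hyperparameters, so opting a Chronos-2 model out of covariates would look roughly like this (the covariate column name is illustrative):

    from autogluon.timeseries import TimeSeriesPredictor

    # Sketch: train Chronos-2 while ignoring covariate columns present in the data.
    predictor = TimeSeriesPredictor(prediction_length=24, known_covariates_names=["promo"])
    predictor.fit(
        train_data,  # assumes a TimeSeriesDataFrame with a "promo" column
        hyperparameters={
            "Chronos2": {
                "disable_known_covariates": True,
                "disable_past_covariates": True,
            }
        },
    )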
@@ -224,6 +251,9 @@ class Chronos2Model(AbstractTimeSeriesModel):
         cross_learning = self.get_hyperparameter("cross_learning")
         context_length = self.get_hyperparameter("context_length")
         future_df = known_covariates.reset_index().to_data_frame() if known_covariates is not None else None
+        time_limit = kwargs.get("time_limit")
+
+        context_df, future_df = self._remove_disabled_covariates(context_df, future_df)
 
         forecast_df = self._model_pipeline.predict_df(
             df=context_df,
@@ -235,6 +265,7 @@ class Chronos2Model(AbstractTimeSeriesModel):
             batch_size=batch_size,
             validate_inputs=False,
             cross_learning=cross_learning,
+            after_batch=timeout_callback(time_limit),
         )
 
         forecast_df = forecast_df.rename(columns={"predictions": "mean"}).drop(columns="target_name")
@@ -277,8 +308,11 @@ class Chronos2Model(AbstractTimeSeriesModel):
         from .utils import LoggerCallback, TimeLimitCallback
 
         def convert_data(df: TimeSeriesDataFrame):
+            past_df = df.reset_index().to_data_frame()
+            past_df, _ = self._remove_disabled_covariates(past_df, None)
+
             inputs, _, _ = convert_df_input_to_list_of_dicts_input(
-                df=
+                df=past_df,
                 future_df=None,
                 target_columns=[self.target],
                 prediction_length=self.prediction_length,
@@ -288,13 +322,13 @@ class Chronos2Model(AbstractTimeSeriesModel):
             # The above utility will only split the dataframe into target and past_covariates, where past_covariates contains
             # past values of both past-only and known-future covariates. We need to add future_covariates to enable fine-tuning
             # with known covariates by indicating which covariates are known in the future.
-
-
-
-
-
-
-
+            if not self.get_hyperparameter("disable_known_covariates"):
+                known_covariates = self.covariate_metadata.known_covariates
+                if len(known_covariates) > 0:
+                    for input_dict in inputs:
+                        # NOTE: the covariates are empty because the actual values are not used
+                        # This only indicates which covariates are known in the future
+                        input_dict["future_covariates"] = {name: np.array([]) for name in known_covariates}
 
             return inputs
 
autogluon/timeseries/models/chronos/model.py
CHANGED
@@ -471,7 +471,7 @@ class ChronosModel(AbstractTimeSeriesModel):
         for logger_name in logging.root.manager.loggerDict:
             if "transformers" in logger_name:
                 transformers_logger = logging.getLogger(logger_name)
-                transformers_logger.setLevel(logging.ERROR if verbosity <= 3 else logging.
+                transformers_logger.setLevel(logging.ERROR if verbosity <= 3 else logging.WARNING)
 
         self._check_fit_params()
         self._log_unused_hyperparameters()
@@ -639,7 +639,7 @@ class ChronosModel(AbstractTimeSeriesModel):
             batch_size=batch_size,
             shuffle=False,
             num_workers=num_workers,
-
+            after_batch=timeout_callback(seconds=time_limit),
         )
 
     def _get_context_length(self, data: TimeSeriesDataFrame) -> int:
autogluon/timeseries/models/chronos/utils.py
CHANGED
@@ -255,7 +255,7 @@ class ChronosInferenceDataset:
 
 class ChronosInferenceDataLoader(torch.utils.data.DataLoader):
     def __init__(self, *args, **kwargs):
-        self.callback: Callable = kwargs.pop("
+        self.callback: Callable = kwargs.pop("after_batch", lambda: None)
         super().__init__(*args, **kwargs)
 
     def __iter__(self):  # type: ignore
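The diff never shows the body of ``timeout_callback``, but the call sites above imply a zero-argument callable invoked after each batch that raises once a deadline passes. A hypothetical sketch of that contract (the exception type and internals are assumptions, not the actual implementation in chronos/utils.py):

    import time

    def timeout_callback(seconds):
        """Return a zero-argument callable that raises once `seconds` have elapsed."""
        start = time.monotonic()

        def callback() -> None:
            # Invoked by the data loader after every batch; a None limit disables it.
            if seconds is not None and time.monotonic() - start > seconds:
                raise TimeoutError(f"Inference time limit of {seconds}s exceeded")

        return callback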
autogluon/timeseries/models/ensemble/array_based/models.py
CHANGED
@@ -38,7 +38,7 @@ class BaseTabularEnsemble(ArrayBasedTimeSeriesEnsembleModel, ABC):
 
     def _get_default_hyperparameters(self) -> dict[str, Any]:
         default_hps = super()._get_default_hyperparameters()
-        default_hps.update({"model_name": "
+        default_hps.update({"model_name": "CAT", "model_hyperparameters": {}})
         return default_hps
 
     def _get_ensemble_regressor(self):
@@ -60,7 +60,7 @@ class TabularEnsemble(BaseTabularEnsemble):
 
     Other Parameters
     ----------------
-    model_name : str, default = "
+    model_name : str, default = "CAT"
         Name of the AutoGluon-Tabular model to use for ensemble learning. Model name should be registered
         in AutoGluon-Tabular model registry.
     model_hyperparameters : dict, default = {}
autogluon/timeseries/models/ensemble/per_item_greedy.py
CHANGED
@@ -102,7 +102,7 @@ class PerItemGreedyEnsemble(AbstractTimeSeriesEnsembleModel):
         self.average_weight = self.average_weight[models_to_keep]
 
         weights_for_printing = {model: round(float(weight), 2) for model, weight in self.average_weight.items()}
-        logger.info(f"\tAverage ensemble weights: {pprint.pformat(weights_for_printing, width=
+        logger.info(f"\tAverage ensemble weights: {pprint.pformat(weights_for_printing, width=1000)}")
 
     def _split_predictions_per_item(
         self, predictions_per_window: dict[str, list[TimeSeriesDataFrame]]
autogluon/timeseries/models/ensemble/weighted/greedy.py
CHANGED
@@ -14,6 +14,8 @@ class GreedyEnsemble(AbstractWeightedTimeSeriesEnsembleModel):
     """Greedy ensemble selection algorithm that iteratively builds an ensemble by selecting models with
     replacement.
 
+    Also known as ``WeightedEnsemble`` for backward compatibility.
+
     This class implements the Ensemble Selection algorithm by Caruana et al. [Car2004]_, which starts
     with an empty ensemble and repeatedly adds the model that most improves the ensemble's validation
     performance. Models can be selected multiple times, allowing the algorithm to assign higher effective
autogluon/timeseries/models/gluonts/models.py
CHANGED
@@ -41,10 +41,8 @@ class DeepARModel(AbstractGluonTSModel):
         Number of steps to unroll the RNN for before computing predictions
     disable_static_features : bool, default = False
         If True, static features won't be used by the model even if they are present in the dataset.
-        If False, static features will be used by the model if they are present in the dataset.
     disable_known_covariates : bool, default = False
         If True, known covariates won't be used by the model even if they are present in the dataset.
-        If False, known covariates will be used by the model if they are present in the dataset.
     num_layers : int, default = 2
         Number of RNN layers
     hidden_size : int, default = 40
@@ -170,13 +168,10 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel):
         Distribution output object that defines how the model output is converted to a forecast, and how the loss is computed.
     disable_static_features : bool, default = False
         If True, static features won't be used by the model even if they are present in the dataset.
-        If False, static features will be used by the model if they are present in the dataset.
     disable_known_covariates : bool, default = False
         If True, known covariates won't be used by the model even if they are present in the dataset.
-        If False, known covariates will be used by the model if they are present in the dataset.
     disable_past_covariates : bool, default = False
         If True, past covariates won't be used by the model even if they are present in the dataset.
-        If False, past covariates will be used by the model if they are present in the dataset.
     hidden_dim : int, default = 32
         Size of the LSTM & transformer hidden states.
     variable_dim : int, default = 32
@@ -470,10 +465,8 @@ class TiDEModel(AbstractGluonTSModel):
         Number of past values used for prediction.
     disable_static_features : bool, default = False
         If True, static features won't be used by the model even if they are present in the dataset.
-        If False, static features will be used by the model if they are present in the dataset.
     disable_known_covariates : bool, default = False
         If True, known covariates won't be used by the model even if they are present in the dataset.
-        If False, known covariates will be used by the model if they are present in the dataset.
     feat_proj_hidden_dim : int, default = 4
         Size of the feature projection layer.
     encoder_hidden_dim : int, default = 64
autogluon/timeseries/models/local/statsforecast.py
CHANGED
@@ -269,7 +269,7 @@ class AutoETSModel(AbstractProbabilisticStatsForecastModel):
         This significantly speeds up fitting and usually leads to no change in accuracy.
     """
 
-    ag_priority =
+    ag_priority = 60
     init_time_in_seconds = 0  # C++ models require no compilation
     allowed_local_model_args = [
         "damped",
autogluon/timeseries/predictor.py
CHANGED
@@ -10,6 +10,7 @@ from typing import Any, Literal, Type, cast, overload
 import numpy as np
 import pandas as pd
 
+from autogluon.common.utils.decorators import apply_presets
 from autogluon.common.utils.log_utils import (
     add_log_to_file,
     set_logger_verbosity,
@@ -17,7 +18,6 @@ from autogluon.common.utils.log_utils import (
 )
 from autogluon.common.utils.system_info import get_ag_system_info
 from autogluon.common.utils.utils import check_saved_predictor_version, setup_outputdir
-from autogluon.core.utils.decorators import apply_presets
 from autogluon.core.utils.loaders import load_pkl, load_str
 from autogluon.core.utils.savers import save_pkl, save_str
 from autogluon.timeseries import __version__ as current_ag_version
@@ -414,9 +414,9 @@ class TimeSeriesPredictor:
         hyperparameter_tune_kwargs: str | dict | None = None,
         excluded_model_types: list[str] | None = None,
         ensemble_hyperparameters: dict[str, Any] | list[dict[str, Any]] | None = None,
-        num_val_windows: int | tuple[int, ...] = 1,
+        num_val_windows: int | tuple[int, ...] | Literal["auto"] = 1,
         val_step_size: int | None = None,
-        refit_every_n_windows: int | None = 1,
+        refit_every_n_windows: int | None | Literal["auto"] = 1,
         refit_full: bool = False,
         enable_ensemble: bool = True,
         skip_model_selection: bool = False,
@@ -487,18 +487,23 @@ class TimeSeriesPredictor:
         Available presets:
 
         - ``"fast_training"``: Simple statistical and tree-based ML models. These models are fast to train but may not be very accurate.
-        - ``"medium_quality"``: Same models as above, plus deep learning models ``TemporalFusionTransformer`` and Chronos-
+        - ``"medium_quality"``: Same models as above, plus deep learning models ``TemporalFusionTransformer`` and Chronos-2 (small). Produces good forecasts with reasonable training time.
         - ``"high_quality"``: A mix of multiple DL, ML and statistical forecasting models available in AutoGluon that offers the best forecast accuracy. Much more accurate than ``medium_quality``, but takes longer to train.
         - ``"best_quality"``: Same models as in ``"high_quality"``, but performs validation with multiple backtests. Usually better than ``high_quality``, but takes even longer to train.
 
-        Available presets with the `Chronos-Bolt <https://github.com/amazon-science/chronos-forecasting>`_
+        Available presets with the `Chronos-2 and Chronos-Bolt <https://github.com/amazon-science/chronos-forecasting>`_ models:
 
+        - ``"chronos2"``: `Chronos-2 <https://huggingface.co/amazon/chronos-2>`_ base model for zero-shot forecasting.
+        - ``"chronos2_small"``: Smaller Chronos-2 model for faster zero-shot forecasting with lower memory footprint.
+        - ``"chronos2_ensemble"``: Ensemble combining zero-shot Chronos-2 base model with fine-tuned Chronos-2 small model for improved accuracy.
         - ``"bolt_{model_size}"``: where model size is one of ``tiny,mini,small,base``. Uses the Chronos-Bolt pretrained model for zero-shot forecasting.
-        See the documentation for ``ChronosModel`` or see `Hugging Face <https://huggingface.co/collections/amazon/chronos-models-65f1791d630a8d57cb718444>`_ for more information.
 
-
-
-
+        See the documentation for ``Chronos2`` and ``Chronos`` models in :ref:`Forecasting Time Series - Model Zoo <forecasting_model_zoo>`
+        or see `Hugging Face <https://huggingface.co/collections/amazon/chronos-models-65f1791d630a8d57cb718444>`_ for more information.
+
+        Exact definitions of all presets can be found in the source code
+        [`1 <https://github.com/autogluon/autogluon/blob/stable/timeseries/src/autogluon/timeseries/configs/predictor_presets.py>`_,
+        `2 <https://github.com/autogluon/autogluon/blob/stable/timeseries/src/autogluon/timeseries/configs/hyperparameter_presets.py>`_].
 
         If no ``presets`` are selected, user-provided values for ``hyperparameters`` will be used (defaulting to their
         default values specified below).
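In practice the new presets are used the same way as the old ones; a minimal sketch, assuming a prepared ``TimeSeriesDataFrame``:

    from autogluon.timeseries import TimeSeriesPredictor

    # Zero-shot forecasting with the new Chronos-2 preset.
    predictor = TimeSeriesPredictor(prediction_length=24)
    predictor.fit(train_data, presets="chronos2")
    predictions = predictor.predict(train_data)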
@@ -574,6 +579,8 @@ class TimeSeriesPredictor:
             * "bayes": Perform HPO with HyperOpt on GluonTS-backed models via Ray tune. Perform random search on other models.
             * "auto": alias for "bayes"
 
+            To enable HyperOpt, install the corresponding extra with ``pip install "autogluon.timeseries[ray]"``.
+
             The "scheduler" and "searcher" key are required when providing a dict.
 
             Example::
@@ -618,10 +625,13 @@ class TimeSeriesPredictor:
             )
 
             When using multi-layer ensembling, ``num_val_windows`` must be a tuple of integers, and ``len(ensemble_hyperparameters)`` must match ``len(num_val_windows)``.
-        num_val_windows : int | tuple[int, ...], default = 1
+        num_val_windows : int | tuple[int, ...] | "auto", default = 1
             Number of backtests done on ``train_data`` for each trained model to estimate the validation performance.
             This parameter is also used to control multi-layer ensembling.
 
+            If set to ``"auto"``, the value will be determined automatically based on dataset properties (number of
+            time series and median time series length).
+
             Increasing this parameter increases the training time roughly by a factor of
             ``num_val_windows // refit_every_n_windows``. See ``refit_every_n_windows`` and ``val_step_size`` for
             details.
@@ -663,11 +673,13 @@ class TimeSeriesPredictor:
             provided when creating the predictor.
 
             If ``tuning_data`` is provided and ``len(ensemble_hyperparameters) == 1``, then this parameter is ignored.
-        refit_every_n_windows: int
+        refit_every_n_windows: int | None | "auto", default = 1
             When performing cross validation, each model will be retrained every ``refit_every_n_windows`` validation
             windows, where the number of validation windows is specified by ``num_val_windows``. Note that in the
             default setting where ``num_val_windows=1``, this argument has no effect.
 
+            If set to ``"auto"``, the value will be determined automatically based on ``num_val_windows``.
+
             If set to ``None``, models will only be fit once for the first (oldest) validation window. By default,
             ``refit_every_n_windows=1``, i.e., all models will be refit for each validation window.
         refit_full : bool, default = False
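Taken together, the two ``"auto"`` options let the predictor size its own backtest; a minimal sketch, assuming ``train_data`` is a ``TimeSeriesDataFrame``:

    from autogluon.timeseries import TimeSeriesPredictor

    # Let AutoGluon choose the number of validation windows and the refit
    # cadence from the dataset size (new in this release).
    predictor = TimeSeriesPredictor(prediction_length=24)
    predictor.fit(
        train_data,
        num_val_windows="auto",
        refit_every_n_windows="auto",
    )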
@@ -739,12 +751,33 @@ class TimeSeriesPredictor:
 
         if val_step_size is None:
             val_step_size = self.prediction_length
+        median_timeseries_length = int(train_data.num_timesteps_per_item().median())
+
+        # Early validation: check length mismatch when num_val_windows is explicitly provided
+        if num_val_windows != "auto" and ensemble_hyperparameters is not None:
+            num_layers = len(ensemble_hyperparameters) if isinstance(ensemble_hyperparameters, list) else 1
+            num_windows_tuple = num_val_windows if isinstance(num_val_windows, tuple) else (num_val_windows,)
+            if len(num_windows_tuple) != num_layers:
+                raise ValueError(
+                    f"Length mismatch: num_val_windows has {len(num_windows_tuple)} element(s) but "
+                    f"ensemble_hyperparameters has {num_layers} layer(s). These must match when num_val_windows "
+                    f"is explicitly provided. Use num_val_windows='auto' to automatically determine the number of windows."
+                )
+
+        if num_val_windows == "auto":
+            num_val_windows = self._recommend_num_val_windows_auto(
+                median_timeseries_length=median_timeseries_length,
+                val_step_size=val_step_size,
+                num_items=train_data.num_items,
+                ensemble_hyperparameters=ensemble_hyperparameters,
+            )
+            logger.info(f"Automatically setting num_val_windows={num_val_windows} based on dataset properties")
 
         num_val_windows, ensemble_hyperparameters = self._validate_and_normalize_validation_and_ensemble_inputs(
             num_val_windows=num_val_windows,
             ensemble_hyperparameters=ensemble_hyperparameters,
             val_step_size=val_step_size,
-            median_timeseries_length=
+            median_timeseries_length=median_timeseries_length,
             tuning_data_provided=tuning_data is not None,
         )
 
@@ -753,6 +786,12 @@ class TimeSeriesPredictor:
             tuning_data = self._check_and_prepare_data_frame_for_evaluation(tuning_data, name="tuning_data")
             logger.info(f"Provided tuning_data has {self._get_dataset_stats(tuning_data)}")
 
+        if refit_every_n_windows == "auto":
+            refit_every_n_windows = self._recommend_refit_every_n_windows_auto(num_val_windows)
+            logger.info(
+                f"Automatically setting refit_every_n_windows={refit_every_n_windows} based on num_val_windows"
+            )
+
         if sum(num_val_windows) <= 1 and refit_every_n_windows is not None and refit_every_n_windows > 1:
             logger.warning(
                 f"\trefit_every_n_windows provided as {refit_every_n_windows} but num_val_windows is set to "
@@ -790,6 +829,37 @@ class TimeSeriesPredictor:
         self.save()
         return self
 
+    def _recommend_num_val_windows_auto(
+        self,
+        num_items: int,
+        median_timeseries_length: int,
+        val_step_size: int,
+        ensemble_hyperparameters: dict[str, Any] | list[dict[str, Any]] | None = None,
+    ) -> tuple[int, ...]:
+        if num_items < 20:
+            recommended_windows = 5
+        elif num_items < 100:
+            recommended_windows = 3
+        else:
+            recommended_windows = 2
+
+        min_train_length = max(2 * self.prediction_length + 1, 10)
+        max_windows = int((median_timeseries_length - min_train_length - self.prediction_length) // val_step_size + 1)
+        total_windows = min(recommended_windows, max(1, max_windows))
+
+        num_layers = len(ensemble_hyperparameters) if isinstance(ensemble_hyperparameters, list) else 1
+        if total_windows >= num_layers:
+            # Distribute windows: most to first layer, 1 to each remaining layer
+            return (total_windows - num_layers + 1,) + (1,) * (num_layers - 1)
+        else:
+            # Insufficient windows: return tuple matching num_layers, will be reduced downstream
+            return (1,) * num_layers
+
+    def _recommend_refit_every_n_windows_auto(self, num_val_windows: tuple[int, ...]) -> int:
+        # Simple mapping for total_windows -> refit_every_n_windows: 1 -> 1, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 2
+        total_windows = sum(num_val_windows)
+        return int(round(total_windows**0.5))
+
     def _validate_and_normalize_validation_and_ensemble_inputs(
         self,
         num_val_windows: int | tuple[int, ...],
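To make the arithmetic concrete, here is a standalone re-run of the recommendation logic above under assumed inputs (50 items, median series length 200, ``prediction_length=24``, ``val_step_size=24``):

    # Standalone reproduction of the window-recommendation arithmetic above.
    num_items, median_len, prediction_length, val_step_size = 50, 200, 24, 24

    recommended = 5 if num_items < 20 else 3 if num_items < 100 else 2  # -> 3
    min_train_length = max(2 * prediction_length + 1, 10)  # -> 49
    max_windows = (median_len - min_train_length - prediction_length) // val_step_size + 1  # -> 6
    total_windows = min(recommended, max(1, max_windows))  # -> 3 validation windows

    refit_every_n_windows = round(total_windows ** 0.5)  # -> 2 (refit every other window)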
@@ -799,18 +869,8 @@ class TimeSeriesPredictor:
         tuning_data_provided: bool,
     ) -> tuple[tuple[int, ...], list[dict[str, Any]] | None]:
         """Validate and normalize num_val_windows and ensemble_hyperparameters for multi-layer ensembling."""
-
-
-        if ensemble_hyperparameters is not None:
-            if isinstance(ensemble_hyperparameters, dict):
-                ensemble_hyperparameters = [ensemble_hyperparameters]
-
-            if len(ensemble_hyperparameters) != len(original_num_val_windows):
-                raise ValueError(
-                    f"Length mismatch: num_val_windows has {len(original_num_val_windows)} layers but "
-                    f"ensemble_hyperparameters has {len(ensemble_hyperparameters)} layers. "
-                    f"These must match for multi-layer ensembling."
-                )
+        if ensemble_hyperparameters is not None and isinstance(ensemble_hyperparameters, dict):
+            ensemble_hyperparameters = [ensemble_hyperparameters]
 
         num_val_windows = self._normalize_num_val_windows_input(num_val_windows, tuning_data_provided)
         num_val_windows = self._reduce_num_val_windows_if_necessary(
autogluon/timeseries/trainer/trainer.py
CHANGED
@@ -495,7 +495,6 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             time_reserved_for_ensemble = min(
                 self.max_ensemble_time_limit, time_left / (num_base_models - i + 1)
             )
-            logger.debug(f"Reserving {time_reserved_for_ensemble:.1f}s for ensemble")
         else:
             time_reserved_for_ensemble = 0.0
         time_left_for_model = (time_left - time_reserved_for_ensemble) / (num_base_models - i)
@@ -869,9 +868,6 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         # start timer and cap subsample size if it's greater than the number of items in the provided data set
         time_start = time.time()
         if subsample_size > data.num_items:
-            logger.info(
-                f"Subsample_size {subsample_size} is larger than the number of items in the data and will be ignored"
-            )
             subsample_size = data.num_items
 
         # set default number of iterations and cap iterations if the number of items in the data is smaller
autogluon/timeseries/version.py
CHANGED
{autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autogluon.timeseries
-Version: 1.4.1b20251215
+Version: 1.5.1b20260122
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -53,22 +53,22 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: einops<1,>=0.7
-Requires-Dist: chronos-forecasting<2.4,>=2.2.
+Requires-Dist: chronos-forecasting<2.4,>=2.2.2
 Requires-Dist: peft<0.18,>=0.13.0
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core==1.4.1b20251215
-Requires-Dist: autogluon.common==1.4.1b20251215
-Requires-Dist: autogluon.features==1.4.1b20251215
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20251215
+Requires-Dist: autogluon.core==1.5.1b20260122
+Requires-Dist: autogluon.common==1.5.1b20260122
+Requires-Dist: autogluon.features==1.5.1b20260122
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.5.1b20260122
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: ruff>=0.0.285; extra == "tests"
 Requires-Dist: flaky<4,>=3.7; extra == "tests"
 Requires-Dist: pytest-timeout<3,>=2.1; extra == "tests"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[raytune]==1.4.1b20251215; extra == "ray"
+Requires-Dist: autogluon.core[raytune]==1.5.1b20260122; extra == "ray"
 Provides-Extra: all
-Requires-Dist: autogluon.core[raytune]==1.4.1b20251215; extra == "all"
+Requires-Dist: autogluon.core[raytune]==1.5.1b20260122; extra == "all"
 Dynamic: author
 Dynamic: classifier
 Dynamic: description
{autogluon_timeseries-1.4.1b20251215.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
-autogluon.timeseries-1.
+autogluon.timeseries-1.5.1b20260122-py3.11-nspkg.pth,sha256=kAlKxjI5mE3Pwwqphu2maN5OBQk8W8ew70e_qbI1c6A,482
 autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
 autogluon/timeseries/learner.py,sha256=9kGn0ACGfbyZRlZmwkrgBbkwq7c2715yKDCh1EK3EWQ,14961
-autogluon/timeseries/predictor.py,sha256=
+autogluon/timeseries/predictor.py,sha256=egBu6YKVe8GRrLJ1khOKflzSu-VDSH5_mCCQuaXQNRY,106686
 autogluon/timeseries/regressor.py,sha256=HDdqi7MYRheW3uZy5c50sqVDAHap0ooyQBdOvKEKkWM,11718
 autogluon/timeseries/splitter.py,sha256=2rypDxDKkqOC2v5nPJ6m0cmHQTZ9D6qUFrQV1HC9lz4,2329
-autogluon/timeseries/version.py,sha256=
+autogluon/timeseries/version.py,sha256=MnD7Xa8VoA14eqZRxJp2qccaD7BlVLoiG_OaIqHKK5E,91
 autogluon/timeseries/configs/__init__.py,sha256=wiLBwxZkDTQBJkSJ9-xz3p_yJxX0dbHe108dS1P5O6A,183
-autogluon/timeseries/configs/hyperparameter_presets.py,sha256=
-autogluon/timeseries/configs/predictor_presets.py,sha256=
+autogluon/timeseries/configs/hyperparameter_presets.py,sha256=F38QMemh3LR4cT60xMZctI6O1XTOgNpVSleGOKMfhQQ,1586
+autogluon/timeseries/configs/predictor_presets.py,sha256=2CkUXIFtup5w5sQkIhoU5G84b9jiNfcUC0yEug3izGY,2327
 autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
 autogluon/timeseries/dataset/ts_dataframe.py,sha256=IOIkwV_VPV3JvilNt98gZ77gMHIpk-Ug-trDvqSk_Jg,52228
 autogluon/timeseries/metrics/__init__.py,sha256=iFGLMOtDJ470dbmmx1BsdUKBx4RwI6ZQGFat3Z-wpzI,3567
@@ -18,25 +18,25 @@ autogluon/timeseries/metrics/utils.py,sha256=_Nz6GLbs91WhqN1PoA53wD4xEEuPIQ0juV5
 autogluon/timeseries/models/__init__.py,sha256=zPdwxiveOTGU9658tDPMFXbflZ5fzd_AJdbCacbfZ0s,1375
 autogluon/timeseries/models/registry.py,sha256=dkuyKG5UK2xiGtXcsuyRDXrI-YC84zkPre8Z3wt9T_A,2115
 autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=80ShlklwAnGoMEKjMDjgoVrkGG6x2Eg3nOwbjJVMCqk,31909
 autogluon/timeseries/models/abstract/model_trial.py,sha256=xKD6Nw8hIqAq4HxNVcGUhr9BuEqzFn7FX0TenvZHU0Q,3753
 autogluon/timeseries/models/abstract/tunable.py,sha256=thl_wJjB9ao1T5NNF1RVH5k3yFqmao0irX-eUNqDs8k,7111
 autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=E5fZsdFPgVdyCVyj5bGmn_lQFlCMn2NvuRLBMcCFvhM,205
 autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=FJlYqMZJaltTlh54LMrDOgICgGanIymBI2F4OevVQ6A,36690
-autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=
+autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=q2V8dSyHns7gkcDwAIpczFse3vHHyYSm4LjJ4KICsWo,23360
 autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=AkXEInK4GocApU5GylECH01qgz5cLLLqC9apuN0eUbQ,2873
 autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
 autogluon/timeseries/models/chronos/__init__.py,sha256=dIoAImmZc0dTlut4CZkJxcg1bpuHKZkS8x8Y6fBoUAY,113
-autogluon/timeseries/models/chronos/chronos2.py,sha256=
-autogluon/timeseries/models/chronos/model.py,sha256=
-autogluon/timeseries/models/chronos/utils.py,sha256=
+autogluon/timeseries/models/chronos/chronos2.py,sha256=N8TW1EaYwqPqeRbntlkhz9L7uADBna0LDZPMYWH4w3c,16800
+autogluon/timeseries/models/chronos/model.py,sha256=npTcHR6nSi7lfCzZfVl_9G6iHJwIMRm3wfqjOAnBcIQ,33681
+autogluon/timeseries/models/chronos/utils.py,sha256=33_kycc7AVasS3c7-AuVFtqBTZzV_yszr-MpKe28S3M,14449
 autogluon/timeseries/models/ensemble/__init__.py,sha256=3_Vn6RHpjouthrEoXs1guKUpUX6JoUgMVCgxPt2pyLw,1302
 autogluon/timeseries/models/ensemble/abstract.py,sha256=gAaspq4f67MTfs7KW6ADVU0KfPeBKySPstCqUeC7JYs,4579
 autogluon/timeseries/models/ensemble/ensemble_selection.py,sha256=hepycVJTtbibzTKq5Sk04L_vUuYlLFItkSybaCc_Jv8,6366
-autogluon/timeseries/models/ensemble/per_item_greedy.py,sha256=
+autogluon/timeseries/models/ensemble/per_item_greedy.py,sha256=XaUEUO6fQD7Ck7X0882pvax5e3g55o_g8uMBnSP-c5M,7869
 autogluon/timeseries/models/ensemble/array_based/__init__.py,sha256=u4vGTH9gP6oATYKkxnvoiDZvc5rqfnfgrODHxIvHP7U,207
 autogluon/timeseries/models/ensemble/array_based/abstract.py,sha256=Oci1XEgFFTle0JF5Z8PhnMjG1iPrhhtunoKUPUPhTLw,10190
-autogluon/timeseries/models/ensemble/array_based/models.py,sha256=
+autogluon/timeseries/models/ensemble/array_based/models.py,sha256=UOV3t3QH_j0AGg2y3gJIWZ5rS5tHI39z3yUJlhkEyA0,8603
 autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py,sha256=OJPZZzowllw7Ks0aXF8Hye1_1Ql8XhRfdtv3e3A_4AE,424
 autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py,sha256=MSeYWwxH1mL3lrsHbDpzAg61Bovs2Fxkxl3qzj5QrXE,2771
 autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py,sha256=4rUYEXcyyZ8hPITzg1tSDWmHSGfwqrTp5dd-b7MP5Hs,7245
@@ -45,16 +45,16 @@ autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py,sha256=prH
 autogluon/timeseries/models/ensemble/weighted/__init__.py,sha256=_LipTsDnYvTFmjZWsb1Vrm-eALsVVfUlF2gOpcaqE2Q,206
 autogluon/timeseries/models/ensemble/weighted/abstract.py,sha256=meGVoSfPOjmEwTKGRTUQJ1N9bZtpewJ217TGqKNye04,1839
 autogluon/timeseries/models/ensemble/weighted/basic.py,sha256=KsFcdmhkjywqSYvx9rdWoFzjLO-czKsOj3CWuC61SS4,3715
-autogluon/timeseries/models/ensemble/weighted/greedy.py,sha256=
+autogluon/timeseries/models/ensemble/weighted/greedy.py,sha256=ziyudzlLDPLW_vGuh2U9uZ2YH0h478mRpM3H9q7nf4M,2657
 autogluon/timeseries/models/gluonts/__init__.py,sha256=YfyNYOkhhNsloA4MAavfmqKO29_q6o4lwPoV7L4_h7M,355
 autogluon/timeseries/models/gluonts/abstract.py,sha256=qJ60DSkzSI4E1kx5RGeGBehkiMvcAVGSUXYSpZXo8nk,27699
 autogluon/timeseries/models/gluonts/dataset.py,sha256=ApR-r4o0OV4jQ2hYUppJ4yjvWX02JoHod5O4acEKiHw,5074
-autogluon/timeseries/models/gluonts/models.py,sha256=
+autogluon/timeseries/models/gluonts/models.py,sha256=Djb2R_2ZSK-xQ1wvFwWGXxshSQeFD9WsMLdF4yxuGnQ,25232
 autogluon/timeseries/models/local/__init__.py,sha256=TiKY7M6Foy8vtshfZiStEH58_XG62w4oF1TQYAQ1B0s,344
 autogluon/timeseries/models/local/abstract_local_model.py,sha256=7pbyE4vhXgoCEcHAhxpxBVCOEG-LSrBptGwjLXd-s8o,11335
 autogluon/timeseries/models/local/naive.py,sha256=w0XuMcgcTvTUEi2iXcd6BGvyHKB-kpqbv9c9iK4pMOA,7490
 autogluon/timeseries/models/local/npts.py,sha256=mKuDsGnaYV8QkIgGR8se-1pXb2JAxzafESt2g_21ENA,4530
-autogluon/timeseries/models/local/statsforecast.py,sha256=
+autogluon/timeseries/models/local/statsforecast.py,sha256=h_WpYGAEA761ehhZv6RZXsGC-WVr4BkPh1C8xUbcKuQ,33275
 autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
 autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=bv8_ux-7JXPwhbFXeBN893xQo6echCCMwqH4aEMK250,12937
 autogluon/timeseries/models/toto/__init__.py,sha256=rQaVjZJV5ZsJGC0jhQ6CA4nYeXdV1KtlyDz2i2usQnY,54
@@ -77,7 +77,7 @@ autogluon/timeseries/trainer/__init__.py,sha256=_tw3iioJfvtIV7wnjtEMv0yS8oabmCFx
 autogluon/timeseries/trainer/ensemble_composer.py,sha256=zGa8vocPQGsHf-7ti8DsHwjEA176FkCt7up2LwWCK4Y,19465
 autogluon/timeseries/trainer/model_set_builder.py,sha256=kROApbu10_ro-GVYlnx3oTKZj2TcNswWbOFB1QyBCOc,10737
 autogluon/timeseries/trainer/prediction_cache.py,sha256=KKs22UUGrVfQN_81IgzL7Bfc8tjWk3k6YW3uHURaSs0,5496
-autogluon/timeseries/trainer/trainer.py,sha256=
+autogluon/timeseries/trainer/trainer.py,sha256=vraCZtARPV3gNHlhktmDlbTr8Mn59H-JOIaXAdpTDw8,56057
 autogluon/timeseries/trainer/utils.py,sha256=7N4vRP6GFUlRAahxQ9PqppdIMFqMz3wpZ5u-_onR24M,588
 autogluon/timeseries/transforms/__init__.py,sha256=fKlT4pkJ_8Gl7IUTc3uSDzt2Xow5iH5w6fPB3ePNrTg,127
 autogluon/timeseries/transforms/covariate_scaler.py,sha256=CpTtokiE1uEg_RJa4kEUUuBwXZpPL11OC2fgCkRpGlQ,6986
@@ -93,11 +93,11 @@ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbj
 autogluon/timeseries/utils/datetime/lags.py,sha256=dijskkPDJXhXbRHGQZPhUFuEom3typKbOeET7cxkHGY,5965
 autogluon/timeseries/utils/datetime/seasonality.py,sha256=-w3bULdkIZKP-JrO1ahHLyNCanLhejocHlasZShuwA0,802
 autogluon/timeseries/utils/datetime/time_features.py,sha256=kEOFls4Nzh8nO0Pcz1DwLsC_NA3hMI4JUlZI3kuvuts,2666
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
-autogluon_timeseries-1.
+autogluon_timeseries-1.5.1b20260122.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon_timeseries-1.5.1b20260122.dist-info/licenses/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon_timeseries-1.5.1b20260122.dist-info/METADATA,sha256=8qZ5DKjeik_GWTCy8Ayk2m-H1lKa3z142aGAHhYGy6A,13425
+autogluon_timeseries-1.5.1b20260122.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+autogluon_timeseries-1.5.1b20260122.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon_timeseries-1.5.1b20260122.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon_timeseries-1.5.1b20260122.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon_timeseries-1.5.1b20260122.dist-info/RECORD,,
Files without changes (renamed only): the py3.11-nspkg.pth file and the dist-info WHEEL, licenses/LICENSE, licenses/NOTICE, namespace_packages.txt, top_level.txt, and zip-safe entries listed above.