autogluon.timeseries 1.2.1b20250416__py3-none-any.whl → 1.2.1b20250417__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/timeseries/learner.py +7 -5
- autogluon/timeseries/metrics/abstract.py +1 -1
- autogluon/timeseries/metrics/point.py +4 -4
- autogluon/timeseries/metrics/quantile.py +2 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +115 -196
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +7 -7
- autogluon/timeseries/models/chronos/model.py +37 -30
- autogluon/timeseries/models/gluonts/abstract_gluonts.py +10 -10
- autogluon/timeseries/models/gluonts/torch/models.py +8 -8
- autogluon/timeseries/models/local/abstract_local_model.py +1 -1
- autogluon/timeseries/models/local/naive.py +2 -2
- autogluon/timeseries/models/multi_window/multi_window_model.py +0 -3
- autogluon/timeseries/predictor.py +74 -58
- autogluon/timeseries/trainer.py +3 -3
- autogluon/timeseries/utils/forecast.py +13 -8
- autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/METADATA +4 -4
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/RECORD +25 -25
- /autogluon.timeseries-1.2.1b20250416-py3.9-nspkg.pth → /autogluon.timeseries-1.2.1b20250417-py3.9-nspkg.pth +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/LICENSE +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/NOTICE +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/WHEEL +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/chronos/model.py
CHANGED
@@ -228,7 +228,7 @@ class ChronosModel(AbstractTimeSeriesModel):
         fine_tune_ckpt_path = Path(model.path) / cls.fine_tuned_ckpt_name
         if fine_tune_ckpt_path.exists():
             logger.debug(f"\tFine-tuned checkpoint exists, setting model_path to {fine_tune_ckpt_path}")
-            model.model_path = fine_tune_ckpt_path
+            model.model_path = str(fine_tune_ckpt_path)
 
         return model
 
@@ -249,8 +249,10 @@ class ChronosModel(AbstractTimeSeriesModel):
         """The default configuration of the model used by AutoGluon if the model is one of those
        defined in MODEL_CONFIGS. For now, these are ``autogluon/chronos-t5-*`` family of models.
         """
-
-
+        for k in MODEL_CONFIGS:
+            if k in self.model_path:
+                return MODEL_CONFIGS[k]
+        return {}
 
     @property
     def min_num_gpus(self) -> int:
@@ -314,28 +316,38 @@ class ChronosModel(AbstractTimeSeriesModel):
 
         return torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8
 
-    def _get_model_params(self) -> dict:
+    def get_hyperparameters(self) -> dict:
         """Gets params that are passed to the inner model."""
-        init_args = super()._get_model_params()
-
-        init_args.setdefault("batch_size", self.default_batch_size)
-        init_args.setdefault("num_samples", self.default_num_samples)
-        init_args.setdefault("device", None)
-        # if the model requires a GPU, set the torch dtype to bfloat16
-        init_args.setdefault("torch_dtype", self.default_torch_dtype)
-        init_args.setdefault("data_loader_num_workers", 0)
-        init_args.setdefault("context_length", None)
-        init_args.setdefault("optimization_strategy", None)
-        init_args.setdefault("fine_tune", False)
-        init_args.setdefault("keep_transformers_logs", False)
-        init_args.setdefault("fine_tune_lr", 1e-5)
-        init_args.setdefault("fine_tune_steps", 1000)
-        init_args.setdefault("fine_tune_batch_size", 32)
-        init_args.setdefault("eval_during_fine_tune", False)
-        init_args.setdefault("fine_tune_eval_max_items", 256)
-        init_args.setdefault("fine_tune_shuffle_buffer_size", 10_000)
+        init_args = super().get_hyperparameters()
 
         eval_during_fine_tune = init_args["eval_during_fine_tune"]
+        fine_tune_trainer_kwargs = self._get_fine_tune_trainer_kwargs(init_args, eval_during_fine_tune)
+        user_fine_tune_trainer_kwargs = init_args.get("fine_tune_trainer_kwargs", {})
+        fine_tune_trainer_kwargs.update(user_fine_tune_trainer_kwargs)
+        init_args["fine_tune_trainer_kwargs"] = fine_tune_trainer_kwargs
+
+        return init_args.copy()
+
+    def _get_default_hyperparameters(self) -> Dict:
+        return {
+            "batch_size": self.default_batch_size,
+            "num_samples": self.default_num_samples,
+            "device": None,
+            "torch_dtype": self.default_torch_dtype,
+            "data_loader_num_workers": 0,
+            "context_length": None,
+            "optimization_strategy": None,
+            "fine_tune": False,
+            "keep_transformers_logs": False,
+            "fine_tune_lr": 1e-5,
+            "fine_tune_steps": 1000,
+            "fine_tune_batch_size": 32,
+            "eval_during_fine_tune": False,
+            "fine_tune_eval_max_items": 256,
+            "fine_tune_shuffle_buffer_size": 10_000,
+        }
+
+    def _get_fine_tune_trainer_kwargs(self, init_args, eval_during_fine_tune: bool):
         output_dir = Path(self.path) / "transformers_logs"
         fine_tune_trainer_kwargs = dict(
             output_dir=str(output_dir),
@@ -364,12 +376,8 @@ class ChronosModel(AbstractTimeSeriesModel):
             load_best_model_at_end=True if eval_during_fine_tune else False,
             metric_for_best_model="eval_loss" if eval_during_fine_tune else None,
         )
-        user_fine_tune_trainer_kwargs = init_args.get("fine_tune_trainer_kwargs", {})
-        fine_tune_trainer_kwargs.update(user_fine_tune_trainer_kwargs)
 
-
-
-        return init_args
+        return fine_tune_trainer_kwargs
 
     def _validate_and_assign_attributes(self, model_params: dict):
         # we validate the params here because their values are concrete,
@@ -432,8 +440,7 @@ class ChronosModel(AbstractTimeSeriesModel):
         transformers_logger.setLevel(logging.ERROR if verbosity <= 3 else logging.INFO)
 
         self._check_fit_params()
-
-        model_params = self._get_model_params()
+        model_params = self.get_hyperparameters()
         self._validate_and_assign_attributes(model_params)
         do_fine_tune = model_params["fine_tune"]
 
@@ -655,7 +662,7 @@ class ChronosModel(AbstractTimeSeriesModel):
         return TimeSeriesDataFrame(df)
 
     def _more_tags(self) -> Dict:
-        do_fine_tune = self._get_model_params()["fine_tune"]
+        do_fine_tune = self.get_hyperparameters()["fine_tune"]
         return {
             "allow_nan": True,
             "can_use_train_data": do_fine_tune,
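Note: the main change in this file replaces a long chain of setdefault() calls with a declarative _get_default_hyperparameters() dict that the base class merges with user-supplied values. A minimal sketch of the pattern, using hypothetical classes rather than the actual AutoGluon implementation:

class BaseModel:
    def __init__(self, hyperparameters=None):
        self._user_params = dict(hyperparameters or {})

    def _get_default_hyperparameters(self) -> dict:
        return {}

    def get_hyperparameters(self) -> dict:
        # PEP 584 dict union: the right-hand operand wins, so user-supplied
        # values override the declared defaults.
        return self._get_default_hyperparameters() | self._user_params


class ToyChronosModel(BaseModel):
    def _get_default_hyperparameters(self) -> dict:
        return {"batch_size": 16, "num_samples": 20, "fine_tune": False}


model = ToyChronosModel(hyperparameters={"batch_size": 64})
assert model.get_hyperparameters() == {"batch_size": 64, "num_samples": 20, "fine_tune": False}

Compared to mutating one dict in place with setdefault(), the defaults stay inspectable in a single place, and subclasses can override individual keys without re-running the whole chain.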
autogluon/timeseries/models/gluonts/abstract_gluonts.py
CHANGED
@@ -245,9 +245,9 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
     def _get_hpo_backend(self):
         return RAY_BACKEND
 
-    def _deferred_init_params_aux(self, dataset: TimeSeriesDataFrame) -> None:
-        """Update GluonTS specific parameters with information available only at training time."""
-        model_params = self._get_model_params()
+    def _deferred_init_hyperparameters(self, dataset: TimeSeriesDataFrame) -> None:
+        """Update GluonTS specific hyperparameters with information available only at training time."""
+        model_params = self.get_hyperparameters()
         disable_static_features = model_params.get("disable_static_features", False)
         if not disable_static_features:
             self.num_feat_static_cat = len(self.metadata.static_features_cat)
@@ -301,7 +301,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
 
         self.negative_data = (dataset[self.target] < 0).any()
 
-    def _get_default_params(self):
+    def _get_default_hyperparameters(self):
         """Gets default parameters for GluonTS estimator initialization that are available after
         AbstractTimeSeriesModel initialization (i.e., before deferred initialization). Models may
         override this method to update default parameters.
@@ -319,7 +319,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             "covariate_scaler": "global",
         }
 
-    def _get_model_params(self) -> dict:
+    def get_hyperparameters(self) -> dict:
         """Gets params that are passed to the inner model."""
         # for backward compatibility with the old GluonTS MXNet API
         parameter_name_aliases = {
@@ -327,7 +327,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             "learning_rate": "lr",
         }
 
-        init_args = super()._get_model_params()
+        init_args = super().get_hyperparameters()
         for alias, actual in parameter_name_aliases.items():
             if alias in init_args:
                 if actual in init_args:
@@ -335,12 +335,12 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             else:
                 init_args[actual] = init_args.pop(alias)
 
-        return self._get_default_params() | init_args
+        return self._get_default_hyperparameters() | init_args
 
     def _get_estimator_init_args(self) -> Dict[str, Any]:
-        """Get GluonTS specific constructor arguments for estimator objects, an alias to `self._get_model_params`
+        """Get GluonTS specific constructor arguments for estimator objects, an alias to `self.get_hyperparameters`
         for better readability."""
-        return self._get_model_params()
+        return self.get_hyperparameters()
 
     def _get_estimator_class(self) -> Type[GluonTSEstimator]:
         raise NotImplementedError
@@ -517,7 +517,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             time_limit=time_limit,
             early_stopping_patience=None if val_data is None else init_args["early_stopping_patience"],
         )
-        self._deferred_init_params_aux(train_data)
+        self._deferred_init_hyperparameters(train_data)
 
         estimator = self._get_estimator()
         with warning_filter(), disable_root_logger(), gluonts.core.settings.let(gluonts_env, use_tqdm=False):
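Note: the `_get_default_hyperparameters() | init_args` merge above relies on PEP 584 dict union (Python >= 3.9). When both operands define a key, the right-hand side wins, so explicit user arguments override the model defaults:

defaults = {"context_length": 96, "covariate_scaler": "global"}
init_args = {"context_length": 512}
assert (defaults | init_args) == {"context_length": 512, "covariate_scaler": "global"}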
autogluon/timeseries/models/gluonts/torch/models.py
CHANGED
@@ -208,8 +208,8 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel):
 
         return TemporalFusionTransformerEstimator
 
-    def _get_default_params(self):
-        return super()._get_default_params() | {
+    def _get_default_hyperparameters(self):
+        return super()._get_default_hyperparameters() | {
             "context_length": min(512, max(64, 2 * self.prediction_length)),
         }
 
@@ -281,8 +281,8 @@ class DLinearModel(AbstractGluonTSModel):
         If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
     """
 
-    def _get_default_params(self):
-        return super()._get_default_params() | {
+    def _get_default_hyperparameters(self):
+        return super()._get_default_hyperparameters() | {
             "context_length": 96,
         }
 
@@ -346,8 +346,8 @@ class PatchTSTModel(AbstractGluonTSModel):
 
         return PatchTSTEstimator
 
-    def _get_default_params(self):
-        return super()._get_default_params() | {"context_length": 96, "patch_len": 16}
+    def _get_default_hyperparameters(self):
+        return super()._get_default_hyperparameters() | {"context_length": 96, "patch_len": 16}
 
     def _get_estimator_init_args(self) -> Dict[str, Any]:
         init_kwargs = super()._get_estimator_init_args()
@@ -515,8 +515,8 @@ class TiDEModel(AbstractGluonTSModel):
 
         return TiDEEstimator
 
-    def _get_default_params(self):
-        return super()._get_default_params() | {
+    def _get_default_hyperparameters(self):
+        return super()._get_default_hyperparameters() | {
             "context_length": min(512, max(64, 2 * self.prediction_length)),
             "encoder_hidden_dim": 64,
             "decoder_hidden_dim": 64,
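Note: several models above share the same context_length default, which clamps twice the forecast horizon into the range [64, 512]. A worked check of the formula:

def default_context_length(prediction_length: int) -> int:
    return min(512, max(64, 2 * prediction_length))

assert default_context_length(5) == 64      # short horizons are floored at 64
assert default_context_length(100) == 200   # mid-range horizons use 2x the horizon
assert default_context_length(400) == 512   # long horizons are capped at 512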
autogluon/timeseries/models/local/abstract_local_model.py
CHANGED
@@ -112,7 +112,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
             raise TimeLimitExceeded
 
         # Initialize parameters passed to each local model
-        raw_local_model_args = self._get_model_params().copy()
+        raw_local_model_args = self.get_hyperparameters().copy()
 
         unused_local_model_args = []
         local_model_args = {}
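Note: the trailing .copy() matters because the surrounding code sorts entries out of the returned dict into local_model_args. An illustrative sketch (toy dicts, not the AutoGluon source) of the aliasing bug the copy avoids:

hyperparameters = {"seasonal_period": 7, "n_jobs": -1}  # imagine this dict is stored on the model

raw_local_model_args = hyperparameters.copy()
raw_local_model_args.pop("n_jobs")   # safe: only the copy is mutated
assert "n_jobs" in hyperparameters   # the stored hyperparameters stay intact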
autogluon/timeseries/models/local/naive.py
CHANGED
@@ -85,7 +85,7 @@ class SeasonalNaiveModel(AbstractLocalModel):
 
 
 class AverageModel(AbstractLocalModel):
-    """Baseline model that sets the forecast equal to the historic average or quantile.
+    """Baseline model that sets the forecast equal to the historical average or quantile.
 
     Other Parameters
     ----------------
@@ -117,7 +117,7 @@ class AverageModel(AbstractLocalModel):
 
 
 class SeasonalAverageModel(AbstractLocalModel):
-    """Baseline model that sets the forecast equal to the historic average or quantile in the same season.
+    """Baseline model that sets the forecast equal to the historical average or quantile in the same season.
 
     Other Parameters
     ----------------
autogluon/timeseries/models/multi_window/multi_window_model.py
CHANGED
@@ -212,9 +212,6 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         if store_predict_time:
             assert self.predict_time is not None
 
-    def get_user_params(self) -> dict:
-        return self.model_base.get_user_params()
-
     def _get_search_space(self):
         return self.model_base._get_search_space()
 
autogluon/timeseries/predictor.py
CHANGED
@@ -27,6 +27,7 @@ from autogluon.timeseries.learner import TimeSeriesLearner
 from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
 from autogluon.timeseries.splitter import ExpandingWindowSplitter
 from autogluon.timeseries.trainer import TimeSeriesTrainer
+from autogluon.timeseries.utils.forecast import make_future_data_frame
 
 logger = logging.getLogger("autogluon.timeseries")
 
@@ -305,11 +306,11 @@ class TimeSeriesPredictor:
         return df
 
     def _check_data_for_evaluation(self, data: TimeSeriesDataFrame, name: str = "data") -> None:
-        """Make sure that provided evaluation data includes both historic and future time series values."""
+        """Make sure that provided evaluation data includes both historical and future time series values."""
         if data.num_timesteps_per_item().min() <= self.prediction_length:
             raise ValueError(
                 f"Cannot reserve last prediction_length={self.prediction_length} time steps for evaluation in some "
-                f"time series in {name}. Please make sure that {name} includes both historic and future data, and that"
+                f"time series in {name}. Please make sure that {name} includes both historical and future data, and that"
                 f"all time series have length > prediction_length (at least {self.prediction_length + 1})"
             )
 
@@ -441,9 +442,8 @@ class TimeSeriesPredictor:
 
             data.static_features["store_id"] = data.static_features["store_id"].astype("category")
 
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
-
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
         tuning_data : Union[TimeSeriesDataFrame, pd.DataFrame, Path, str], optional
             Data reserved for model selection and hyperparameter tuning, rather than training individual models. Also
             used to compute the validation scores. Note that only the last ``prediction_length`` time steps of each
@@ -455,16 +455,10 @@ class TimeSeriesPredictor:
             Leaving this argument empty and letting AutoGluon automatically generate the validation set from
             ``train_data`` is a good default.
 
-            If ``known_covariates_names`` were specified when creating the predictor, ``tuning_data`` must include
-            the columns listed in ``known_covariates_names`` with the covariates values aligned with the target time
-            series.
-
-            If ``train_data`` has past covariates or static features, ``tuning_data`` must have also include them (with
-            same columns names and dtypes).
-
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
+            The names and dtypes of columns and static features in ``tuning_data`` must match the ``train_data``.
 
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
         time_limit : int, optional
             Approximately how long :meth:`~autogluon.timeseries.TimeSeriesPredictor.fit` will run (wall-clock time in
             seconds). If not specified, :meth:`~autogluon.timeseries.TimeSeriesPredictor.fit` will run until all models
@@ -775,23 +769,23 @@ class TimeSeriesPredictor:
         Parameters
         ----------
         data : Union[TimeSeriesDataFrame, pd.DataFrame, Path, str]
-            Historic time series data for which the forecast needs to be made.
-
-            If ``known_covariates_names`` were specified when creating the predictor, ``data`` must include the columns
-            listed in ``known_covariates_names`` with the covariates values aligned with the target time series.
+            Historical time series data for which the forecast needs to be made.
 
-            If ``train_data`` used to train the predictor contained past covariates or static features, then ``data``
-            must also include them (with same column names and dtypes).
+            The names and dtypes of columns and static features in ``data`` must match the ``train_data`` used to train
+            the predictor.
 
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
         known_covariates : Union[TimeSeriesDataFrame, pd.DataFrame, Path, str], optional
             If ``known_covariates_names`` were specified when creating the predictor, it is necessary to provide the
-            values of the known covariates for each time series during the forecast horizon.
+            values of the known covariates for each time series during the forecast horizon. Specifically:
 
-            -
-            -
-            -
+            - Must contain all columns listed in ``known_covariates_names``.
+            - Must include all ``item_id`` values present in the input ``data``.
+            - Must include ``timestamp`` values for the full forecast horizon (i.e., ``prediction_length`` time steps) following the end of each series in the input ``data``.
+
+            You can use :meth:`autogluon.timeseries.TimeSeriesPredictor.make_future_data_frame` to generate a template
+            containing the required ``item_id`` and ``timestamp`` combinations for the `known_covariates` data frame.
 
             See example below.
         model : str, optional
@@ -872,17 +866,14 @@ class TimeSeriesPredictor:
             The data to evaluate the best model on. The last ``prediction_length`` time steps of each time series in
             ``data`` will be held out for prediction and forecast accuracy will be calculated on these time steps.
 
-            Must include both historic and future data (i.e., length of all time series in ``data`` must be at least
+            Must include both historical and future data (i.e., length of all time series in ``data`` must be at least
             ``prediction_length + 1``).
 
-            If ``known_covariates_names`` were specified when creating the predictor, ``data`` must include the columns
-            listed in ``known_covariates_names`` with the covariates values aligned with the target time series.
-
-            If ``train_data`` used to train the predictor contained past covariates or static features, then ``data``
-            must also include them (with same column names and dtypes).
+            The names and dtypes of columns and static features in ``data`` must match the ``train_data`` used to train
+            the predictor.
 
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
         model : str, optional
             Name of the model that you would like to evaluate. By default, the best model during training
             (with highest validation score) will be used.
@@ -948,15 +939,11 @@ class TimeSeriesPredictor:
             item, will be held out for prediction and forecast accuracy will be calculated on these time steps.
             More accurate feature importances will be obtained from new data that was held-out during ``fit()``.
 
-            If ``known_covariates_names`` were specified when creating the predictor, ``data`` must include the columns
-            listed in ``known_covariates_names`` with the covariates values aligned with the target time series.
-            This data must contain the label column with the same column name as specified during ``fit()``.
-
-            If ``train_data`` used to train the predictor contained past covariates or static features, then ``data``
-            must also include them (with same column names and dtypes).
+            The names and dtypes of columns and static features in ``data`` must match the ``train_data`` used to train
+            the predictor.
 
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
 
             If ``data`` is not provided, then validation (tuning) data provided during training (or the held out data used for
             validation if ``tuning_data`` was not explicitly provided ``fit()``) will be used.
@@ -1228,18 +1215,14 @@ class TimeSeriesPredictor:
         Parameters
         ----------
         data : Union[TimeSeriesDataFrame, pd.DataFrame, Path, str], optional
-            dataset used for additional evaluation. Must include both historic and future data (i.e., length of all
+            dataset used for additional evaluation. Must include both historical and future data (i.e., length of all
             time series in ``data`` must be at least ``prediction_length + 1``).
 
-            If ``known_covariates_names`` were specified when creating the predictor, ``data`` must include the columns
-            listed in ``known_covariates_names`` with the covariates values aligned with the target time series.
-
-            If ``train_data`` used to train the predictor contained past covariates or static features, then ``data``
-            must also include them (with same column names and dtypes).
-
-            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
-            ``TimeSeriesDataFrame``.
+            The names and dtypes of columns and static features in ``data`` must match the ``train_data`` used to train
+            the predictor.
 
+            If provided data is a `pandas.DataFrame`, AutoGluon will attempt to convert it to a `TimeSeriesDataFrame`.
+            If a `str` or a `Path` is provided, AutoGluon will attempt to load this file.
         extra_info : bool, default = False
             If True, the leaderboard will contain an additional column `hyperparameters` with the hyperparameters used
             by each model during training. An empty dictionary `{}` means that the model was trained with default
@@ -1288,6 +1271,44 @@ class TimeSeriesPredictor:
             print(leaderboard)
         return leaderboard
 
+    def make_future_data_frame(self, data: Union[TimeSeriesDataFrame, pd.DataFrame, Path, str]) -> pd.DataFrame:
+        """Generate a data frame with the `item_id` and `timestamp` values corresponding to the forecast horizon.
+
+        Parameters
+        ----------
+        data : Union[TimeSeriesDataFrame, pd.DataFrame, Path, str]
+            Historical time series data.
+
+        Returns
+        -------
+        forecast_horizon : pd.DataFrame
+            Data frame with columns `item_id` and `timestamp` corresponding to the forecast horizon. For each item ID
+            in `data`, `forecast_horizon` will contain the timestamps for the next `prediction_length` time steps,
+            following the end of each series in the input data.
+
+        Examples
+        --------
+        >>> print(data)
+                            target
+        item_id timestamp
+        A       2024-01-01       0
+                2024-01-02       1
+                2024-01-03       2
+        B       2024-04-07       3
+                2024-04-08       4
+        >>> predictor = TimeSeriesPredictor(prediction_length=2, freq="D")
+        >>> print(predictor.make_future_data_frame(data))
+          item_id  timestamp
+        0       A 2024-01-04
+        0       A 2024-01-05
+        1       B 2024-04-09
+        1       B 2024-04-10
+        """
+        if self.freq is None:
+            raise ValueError("Please fit the predictor before calling `make_future_data_frame`")
+        data = self._check_and_prepare_data_frame(data)
+        return make_future_data_frame(data, prediction_length=self.prediction_length, freq=self.freq)
+
     def fit_summary(self, verbosity: int = 1) -> Dict[str, Any]:
         """Output summary of information about models produced during
         :meth:`~autogluon.timeseries.TimeSeriesPredictor.fit`.
@@ -1323,7 +1344,7 @@ class TimeSeriesPredictor:
         model_hyperparams = {}
         for model_name in self.model_names():
             model_obj = self._trainer.load_model(model_name)
-            model_hyperparams[model_name] = model_obj._get_model_params()
+            model_hyperparams[model_name] = model_obj.get_hyperparameters()
 
         results["model_hyperparams"] = model_hyperparams
         results["leaderboard"] = self._learner.leaderboard()
@@ -1400,11 +1421,6 @@ class TimeSeriesPredictor:
         )
         return refit_full_dict
 
-    def __dir__(self) -> List[str]:
-        # This hides method from IPython autocomplete, but not VSCode autocomplete
-        deprecated = ["score", "get_model_best", "get_model_names"]
-        return [d for d in super().__dir__() if d not in deprecated]
-
     def _simulation_artifact(self, test_data: TimeSeriesDataFrame) -> dict:
         """[Advanced] Computes and returns the necessary information to perform offline ensemble simulation."""
 
@@ -1463,7 +1479,7 @@ class TimeSeriesPredictor:
         point_forecast_column: Optional[str] = None,
         matplotlib_rc_params: Optional[dict] = None,
     ):
-        """Plot historic time series values and the forecasts.
+        """Plot historical time series values and the forecasts.
 
         Parameters
         ----------
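Note: a usage sketch of the new TimeSeriesPredictor.make_future_data_frame helper together with known_covariates. The toy data mirrors the docstring example; the "promotion" covariate name is made up for illustration:

import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

data = TimeSeriesDataFrame.from_data_frame(
    pd.DataFrame(
        {
            "item_id": ["A", "A", "A", "B", "B"],
            "timestamp": pd.to_datetime(
                ["2024-01-01", "2024-01-02", "2024-01-03", "2024-04-07", "2024-04-08"]
            ),
            "target": [0.0, 1.0, 2.0, 3.0, 4.0],
        }
    )
)
predictor = TimeSeriesPredictor(prediction_length=2, freq="D")
template = predictor.make_future_data_frame(data)  # columns: item_id, timestamp
template["promotion"] = 0.0  # fill in the future values of a known covariate
# After fitting a predictor created with known_covariates_names=["promotion"],
# the filled template can be passed as:
# predictions = predictor.predict(data, known_covariates=template)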
autogluon/timeseries/trainer.py
CHANGED
@@ -434,7 +434,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
                 "When `skip_model_selection=True`, only a single model must be provided via `hyperparameters` "
                 f"but {len(models)} models were given"
             )
-        if contains_searchspace(models[0]._get_model_params()):
+        if contains_searchspace(models[0].get_hyperparameters()):
             raise ValueError(
                 "When `skip_model_selection=True`, model configuration should contain no search spaces."
             )
@@ -462,7 +462,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
             if random_seed is not None:
                 seed_everything(random_seed + i)
 
-            if contains_searchspace(model._get_model_params()):
+            if contains_searchspace(model.get_hyperparameters()):
                 fit_log_message = f"Hyperparameter tuning model {model.name}. "
                 if time_left is not None:
                     fit_log_message += (
@@ -629,7 +629,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
             if isinstance(model, MultiWindowBacktestingModel):
                 model = model.most_recent_model
                 assert model is not None
-            model_info[model_name]["hyperparameters"] = model._get_model_params()
+            model_info[model_name]["hyperparameters"] = model.get_hyperparameters()
 
             if extra_metrics is None:
                 extra_metrics = []
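Note: contains_searchspace reports whether any hyperparameter value is still an unresolved search space rather than a concrete value. A hedged sketch of an equivalent check, assuming the autogluon.common.space module with its Space base class:

from autogluon.common import space

def toy_contains_searchspace(hyperparameters: dict) -> bool:
    # a search space (e.g. space.Real, space.Categorical) still needs HPO
    # to be resolved into a concrete value
    return any(isinstance(value, space.Space) for value in hyperparameters.values())

assert toy_contains_searchspace({"lr": space.Real(1e-4, 1e-2)})
assert not toy_contains_searchspace({"lr": 1e-3})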
autogluon/timeseries/utils/forecast.py
CHANGED
@@ -4,6 +4,7 @@ from typing import Optional
 import numpy as np
 import pandas as pd
 
+from autogluon.common.utils.deprecated_utils import Deprecated
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
 
 
@@ -18,20 +19,24 @@ def get_forecast_horizon_index_single_time_series(
     return pd.date_range(start=start_ts, periods=prediction_length, freq=freq, name=TIMESTAMP)
 
 
-def get_forecast_horizon_index_ts_dataframe(
-
+@Deprecated(
+    min_version_to_warn="1.3", min_version_to_error="2.0", new="TimeSeriesPredictor.forecast_horizon_data_frame"
+)
+def get_forecast_horizon_index_ts_dataframe(*args, **kwargs) -> pd.MultiIndex:
+    return pd.MultiIndex.from_frame(make_future_data_frame(*args, **kwargs))
+
+
+def make_future_data_frame(
     ts_dataframe: TimeSeriesDataFrame,
     prediction_length: int,
     freq: Optional[str] = None,
-) -> pd.MultiIndex:
+) -> pd.DataFrame:
     """For each item in the dataframe, get timestamps for the next `prediction_length` time steps into the future.
 
-    Returns a pandas.MultiIndex, where
-    - level 0 ("item_id") contains the same item_ids as the input ts_dataframe.
-    - level 1 ("timestamp") contains the next prediction_length time steps starting from the end of each time series.
+    Returns a pandas.DataFrame, with columns "item_id" and "timestamp" corresponding to the forecast horizon.
     """
     last = ts_dataframe.reset_index()[[ITEMID, TIMESTAMP]].groupby(by=ITEMID, sort=False, as_index=False).last()
-    item_ids = np.repeat(last[ITEMID], prediction_length)
+    item_ids = np.repeat(last[ITEMID].to_numpy(), prediction_length)
 
     if freq is None:
         freq = ts_dataframe.freq
@@ -41,4 +46,4 @@ def get_forecast_horizon_index_ts_dataframe(
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning)
         timestamps = np.dstack([last_ts + step * offset for step in range(1, prediction_length + 1)]).ravel()  # type: ignore[operator]
-    return pd.MultiIndex.from_arrays([item_ids, timestamps], names=[ITEMID, TIMESTAMP])
+    return pd.DataFrame({ITEMID: item_ids, TIMESTAMP: timestamps})
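Note: a standalone sketch of what make_future_data_frame computes: repeat each item_id prediction_length times, then step each series' last timestamp forward with the pandas frequency offset. The switch to last[ITEMID].to_numpy() is likely motivated by np.repeat dispatching to Series.repeat on a pandas Series, which would drag the pandas index along:

import numpy as np
import pandas as pd

last = pd.DataFrame(
    {"item_id": ["A", "B"], "timestamp": pd.to_datetime(["2024-01-03", "2024-04-08"])}
)
prediction_length = 2
offset = pd.tseries.frequencies.to_offset("D")

item_ids = np.repeat(last["item_id"].to_numpy(), prediction_length)
timestamps = np.dstack(
    [(last["timestamp"] + step * offset).to_numpy() for step in range(1, prediction_length + 1)]
).ravel()
print(pd.DataFrame({"item_id": item_ids, "timestamp": timestamps}))
#   item_id  timestamp
# 0       A 2024-01-04
# 1       A 2024-01-05
# 2       B 2024-04-09
# 3       B 2024-04-10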
autogluon/timeseries/version.py
CHANGED
@@ -1,3 +1,3 @@
 """This is the autogluon version file."""
-__version__ = "1.2.1b20250416"
+__version__ = "1.2.1b20250417"
 __lite__ = False
{autogluon.timeseries-1.2.1b20250416.dist-info → autogluon.timeseries-1.2.1b20250417.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.2.1b20250416
+Version: 1.2.1b20250417
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -55,9 +55,9 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.2.1b20250416
-Requires-Dist: autogluon.common==1.2.1b20250416
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250416
+Requires-Dist: autogluon.core[raytune]==1.2.1b20250417
+Requires-Dist: autogluon.common==1.2.1b20250417
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250417
 Provides-Extra: all
 Provides-Extra: chronos-onnx
 Requires-Dist: optimum[onnxruntime]<1.23,>=1.17; extra == "chronos-onnx"