autogluon.timeseries 1.4.1b20250824__tar.gz → 1.4.1b20250828__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of autogluon.timeseries as potentially problematic.
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/PKG-INFO +1 -1
- autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/configs/__init__.py +4 -0
- autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/configs/predictor_presets.py +84 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/abstract.py +2 -3
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/__init__.py +2 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +29 -18
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/predictor.py +2 -2
- autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/trainer/model_set_builder.py +256 -0
- {autogluon.timeseries-1.4.1b20250824/src/autogluon/timeseries → autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/trainer}/trainer.py +13 -14
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/features.py +1 -1
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/PKG-INFO +1 -1
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/SOURCES.txt +5 -3
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/requires.txt +4 -4
- autogluon.timeseries-1.4.1b20250824/src/autogluon/timeseries/configs/__init__.py +0 -3
- autogluon.timeseries-1.4.1b20250824/src/autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon.timeseries-1.4.1b20250824/src/autogluon/timeseries/models/presets.py +0 -280
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/setup.cfg +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/setup.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/dataset/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/evaluator.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/learner.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/point.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/quantile.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/utils.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/abstract/tunable.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/autogluon_tabular/per_step.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/autogluon_tabular/transforms.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/model.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/pipeline/base.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/chronos/pipeline/utils.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/ensemble/abstract.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/ensemble/basic.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/ensemble/greedy.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/gluonts/abstract.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/gluonts/dataset.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/gluonts/models.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/local/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/local/abstract_local_model.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/local/naive.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/local/npts.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/registry.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/regressor.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/splitter.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/transforms/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/transforms/covariate_scaler.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/transforms/target_scaler.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/forecast.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
- {autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/configs/hyperparameter_presets.py
ADDED
@@ -0,0 +1,62 @@
+from typing import Any, Union
+
+
+def get_hyperparameter_presets() -> dict[str, dict[str, Union[dict[str, Any], list[dict[str, Any]]]]]:
+    return {
+        "very_light": {
+            "Naive": {},
+            "SeasonalNaive": {},
+            "ETS": {},
+            "Theta": {},
+            "RecursiveTabular": {"max_num_samples": 100_000},
+            "DirectTabular": {"max_num_samples": 100_000},
+        },
+        "light": {
+            "Naive": {},
+            "SeasonalNaive": {},
+            "ETS": {},
+            "Theta": {},
+            "RecursiveTabular": {},
+            "DirectTabular": {},
+            "TemporalFusionTransformer": {},
+            "Chronos": {"model_path": "bolt_small"},
+        },
+        "light_inference": {
+            "SeasonalNaive": {},
+            "DirectTabular": {},
+            "RecursiveTabular": {},
+            "TemporalFusionTransformer": {},
+            "PatchTST": {},
+        },
+        "default": {
+            "SeasonalNaive": {},
+            "AutoETS": {},
+            "NPTS": {},
+            "DynamicOptimizedTheta": {},
+            "RecursiveTabular": {},
+            "DirectTabular": {},
+            "TemporalFusionTransformer": {},
+            "PatchTST": {},
+            "DeepAR": {},
+            "Chronos": [
+                {
+                    "ag_args": {"name_suffix": "ZeroShot"},
+                    "model_path": "bolt_base",
+                },
+                {
+                    "ag_args": {"name_suffix": "FineTuned"},
+                    "model_path": "bolt_small",
+                    "fine_tune": True,
+                    "target_scaler": "standard",
+                    "covariate_regressor": {"model_name": "CAT", "model_hyperparameters": {"iterations": 1_000}},
+                },
+            ],
+            "TiDE": {
+                "encoder_hidden_dim": 256,
+                "decoder_hidden_dim": 256,
+                "temporal_hidden_dim": 64,
+                "num_batches_per_epoch": 100,
+                "lr": 1e-4,
+            },
+        },
+    }
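For orientation, here is a minimal usage sketch of the new presets module. `get_hyperparameter_presets` and the "light" key come from the file above; the snippet itself is illustrative and assumes this package version is installed.

# Illustrative sketch (not part of the diff): reading the new hyperparameter presets.
from autogluon.timeseries.configs import get_hyperparameter_presets

presets = get_hyperparameter_presets()
light = presets["light"]        # maps model names to hyperparameter overrides
print(sorted(light))            # model names, e.g. ['Chronos', 'DirectTabular', 'ETS', ...]
print(light["Chronos"])         # {'model_path': 'bolt_small'}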
autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/configs/predictor_presets.py
ADDED
@@ -0,0 +1,84 @@
+"""Preset configurations for autogluon.timeseries Predictors"""
+
+from typing import Any
+
+from . import get_hyperparameter_presets
+
+TIMESERIES_PRESETS_ALIASES = dict(
+    chronos="chronos_small",
+    best="best_quality",
+    high="high_quality",
+    medium="medium_quality",
+    bq="best_quality",
+    hq="high_quality",
+    mq="medium_quality",
+)
+
+
+def get_predictor_presets() -> dict[str, Any]:
+    hp_presets = get_hyperparameter_presets()
+
+    predictor_presets = dict(
+        best_quality={"hyperparameters": "default", "num_val_windows": 2},
+        high_quality={"hyperparameters": "default"},
+        medium_quality={"hyperparameters": "light"},
+        fast_training={"hyperparameters": "very_light"},
+        # Chronos-Bolt models
+        bolt_tiny={
+            "hyperparameters": {"Chronos": {"model_path": "bolt_tiny"}},
+            "skip_model_selection": True,
+        },
+        bolt_mini={
+            "hyperparameters": {"Chronos": {"model_path": "bolt_mini"}},
+            "skip_model_selection": True,
+        },
+        bolt_small={
+            "hyperparameters": {"Chronos": {"model_path": "bolt_small"}},
+            "skip_model_selection": True,
+        },
+        bolt_base={
+            "hyperparameters": {"Chronos": {"model_path": "bolt_base"}},
+            "skip_model_selection": True,
+        },
+        # Original Chronos models
+        chronos_tiny={
+            "hyperparameters": {"Chronos": {"model_path": "tiny"}},
+            "skip_model_selection": True,
+        },
+        chronos_mini={
+            "hyperparameters": {"Chronos": {"model_path": "mini"}},
+            "skip_model_selection": True,
+        },
+        chronos_small={
+            "hyperparameters": {"Chronos": {"model_path": "small"}},
+            "skip_model_selection": True,
+        },
+        chronos_base={
+            "hyperparameters": {"Chronos": {"model_path": "base"}},
+            "skip_model_selection": True,
+        },
+        chronos_large={
+            "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
+            "skip_model_selection": True,
+        },
+        chronos_ensemble={
+            "hyperparameters": {
+                "Chronos": {"model_path": "small"},
+                **hp_presets["light_inference"],
+            }
+        },
+        chronos_large_ensemble={
+            "hyperparameters": {
+                "Chronos": {"model_path": "large", "batch_size": 8},
+                **hp_presets["light_inference"],
+            }
+        },
+    )
+
+    # update with aliases
+    predictor_presets = {
+        **predictor_presets,
+        **{k: predictor_presets[v].copy() for k, v in TIMESERIES_PRESETS_ALIASES.items()},
+    }
+
+    return predictor_presets
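A similar sketch for the predictor presets: aliases are shallow copies of the preset they point to, so "high" and "high_quality" resolve to equal configurations. The import path mirrors the one used in predictor.py further below; the snippet itself is illustrative.

# Illustrative sketch (not part of the diff): predictor presets and their aliases.
from autogluon.timeseries.configs import get_predictor_presets

presets = get_predictor_presets()
assert presets["high"] == presets["high_quality"]   # alias copies the target config
print(presets["bolt_small"])
# {'hyperparameters': {'Chronos': {'model_path': 'bolt_small'}}, 'skip_model_selection': True}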
{autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/metrics/abstract.py
@@ -2,7 +2,6 @@ import warnings
 from typing import Optional, Sequence, Union, overload
 
 import numpy as np
-import numpy.typing as npt
 import pandas as pd
 
 from autogluon.timeseries import TimeSeriesDataFrame
@@ -242,12 +241,12 @@ class TimeSeriesScorer:
     @staticmethod
     def check_get_horizon_weight(
         horizon_weight: Union[Sequence[float], np.ndarray], prediction_length: int
-    ) ->
+    ) -> np.ndarray: ...
 
     @staticmethod
     def check_get_horizon_weight(
         horizon_weight: Union[Sequence[float], np.ndarray, None], prediction_length: int
-    ) -> Optional[
+    ) -> Optional[np.ndarray]:
         """Convert horizon_weight to a non-negative numpy array that sums up to prediction_length.
         Raises an exception if horizon_weight has an invalid shape or contains invalid values.
 
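Based on the docstring kept above, the now explicitly `np.ndarray`-typed helper normalizes a horizon weight vector so that it sums to `prediction_length`. A hedged sketch; behavior beyond the docstring is assumed.

# Illustrative sketch based on the docstring above, not on code shown in this diff.
import numpy as np
from autogluon.timeseries.metrics import TimeSeriesScorer

w = TimeSeriesScorer.check_get_horizon_weight([0.2, 0.3, 0.5], prediction_length=3)
assert isinstance(w, np.ndarray)
assert np.isclose(w.sum(), 3.0)   # rescaled to sum to prediction_length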
{autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/__init__.py
@@ -27,6 +27,7 @@ from .local import (
     ThetaModel,
     ZeroModel,
 )
+from .registry import ModelRegistry
 
 __all__ = [
     "ADIDAModel",
@@ -43,6 +44,7 @@ __all__ = [
     "ETSModel",
     "IMAPAModel",
     "ChronosModel",
+    "ModelRegistry",
     "NPTSModel",
     "NaiveModel",
     "PatchTSTModel",
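`ModelRegistry` is now exported from `autogluon.timeseries.models`. The two methods used by the new trainer code (see model_set_builder.py further below) suggest usage along these lines; the snippet is an assumption-labeled sketch.

# Illustrative sketch; method names are taken from their call sites in model_set_builder.py below.
from autogluon.timeseries.models import ModelRegistry

model_cls = ModelRegistry.get_model_class("DeepAR")    # model name without the "Model" suffix
priority = ModelRegistry.get_model_priority("DeepAR")  # used to order models before training
print(model_cls.__name__, priority)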
{autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/models/multi_window/multi_window_model.py
@@ -7,6 +7,7 @@ import time
 from typing import Any, Optional, Type, Union
 
 import numpy as np
+from typing_extensions import Self
 
 import autogluon.core as ag
 from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
@@ -73,10 +74,6 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def supports_past_covariates(self) -> bool:
         return self.model_base.supports_past_covariates
 
-    @property
-    def supports_cat_covariates(self) -> bool:
-        return self.model_base.supports_cat_covariates
-
     def _get_model_base(self):
         return self.model_base
 
@@ -86,15 +83,18 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def _is_gpu_available(self) -> bool:
         return self._get_model_base()._is_gpu_available()
 
-    def get_minimum_resources(self, is_gpu_available: bool = False) ->
+    def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, Union[int, float]]:
         return self._get_model_base().get_minimum_resources(is_gpu_available)
 
     def _fit(
         self,
         train_data: TimeSeriesDataFrame,
         val_data: Optional[TimeSeriesDataFrame] = None,
-        time_limit: Optional[
-
+        time_limit: Optional[float] = None,
+        num_cpus: Optional[int] = None,
+        num_gpus: Optional[int] = None,
+        verbosity: int = 2,
+        val_splitter: Optional[AbstractWindowSplitter] = None,
         refit_every_n_windows: Optional[int] = 1,
         **kwargs,
     ):
@@ -111,11 +111,15 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
 
         oof_predictions_per_window = []
         global_fit_start_time = time.time()
+        model: Optional[AbstractTimeSeriesModel] = None
 
         for window_index, (train_fold, val_fold) in enumerate(val_splitter.split(train_data)):
            logger.debug(f"\tWindow {window_index}")
+
            # refit_this_window is always True for the 0th window
            refit_this_window = window_index % refit_every_n_windows == 0
+            assert window_index != 0 or refit_this_window
+
            if time_limit is None:
                time_left_for_window = None
            else:
@@ -148,6 +152,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
             else:
                 time_left_for_prediction = time_limit - (time.time() - global_fit_start_time)
 
+            assert model is not None
             model.score_and_cache_oof(
                 val_fold, store_val_score=True, store_predict_time=True, time_limit=time_left_for_prediction
             )
@@ -172,11 +177,13 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
 
         # Only the model trained on most recent data is saved & used for prediction
         self.most_recent_model = model
-        self.
+        assert self.most_recent_model is not None
+
+        self.most_recent_model_folder = most_recent_refit_window  # type: ignore
         self.predict_time = self.most_recent_model.predict_time
-        self.fit_time = time.time() - global_fit_start_time - self.predict_time
+        self.fit_time = time.time() - global_fit_start_time - self.predict_time  # type: ignore
         self._oof_predictions = oof_predictions_per_window
-        self.val_score = np.mean([info["val_score"] for info in self.info_per_val_window])
+        self.val_score = np.mean([info["val_score"] for info in self.info_per_val_window])  # type: ignore
 
     def get_info(self) -> dict:
         info = super().get_info()
@@ -227,7 +234,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         train_fn_kwargs["init_params"]["model_base_kwargs"] = self.get_params()
         return train_fn_kwargs
 
-    def save(self, path: str = None, verbose=True) -> str:
+    def save(self, path: Optional[str] = None, verbose: bool = True) -> str:
         most_recent_model = self.most_recent_model
         self.most_recent_model = None
         save_path = super().save(path, verbose)
@@ -238,32 +245,36 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         most_recent_model.save()
         return save_path
 
-    def persist(self):
+    def persist(self) -> Self:
         if self.most_recent_model is None:
             raise ValueError(f"{self.name} must be fit before persisting")
         self.most_recent_model.persist()
+        return self
 
     @classmethod
     def load(
         cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True
     ) -> AbstractTimeSeriesModel:
         model = super().load(path=path, reset_paths=reset_paths, load_oof=load_oof, verbose=verbose)
-
-
-
-
-
-
+        if model.most_recent_model_folder is not None:
+            most_recent_model_path = os.path.join(model.path, model.most_recent_model_folder)
+            model.most_recent_model = model.model_base_type.load(
+                most_recent_model_path,
+                reset_paths=reset_paths,
+                verbose=verbose,
+            )
         return model
 
     def convert_to_refit_full_template(self) -> AbstractTimeSeriesModel:
         # refit_model is an instance of base model type, not MultiWindowBacktestingModel
+        assert self.most_recent_model is not None, "Most recent model is None. Model must be fit first."
         refit_model = self.most_recent_model.convert_to_refit_full_template()
         refit_model.rename(self.name + ag.constants.REFIT_FULL_SUFFIX)
         return refit_model
 
     def convert_to_refit_full_via_copy(self) -> AbstractTimeSeriesModel:
         # refit_model is an instance of base model type, not MultiWindowBacktestingModel
+        assert self.most_recent_model is not None, "Most recent model is None. Model must be fit first."
         refit_model = self.most_recent_model.convert_to_refit_full_via_copy()
         refit_model.rename(self.name + ag.constants.REFIT_FULL_SUFFIX)
         return refit_model
{autogluon.timeseries-1.4.1b20250824 → autogluon.timeseries-1.4.1b20250828}/src/autogluon/timeseries/predictor.py
@@ -21,7 +21,7 @@ from autogluon.core.utils.decorators import apply_presets
 from autogluon.core.utils.loaders import load_pkl, load_str
 from autogluon.core.utils.savers import save_pkl, save_str
 from autogluon.timeseries import __version__ as current_ag_version
-from autogluon.timeseries.configs import
+from autogluon.timeseries.configs import get_predictor_presets
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
 from autogluon.timeseries.learner import TimeSeriesLearner
 from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
@@ -432,7 +432,7 @@ class TimeSeriesPredictor:
         )
         return train_data
 
-    @apply_presets(
+    @apply_presets(get_predictor_presets())
     def fit(
         self,
         train_data: Union[TimeSeriesDataFrame, pd.DataFrame, Path, str],
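With `fit` now decorated by `apply_presets(get_predictor_presets())`, preset names and aliases defined in predictor_presets.py are resolved at call time. A hedged end-to-end sketch on synthetic data; only the preset names come from this diff, the rest is standard predictor usage.

# Illustrative sketch (not part of the diff).
import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Tiny synthetic dataset in long format: one item, daily frequency.
df = pd.DataFrame({
    "item_id": ["A"] * 60,
    "timestamp": pd.date_range("2024-01-01", periods=60, freq="D"),
    "target": [float(i) for i in range(60)],
})
train_data = TimeSeriesDataFrame.from_data_frame(df)

predictor = TimeSeriesPredictor(prediction_length=7)
# "medium_quality" (alias "mq") resolves to {"hyperparameters": "light"} via get_predictor_presets().
predictor.fit(train_data, presets="medium_quality", time_limit=60)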
autogluon.timeseries-1.4.1b20250828/src/autogluon/timeseries/trainer/model_set_builder.py
ADDED
@@ -0,0 +1,256 @@
+import copy
+import logging
+import re
+from collections import defaultdict
+from typing import Any, Optional, Type, Union
+
+from autogluon.common import space
+from autogluon.core import constants
+from autogluon.timeseries.configs import get_hyperparameter_presets
+from autogluon.timeseries.metrics import TimeSeriesScorer
+from autogluon.timeseries.models import ModelRegistry
+from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
+from autogluon.timeseries.models.multi_window import MultiWindowBacktestingModel
+from autogluon.timeseries.utils.features import CovariateMetadata
+
+logger = logging.getLogger(__name__)
+
+
+ModelKey = Union[str, Type[AbstractTimeSeriesModel]]
+ModelHyperparameters = dict[str, Any]
+TrainerHyperparameterSpec = dict[ModelKey, list[ModelHyperparameters]]
+
+
+class TrainableModelSetBuilder:
+    """Responsible for building a list of model objects, in priority order, that will be trained by the
+    Trainer."""
+
+    VALID_AG_ARGS_KEYS = {
+        "name",
+        "name_prefix",
+        "name_suffix",
+    }
+
+    def __init__(
+        self,
+        path: str,
+        freq: Optional[str],
+        prediction_length: int,
+        eval_metric: TimeSeriesScorer,
+        target: str,
+        quantile_levels: list[float],
+        covariate_metadata: CovariateMetadata,
+        multi_window: bool,
+    ):
+        self.path = path
+        self.freq = freq
+        self.prediction_length = prediction_length
+        self.eval_metric = eval_metric
+        self.target = target
+        self.quantile_levels = quantile_levels
+        self.covariate_metadata = covariate_metadata
+        self.multi_window = multi_window
+
+    def get_model_set(
+        self,
+        hyperparameters: Union[str, dict, None],
+        hyperparameter_tune: bool,
+        excluded_model_types: Optional[list[str]],
+        banned_model_names: Optional[list[str]] = None,
+    ) -> list[AbstractTimeSeriesModel]:
+        """Resolve hyperparameters and create the requested list of models"""
+        models = []
+        banned_model_names = [] if banned_model_names is None else banned_model_names.copy()
+
+        # resolve and normalize hyperparameters
+        model_hp_map: TrainerHyperparameterSpec = HyperparameterBuilder(
+            hyperparameters=hyperparameters,
+            hyperparameter_tune=hyperparameter_tune,
+            excluded_model_types=excluded_model_types,
+        ).get_hyperparameters()
+
+        for k in model_hp_map.keys():
+            if isinstance(k, type) and not issubclass(k, AbstractTimeSeriesModel):
+                raise ValueError(f"Custom model type {k} must inherit from `AbstractTimeSeriesModel`.")
+
+        model_priority_list = sorted(
+            model_hp_map.keys(), key=lambda x: ModelRegistry.get_model_priority(x), reverse=True
+        )
+
+        for model_key in model_priority_list:
+            model_type = self._get_model_type(model_key)
+
+            for model_hps in model_hp_map[model_key]:
+                ag_args = model_hps.pop(constants.AG_ARGS, {})
+
+                for key in ag_args:
+                    if key not in self.VALID_AG_ARGS_KEYS:
+                        raise ValueError(
+                            f"Model {model_type} received unknown ag_args key: {key} (valid keys {self.VALID_AG_ARGS_KEYS})"
+                        )
+                model_name_base = self._get_model_name(ag_args, model_type)
+
+                model_type_kwargs: dict[str, Any] = dict(
+                    name=model_name_base,
+                    hyperparameters=model_hps,
+                    **self._get_default_model_init_kwargs(),
+                )
+
+                # add models while preventing name collisions
+                model = model_type(**model_type_kwargs)
+                model_type_kwargs.pop("name", None)
+
+                increment = 1
+                while model.name in banned_model_names:
+                    increment += 1
+                    model = model_type(name=f"{model_name_base}_{increment}", **model_type_kwargs)
+
+                if self.multi_window:
+                    model = MultiWindowBacktestingModel(model_base=model, name=model.name, **model_type_kwargs)  # type: ignore
+
+                banned_model_names.append(model.name)
+                models.append(model)
+
+        return models
+
+    def _get_model_type(self, model: ModelKey) -> Type[AbstractTimeSeriesModel]:
+        if isinstance(model, str):
+            model_type: Type[AbstractTimeSeriesModel] = ModelRegistry.get_model_class(model)
+        elif isinstance(model, type):
+            model_type = model
+        else:
+            raise ValueError(
+                f"Keys of the `hyperparameters` dictionary must be strings or types, received {type(model)}."
+            )
+
+        return model_type
+
+    def _get_default_model_init_kwargs(self) -> dict[str, Any]:
+        return dict(
+            path=self.path,
+            freq=self.freq,
+            prediction_length=self.prediction_length,
+            eval_metric=self.eval_metric,
+            target=self.target,
+            quantile_levels=self.quantile_levels,
+            covariate_metadata=self.covariate_metadata,
+        )
+
+    def _get_model_name(self, ag_args: dict[str, Any], model_type: Type[AbstractTimeSeriesModel]) -> str:
+        name = ag_args.get("name")
+        if name is None:
+            name_stem = re.sub(r"Model$", "", model_type.__name__)
+            name_prefix = ag_args.get("name_prefix", "")
+            name_suffix = ag_args.get("name_suffix", "")
+            name = name_prefix + name_stem + name_suffix
+        return name
+
+
+class HyperparameterBuilder:
+    """Given user hyperparameter specifications, this class resolves them against presets, removes
+    excluded model types and canonicalizes the hyperparameter specification.
+    """
+
+    def __init__(
+        self,
+        hyperparameters: Union[str, dict, None],
+        hyperparameter_tune: bool,
+        excluded_model_types: Optional[list[str]],
+    ):
+        self.hyperparameters = hyperparameters
+        self.hyperparameter_tune = hyperparameter_tune
+        self.excluded_model_types = excluded_model_types
+
+    def get_hyperparameters(self) -> TrainerHyperparameterSpec:
+        hyperparameter_dict = {}
+        hp_presets = get_hyperparameter_presets()
+
+        if self.hyperparameters is None:
+            hyperparameter_dict = hp_presets["default"]
+        elif isinstance(self.hyperparameters, str):
+            try:
+                hyperparameter_dict = hp_presets[self.hyperparameters]
+            except KeyError:
+                raise ValueError(f"{self.hyperparameters} is not a valid preset.")
+        elif isinstance(self.hyperparameters, dict):
+            hyperparameter_dict = copy.deepcopy(self.hyperparameters)
+        else:
+            raise ValueError(
+                f"hyperparameters must be a dict, a string or None (received {type(self.hyperparameters)}). "
+                f"Please see the documentation for TimeSeriesPredictor.fit"
+            )
+
+        return self._check_and_clean_hyperparameters(hyperparameter_dict)  # type: ignore
+
+    def _check_and_clean_hyperparameters(
+        self,
+        hyperparameters: dict[ModelKey, Union[ModelHyperparameters, list[ModelHyperparameters]]],
+    ) -> TrainerHyperparameterSpec:
+        """Convert the hyperparameters dictionary to a unified format:
+        - Remove 'Model' suffix from model names, if present
+        - Make sure that each value in the hyperparameters dict is a list with model configurations
+        - Checks if hyperparameters contain searchspaces
+        """
+        excluded_models = self._get_excluded_models()
+        hyperparameters_clean = defaultdict(list)
+        for model_name, model_hyperparameters in hyperparameters.items():
+            # Handle model names ending with "Model", e.g., "DeepARModel" is mapped to "DeepAR"
+            if isinstance(model_name, str):
+                model_name = self._normalize_model_type_name(model_name)
+                if model_name in excluded_models:
+                    logger.info(
+                        f"\tFound '{model_name}' model in `hyperparameters`, but '{model_name}' "
+                        "is present in `excluded_model_types` and will be removed."
+                    )
+                    continue
+            if not isinstance(model_hyperparameters, list):
+                model_hyperparameters = [model_hyperparameters]
+            hyperparameters_clean[model_name].extend(model_hyperparameters)
+
+        self._verify_searchspaces(hyperparameters_clean)
+
+        return dict(hyperparameters_clean)
+
+    def _get_excluded_models(self) -> set[str]:
+        excluded_models = set()
+        if self.excluded_model_types is not None and len(self.excluded_model_types) > 0:
+            if not isinstance(self.excluded_model_types, list):
+                raise ValueError(f"`excluded_model_types` must be a list, received {type(self.excluded_model_types)}")
+            logger.info(f"Excluded model types: {self.excluded_model_types}")
+            for model in self.excluded_model_types:
+                if not isinstance(model, str):
+                    raise ValueError(f"Each entry in `excluded_model_types` must be a string, received {type(model)}")
+                excluded_models.add(self._normalize_model_type_name(model))
+        return excluded_models
+
+    @staticmethod
+    def _normalize_model_type_name(model_name: str) -> str:
+        return model_name.removesuffix("Model")
+
+    def _verify_searchspaces(self, hyperparameters: dict[str, list[ModelHyperparameters]]):
+        if self.hyperparameter_tune:
+            for model, model_hps_list in hyperparameters.items():
+                for model_hps in model_hps_list:
+                    if contains_searchspace(model_hps):
+                        return
+
+            raise ValueError(
+                "Hyperparameter tuning specified, but no model contains a hyperparameter search space. "
+                "Please disable hyperparameter tuning with `hyperparameter_tune_kwargs=None` or provide a search space "
+                "for at least one model."
+            )
+        else:
+            for model, model_hps_list in hyperparameters.items():
+                for model_hps in model_hps_list:
+                    if contains_searchspace(model_hps):
+                        raise ValueError(
+                            f"Hyperparameter tuning not specified, so hyperparameters must have fixed values. "
+                            f"However, for model {model} hyperparameters {model_hps} contain a search space."
+                        )
+
+
+def contains_searchspace(model_hyperparameters: ModelHyperparameters) -> bool:
+    for hp_value in model_hyperparameters.values():
+        if isinstance(hp_value, space.Space):
+            return True
+    return False
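To illustrate what the new builder does with user input, here is a hedged sketch of the normalization path. The class, module path, and hyperparameter keys come from this diff; the call itself is illustrative.

# Illustrative sketch (not part of the diff): normalizing a user hyperparameter spec.
from autogluon.timeseries.trainer.model_set_builder import HyperparameterBuilder

hps = HyperparameterBuilder(
    hyperparameters={"DeepARModel": {}, "RecursiveTabular": {"max_num_samples": 100_000}, "ETS": {}},
    hyperparameter_tune=False,
    excluded_model_types=["ETSModel"],
).get_hyperparameters()
print(hps)
# Expected shape: {'DeepAR': [{}], 'RecursiveTabular': [{'max_num_samples': 100000}]}
# The "Model" suffix is stripped, values are wrapped in lists, and the excluded "ETS" entry is dropped.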