autogluon.timeseries 1.3.2b20250712__py3-none-any.whl → 1.4.1b20251116__py3-none-any.whl
This diff compares two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +84 -0
- autogluon/timeseries/dataset/ts_dataframe.py +98 -72
- autogluon/timeseries/learner.py +19 -18
- autogluon/timeseries/metrics/__init__.py +5 -5
- autogluon/timeseries/metrics/abstract.py +17 -17
- autogluon/timeseries/metrics/point.py +1 -1
- autogluon/timeseries/metrics/quantile.py +2 -2
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +4 -0
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -75
- autogluon/timeseries/models/abstract/tunable.py +6 -6
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +72 -76
- autogluon/timeseries/models/autogluon_tabular/per_step.py +104 -46
- autogluon/timeseries/models/autogluon_tabular/transforms.py +9 -7
- autogluon/timeseries/models/chronos/model.py +115 -78
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +76 -44
- autogluon/timeseries/models/ensemble/__init__.py +29 -2
- autogluon/timeseries/models/ensemble/abstract.py +16 -52
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +247 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +50 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +10 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +87 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +133 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +141 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +41 -0
- autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +8 -18
- autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py} +13 -13
- autogluon/timeseries/models/gluonts/abstract.py +26 -26
- autogluon/timeseries/models/gluonts/dataset.py +4 -4
- autogluon/timeseries/models/gluonts/models.py +27 -12
- autogluon/timeseries/models/local/abstract_local_model.py +14 -14
- autogluon/timeseries/models/local/naive.py +4 -0
- autogluon/timeseries/models/local/npts.py +1 -0
- autogluon/timeseries/models/local/statsforecast.py +30 -14
- autogluon/timeseries/models/multi_window/multi_window_model.py +34 -23
- autogluon/timeseries/models/registry.py +65 -0
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +197 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +94 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +306 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +119 -0
- autogluon/timeseries/models/toto/model.py +236 -0
- autogluon/timeseries/predictor.py +94 -107
- autogluon/timeseries/regressor.py +31 -27
- autogluon/timeseries/splitter.py +7 -31
- autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon/timeseries/trainer/ensemble_composer.py +250 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/{trainer.py → trainer/trainer.py} +182 -307
- autogluon/timeseries/trainer/utils.py +18 -0
- autogluon/timeseries/transforms/covariate_scaler.py +4 -4
- autogluon/timeseries/transforms/target_scaler.py +14 -14
- autogluon/timeseries/utils/datetime/lags.py +2 -2
- autogluon/timeseries/utils/datetime/time_features.py +2 -2
- autogluon/timeseries/utils/features.py +41 -37
- autogluon/timeseries/utils/forecast.py +5 -5
- autogluon/timeseries/utils/warning_filters.py +3 -1
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth +1 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/METADATA +32 -17
- autogluon_timeseries-1.4.1b20251116.dist-info/RECORD +96 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/WHEEL +1 -1
- autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -530
- autogluon/timeseries/models/presets.py +0 -358
- autogluon.timeseries-1.3.2b20250712-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.3.2b20250712.dist-info/RECORD +0 -71
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/zip-safe +0 -0
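For context, the removed `configs/presets_configs.py` (first hunk below) defined the values accepted by the `presets` argument of `TimeSeriesPredictor.fit`; in 1.4.x these definitions appear to be split across the new `configs/predictor_presets.py` and `configs/hyperparameter_presets.py`. A minimal usage sketch of the preset-based API, using a tiny synthetic dataset (the preset name `"bolt_small"` comes from the removed module):

```python
import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Tiny synthetic dataset: one daily series in long format.
df = pd.DataFrame({
    "item_id": ["A"] * 100,
    "timestamp": pd.date_range("2024-01-01", periods=100, freq="D"),
    "target": range(100),
})
train_data = TimeSeriesDataFrame.from_data_frame(df, id_column="item_id", timestamp_column="timestamp")

predictor = TimeSeriesPredictor(prediction_length=7)
# "bolt_small" was one of the preset names defined in the removed presets_configs.py.
predictor.fit(train_data, presets="bolt_small")
predictions = predictor.predict(train_data)
```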
```diff
--- autogluon/timeseries/configs/presets_configs.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""Preset configurations for autogluon.timeseries Predictors"""
-
-from autogluon.timeseries.models.presets import get_default_hps
-
-# TODO: change default HPO settings when other HPO strategies (e.g., Ray tune) are available
-# TODO: add refit_full arguments once refitting is available
-
-TIMESERIES_PRESETS_CONFIGS = dict(
-    best_quality={"hyperparameters": "default", "num_val_windows": 2},
-    high_quality={"hyperparameters": "default"},
-    medium_quality={"hyperparameters": "light"},
-    fast_training={"hyperparameters": "very_light"},
-    # Chronos-Bolt models
-    bolt_tiny={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_tiny"}},
-        "skip_model_selection": True,
-    },
-    bolt_mini={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_mini"}},
-        "skip_model_selection": True,
-    },
-    bolt_small={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_small"}},
-        "skip_model_selection": True,
-    },
-    bolt_base={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_base"}},
-        "skip_model_selection": True,
-    },
-    # Original Chronos models
-    chronos_tiny={
-        "hyperparameters": {"Chronos": {"model_path": "tiny"}},
-        "skip_model_selection": True,
-    },
-    chronos_mini={
-        "hyperparameters": {"Chronos": {"model_path": "mini"}},
-        "skip_model_selection": True,
-    },
-    chronos_small={
-        "hyperparameters": {"Chronos": {"model_path": "small"}},
-        "skip_model_selection": True,
-    },
-    chronos_base={
-        "hyperparameters": {"Chronos": {"model_path": "base"}},
-        "skip_model_selection": True,
-    },
-    chronos_large={
-        "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
-        "skip_model_selection": True,
-    },
-    chronos_ensemble={
-        "hyperparameters": {
-            "Chronos": {"model_path": "small"},
-            **get_default_hps("light_inference"),
-        }
-    },
-    chronos_large_ensemble={
-        "hyperparameters": {
-            "Chronos": {"model_path": "large", "batch_size": 8},
-            **get_default_hps("light_inference"),
-        }
-    },
-)
-
-TIMESERIES_PRESETS_ALIASES = dict(
-    chronos="chronos_small",
-    best="best_quality",
-    high="high_quality",
-    medium="medium_quality",
-    bq="best_quality",
-    hq="high_quality",
-    mq="medium_quality",
-)
-
-# update with aliases
-TIMESERIES_PRESETS_CONFIGS = {
-    **TIMESERIES_PRESETS_CONFIGS,
-    **{k: TIMESERIES_PRESETS_CONFIGS[v].copy() for k, v in TIMESERIES_PRESETS_ALIASES.items()},
-}
```
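The final block of the removed module expands aliases by shallow-copying the config they point to. A standalone sketch of that pattern and its one subtlety:

```python
# Standalone sketch of the alias expansion at the bottom of presets_configs.py.
configs = {"chronos_small": {"hyperparameters": {"Chronos": {"model_path": "small"}}}}
aliases = {"chronos": "chronos_small"}

configs = {
    **configs,
    **{k: configs[v].copy() for k, v in aliases.items()},
}

assert configs["chronos"] == configs["chronos_small"]      # alias resolves to the same settings
assert configs["chronos"] is not configs["chronos_small"]  # .copy() makes a new top-level dict...
# ...but the copy is shallow: nested dicts are still shared between alias and target.
assert configs["chronos"]["hyperparameters"] is configs["chronos_small"]["hyperparameters"]
```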
```diff
--- autogluon/timeseries/models/chronos/pipeline/base.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Authors: Lorenzo Stella <stellalo@amazon.com>, Caner Turkmen <atturkm@amazon.com>
-
-from enum import Enum
-from pathlib import Path
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
-
-import torch
-
-from .utils import left_pad_and_stack_1D
-
-if TYPE_CHECKING:
-    from transformers import PreTrainedModel
-
-
-class ForecastType(Enum):
-    SAMPLES = "samples"
-    QUANTILES = "quantiles"
-
-
-class PipelineRegistry(type):
-    REGISTRY: Dict[str, "PipelineRegistry"] = {}
-
-    def __new__(cls, name, bases, attrs):
-        """See, https://github.com/faif/python-patterns."""
-        new_cls = type.__new__(cls, name, bases, attrs)
-        if name is not None:
-            cls.REGISTRY[name] = new_cls
-        if aliases := attrs.get("_aliases"):
-            for alias in aliases:
-                cls.REGISTRY[alias] = new_cls
-        return new_cls
-
-
-class BaseChronosPipeline(metaclass=PipelineRegistry):
-    forecast_type: ForecastType
-    dtypes = {
-        "bfloat16": torch.bfloat16,
-        "float32": torch.float32,
-        "float64": torch.float64,
-    }
-
-    def __init__(self, inner_model: "PreTrainedModel"):
-        """
-        Parameters
-        ----------
-        inner_model : PreTrainedModel
-            A hugging-face transformers PreTrainedModel, e.g., T5ForConditionalGeneration
-        """
-        # for easy access to the inner HF-style model
-        self.inner_model = inner_model
-
-    def _prepare_and_validate_context(self, context: Union[torch.Tensor, List[torch.Tensor]]):
-        if isinstance(context, list):
-            context = left_pad_and_stack_1D(context)
-        assert isinstance(context, torch.Tensor)
-        if context.ndim == 1:
-            context = context.unsqueeze(0)
-        assert context.ndim == 2
-
-        return context
-
-    def predict(
-        self,
-        context: Union[torch.Tensor, List[torch.Tensor]],
-        prediction_length: Optional[int] = None,
-        **kwargs,
-    ):
-        """
-        Get forecasts for the given time series.
-
-        Parameters
-        ----------
-        context
-            Input series. This is either a 1D tensor, or a list
-            of 1D tensors, or a 2D tensor whose first dimension
-            is batch. In the latter case, use left-padding with
-            ``torch.nan`` to align series of different lengths.
-        prediction_length
-            Time steps to predict. Defaults to a model-dependent
-            value if not given.
-
-        Returns
-        -------
-        forecasts
-            Tensor containing forecasts. The layout and meaning
-            of the forecasts values depends on ``self.forecast_type``.
-        """
-        raise NotImplementedError()
-
-    def predict_quantiles(
-        self, context: torch.Tensor, prediction_length: int, quantile_levels: List[float], **kwargs
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        """
-        Get quantile and mean forecasts for given time series. All
-        predictions are returned on the CPU.
-
-        Parameters
-        ----------
-        context
-            Input series. This is either a 1D tensor, or a list
-            of 1D tensors, or a 2D tensor whose first dimension
-            is batch. In the latter case, use left-padding with
-            ``torch.nan`` to align series of different lengths.
-        prediction_length
-            Time steps to predict. Defaults to a model-dependent
-            value if not given.
-        quantile_levels: List[float]
-            Quantile levels to compute
-
-        Returns
-        -------
-        quantiles
-            Tensor containing quantile forecasts. Shape
-            (batch_size, prediction_length, num_quantiles)
-        mean
-            Tensor containing mean (point) forecasts. Shape
-            (batch_size, prediction_length)
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    def from_pretrained(
-        cls,
-        pretrained_model_name_or_path: Union[str, Path],
-        *model_args,
-        force=False,
-        **kwargs,
-    ):
-        """
-        Load the model, either from a local path or from the HuggingFace Hub.
-        Supports the same arguments as ``AutoConfig`` and ``AutoModel``
-        from ``transformers``.
-
-        When a local path is provided, supports both a folder or a .tar.gz archive.
-        """
-        from transformers import AutoConfig
-
-        kwargs.setdefault("resume_download", None)  # silence huggingface_hub warning
-        if str(pretrained_model_name_or_path).startswith("s3://"):
-            from .utils import cache_model_from_s3
-
-            local_model_path = cache_model_from_s3(str(pretrained_model_name_or_path), force=force)
-            return cls.from_pretrained(local_model_path, *model_args, **kwargs)
-
-        torch_dtype = kwargs.get("torch_dtype", "auto")
-        if torch_dtype != "auto" and isinstance(torch_dtype, str):
-            kwargs["torch_dtype"] = cls.dtypes[torch_dtype]
-
-        config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
-        is_valid_config = hasattr(config, "chronos_pipeline_class") or hasattr(config, "chronos_config")
-
-        if not is_valid_config:
-            raise ValueError("Not a Chronos config file")
-
-        pipeline_class_name = getattr(config, "chronos_pipeline_class", "ChronosPipeline")
-        class_: Optional[BaseChronosPipeline] = PipelineRegistry.REGISTRY.get(pipeline_class_name)  # type: ignore
-        if class_ is None:
-            raise ValueError(f"Trying to load unknown pipeline class: {pipeline_class_name}")
-
-        return class_.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
```