autogluon.timeseries 1.0.1b20240323__tar.gz → 1.0.1b20240325__tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This release has been flagged as potentially problematic.
Files changed (63)
  1. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/PKG-INFO +3 -2
  2. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/setup.py +8 -3
  3. autogluon.timeseries-1.0.1b20240325/src/autogluon/timeseries/configs/presets_configs.py +55 -0
  4. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/__init__.py +2 -0
  5. autogluon.timeseries-1.0.1b20240325/src/autogluon/timeseries/models/chronos/__init__.py +3 -0
  6. autogluon.timeseries-1.0.1b20240325/src/autogluon/timeseries/models/chronos/chronos.py +487 -0
  7. autogluon.timeseries-1.0.1b20240325/src/autogluon/timeseries/models/chronos/model.py +319 -0
  8. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/presets.py +3 -0
  9. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/predictor.py +15 -3
  10. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/warning_filters.py +22 -4
  11. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/version.py +1 -1
  12. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/PKG-INFO +3 -2
  13. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/SOURCES.txt +3 -0
  14. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/requires.txt +10 -6
  15. autogluon.timeseries-1.0.1b20240323/src/autogluon/timeseries/configs/presets_configs.py +0 -11
  16. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/setup.cfg +0 -0
  17. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/__init__.py +0 -0
  18. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/configs/__init__.py +0 -0
  19. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/dataset/__init__.py +0 -0
  20. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
  21. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/evaluator.py +0 -0
  22. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/learner.py +0 -0
  23. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/metrics/__init__.py +0 -0
  24. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/metrics/abstract.py +0 -0
  25. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/metrics/point.py +0 -0
  26. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/metrics/quantile.py +0 -0
  27. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/metrics/utils.py +0 -0
  28. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
  29. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -0
  30. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
  31. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
  32. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +0 -0
  33. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
  34. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
  35. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
  36. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -0
  37. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
  38. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -0
  39. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
  40. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/gluonts/torch/models.py +0 -0
  41. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/local/__init__.py +0 -0
  42. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/local/abstract_local_model.py +0 -0
  43. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/local/naive.py +0 -0
  44. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/local/npts.py +0 -0
  45. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
  46. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
  47. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +0 -0
  48. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/splitter.py +0 -0
  49. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/trainer/__init__.py +0 -0
  50. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/trainer/abstract_trainer.py +0 -0
  51. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
  52. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/__init__.py +0 -0
  53. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
  54. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
  55. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
  56. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
  57. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
  58. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/features.py +0 -0
  59. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon/timeseries/utils/forecast.py +0 -0
  60. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
  61. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
  62. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
  63. {autogluon.timeseries-1.0.1b20240323 → autogluon.timeseries-1.0.1b20240325}/src/autogluon.timeseries.egg-info/zip-safe +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.0.1b20240323
+Version: 1.0.1b20240325
 Summary: AutoML for Image, Text, and Tabular Data
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -132,5 +132,6 @@ Classifier: Topic :: Scientific/Engineering :: Image Recognition
 Requires-Python: >=3.8, <3.12
 Description-Content-Type: text/markdown
 Provides-Extra: tests
-Provides-Extra: chronos-cpu
+Provides-Extra: chronos-openvino
+Provides-Extra: chronos-onnx
 Provides-Extra: all

setup.py
@@ -55,12 +55,17 @@ extras_require = {
         "isort>=5.10",
         "black~=23.0",
     ],
-    "chronos-cpu": [  # for faster CPU inference in pretrained models
-        "optimum[onnxruntime,openvino,nncf]>=1.17,<1.18",
+    "chronos-openvino": [  # for faster CPU inference in pretrained models with OpenVINO
+        "optimum[openvino,nncf]>=1.17,<1.18",
+    ],
+    "chronos-onnx": [  # for faster CPU inference in pretrained models with ONNX
+        "optimum[onnxruntime]>=1.17,<1.18",
     ],
 }
 
-extras_require["all"] = list(set.union(*(set(extras_require[extra]) for extra in ["chronos-cpu"])))
+extras_require["all"] = list(
+    set.union(*(set(extras_require[extra]) for extra in ["chronos-onnx", "chronos-openvino"]))
+)
 
 install_requires = ag.get_dependency_version_ranges(install_requires)
 

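With the former `chronos-cpu` extra split in two, users can pull in only the accelerator they need: for example, `pip install "autogluon.timeseries[chronos-onnx]"` installs the ONNX Runtime backend, while `pip install "autogluon.timeseries[chronos-openvino]"` installs the OpenVINO/NNCF backend. The `all` extra now takes the union of both dependency sets.
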
src/autogluon/timeseries/configs/presets_configs.py (new)
@@ -0,0 +1,55 @@
+"""Preset configurations for autogluon.timeseries Predictors"""
+from autogluon.timeseries.models.presets import get_default_hps
+
+# TODO: change default HPO settings when other HPO strategies (e.g., Ray tune) are available
+# TODO: add refit_full arguments once refitting is available
+
+TIMESERIES_PRESETS_CONFIGS = dict(
+    best_quality={"hyperparameters": "default", "num_val_windows": 2},
+    high_quality={"hyperparameters": "default"},
+    medium_quality={"hyperparameters": "light"},
+    fast_training={"hyperparameters": "very_light"},
+    chronos_tiny={
+        "hyperparameters": {"Chronos": {"model_path": "tiny"}},
+    },
+    chronos_mini={
+        "hyperparameters": {"Chronos": {"model_path": "mini"}},
+    },
+    chronos_small={
+        "hyperparameters": {"Chronos": {"model_path": "small"}},
+    },
+    chronos_base={
+        "hyperparameters": {"Chronos": {"model_path": "base"}},
+    },
+    chronos_large={
+        "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
+    },
+    chronos_ensemble={
+        "hyperparameters": {
+            "Chronos": {"model_path": "small"},
+            **get_default_hps("default"),
+        }
+    },
+    chronos_large_ensemble={
+        "hyperparameters": {
+            "Chronos": {"model_path": "large", "batch_size": 8},
+            **get_default_hps("default"),
+        }
+    },
+)
+
+TIMESERIES_PRESETS_ALIASES = dict(
+    chronos="chronos_small",
+    best="best_quality",
+    high="high_quality",
+    medium="medium_quality",
+    bq="best_quality",
+    hq="high_quality",
+    mq="medium_quality",
+)
+
+# update with aliases
+TIMESERIES_PRESETS_CONFIGS = {
+    **TIMESERIES_PRESETS_CONFIGS,
+    **{k: TIMESERIES_PRESETS_CONFIGS[v].copy() for k, v in TIMESERIES_PRESETS_ALIASES.items()},
+}

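Because the alias entries are shallow copies of their targets, an alias preset resolves to the same hyperparameter spec as the preset it points to. A minimal sketch of what this buys (assuming this build of the package is installed):

from autogluon.timeseries.configs.presets_configs import TIMESERIES_PRESETS_CONFIGS

# "chronos" is an alias for "chronos_small", so both select the same model spec.
assert TIMESERIES_PRESETS_CONFIGS["chronos"]["hyperparameters"] == {"Chronos": {"model_path": "small"}}
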
src/autogluon/timeseries/models/__init__.py
@@ -1,4 +1,5 @@
 from .autogluon_tabular import DirectTabularModel, RecursiveTabularModel
+from .chronos import ChronosModel
 from .gluonts import (
     DeepARModel,
     DLinearModel,
@@ -44,6 +45,7 @@ __all__ = [
     "DynamicOptimizedThetaModel",
     "ETSModel",
     "IMAPAModel",
+    "ChronosModel",
     "NPTSModel",
     "NaiveModel",
     "PatchTSTModel",

src/autogluon/timeseries/models/chronos/__init__.py (new)
@@ -0,0 +1,3 @@
+from .model import ChronosModel
+
+__all__ = ["ChronosModel"]

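With the subpackage `__init__` and the re-export in `models/__init__.py` above, the new class is importable from the public models namespace; a quick sanity check (assuming this build is installed):

from autogluon.timeseries.models import ChronosModel

# Default checkpoint, as defined in chronos/model.py below.
print(ChronosModel.default_model_path)  # amazon/chronos-t5-small
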
src/autogluon/timeseries/models/chronos/chronos.py (new)
@@ -0,0 +1,487 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Original Source: https://github.com/amazon-science/chronos-forecasting
+# Author: Lorenzo Stella <stellalo@amazon.com>
+
+import logging
+import warnings
+from dataclasses import dataclass
+from typing import Any, Dict, List, Literal, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, GenerationConfig, PreTrainedModel
+
+from autogluon.timeseries.utils.warning_filters import set_loggers_level
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ChronosConfig:
+    """
+    This class holds all the configuration parameters to be used
+    by ``ChronosTokenizer`` and ``ChronosPretrainedModel``.
+    """
+
+    tokenizer_class: str
+    tokenizer_kwargs: Dict[str, Any]
+    n_tokens: int
+    n_special_tokens: int
+    pad_token_id: int
+    eos_token_id: int
+    use_eos_token: bool
+    model_type: Literal["causal", "seq2seq"]
+    context_length: int
+    prediction_length: int
+    num_samples: int
+    temperature: float
+    top_k: int
+    top_p: float
+
+    def __post_init__(self):
+        assert (
+            self.pad_token_id < self.n_special_tokens and self.eos_token_id < self.n_special_tokens
+        ), f"Special token id's must be smaller than {self.n_special_tokens=}"
+
+    def create_tokenizer(self) -> "ChronosTokenizer":
+        if self.tokenizer_class == "MeanScaleUniformBins":
+            return MeanScaleUniformBins(**self.tokenizer_kwargs, config=self)
+        raise ValueError
+
+
+class ChronosTokenizer:
+    """
+    A ``ChronosTokenizer`` defines how time series are mapped into token IDs
+    and back.
+
+    For details, see the ``input_transform`` and ``output_transform`` methods,
+    which concrete classes must implement.
+    """
+
+    def input_transform(self, context: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Any]:
+        """
+        Turn a batch of time series into token IDs, attention map, and scale.
+
+        Parameters
+        ----------
+        context
+            A tensor shaped (batch_size, time_length), containing the
+            timeseries to forecast. Use left-padding with ``torch.nan``
+            to align time series of different lengths.
+
+        Returns
+        -------
+        token_ids
+            A tensor of integers, shaped (batch_size, time_length + 1)
+            if ``config.use_eos_token`` and (batch_size, time_length)
+            otherwise, containing token IDs for the input series.
+        attention_mask
+            A boolean tensor, same shape as ``token_ids``, indicating
+            which input observations are not ``torch.nan`` (i.e. not
+            missing nor padding).
+        decoding_context
+            An object that will be passed to ``output_transform``.
+            Contains the relevant context to decode output samples into
+            real values, such as location and scale parameters.
+        """
+        raise NotImplementedError()
+
+    def output_transform(self, samples: torch.Tensor, decoding_context: Any) -> torch.Tensor:
+        """
+        Turn a batch of sample token IDs into real values.
+
+        Parameters
+        ----------
+        samples
+            A tensor of integers, shaped (batch_size, num_samples, time_length),
+            containing token IDs of sample trajectories.
+        decoding_context
+            An object returned by ``input_transform`` containing
+            relevant context to decode samples, such as location and scale.
+            The nature of this depends on the specific tokenizer.
+
+        Returns
+        -------
+        forecasts
+            A real tensor, shaped (batch_size, num_samples, time_length),
+            containing forecasted sample paths.
+        """
+        raise NotImplementedError()
+
+
+class MeanScaleUniformBins(ChronosTokenizer):
+    def __init__(self, low_limit: float, high_limit: float, config: ChronosConfig) -> None:
+        self.config = config
+        self.centers = torch.linspace(
+            low_limit,
+            high_limit,
+            config.n_tokens - config.n_special_tokens - 1,
+        )
+        self.boundaries = torch.concat(
+            (
+                torch.tensor([-1e20], device=self.centers.device),
+                (self.centers[1:] + self.centers[:-1]) / 2,
+                torch.tensor([1e20], device=self.centers.device),
+            )
+        )
+
+    def input_transform(self, context: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        batch_size, length = context.shape
+
+        if length > self.config.context_length:
+            context = context[..., -self.config.context_length :]
+        elif length < self.config.context_length:
+            padding_size = (
+                *context.shape[:-1],
+                self.config.context_length - length,
+            )
+            padding = torch.full(size=padding_size, fill_value=torch.nan)
+            context = torch.concat((padding, context), dim=-1)
+
+        attention_mask = ~torch.isnan(context)
+        scale = torch.nansum(torch.abs(context) * attention_mask, dim=-1) / torch.nansum(attention_mask, dim=-1)
+        scale[~(scale > 0)] = 1.0
+        scaled_context = context / scale.unsqueeze(dim=-1)
+        token_ids = (
+            torch.bucketize(
+                input=scaled_context,
+                boundaries=self.boundaries,
+                # buckets are open to the right, see:
+                # https://pytorch.org/docs/2.1/generated/torch.bucketize.html#torch-bucketize
+                right=True,
+            )
+            + self.config.n_special_tokens
+        )
+        token_ids[~attention_mask] = self.config.pad_token_id
+
+        if self.config.use_eos_token:
+            eos_tokens = torch.full((batch_size, 1), fill_value=self.config.eos_token_id)
+            token_ids = torch.concat((token_ids, eos_tokens), dim=1)
+            eos_mask = torch.full((batch_size, 1), fill_value=True)
+            attention_mask = torch.concat((attention_mask, eos_mask), dim=1)
+
+        return token_ids, attention_mask, scale
+
+    def output_transform(self, samples: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
+        scale_unsqueezed = scale.unsqueeze(-1).unsqueeze(-1)
+        indices = torch.clamp(
+            samples - self.config.n_special_tokens,
+            min=0,
+            max=len(self.centers) - 1,
+        )
+        return self.centers[indices] * scale_unsqueezed
+
+
+class ChronosPretrainedModel(nn.Module):
+    """
+    A ``ChronosPretrainedModel`` wraps a ``PreTrainedModel`` object from ``transformers``
+    and uses it to predict sample paths for time series tokens.
+
+    Parameters
+    ----------
+    config
+        The configuration to use.
+    model
+        The pre-trained model to use.
+    """
+
+    def __init__(self, config: ChronosConfig, model: PreTrainedModel) -> None:
+        super().__init__()
+        self.config = config
+        self.model = model
+        self.device = model.device
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        attention_mask: torch.Tensor,
+        prediction_length: Optional[int] = None,
+        num_samples: Optional[int] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+    ) -> torch.Tensor:
+        """
+        Predict future sample tokens for the given token sequences.
+
+        Arguments ``prediction_length``, ``num_samples``, ``temperature``,
+        ``top_k``, ``top_p`` can be used to customize the model inference,
+        and default to the corresponding attributes in ``self.config`` if
+        not provided.
+
+        Returns
+        -------
+        samples
+            A tensor of integers, shaped (batch_size, num_samples, time_length),
+            containing forecasted sample paths.
+        """
+        if prediction_length is None:
+            prediction_length = self.config.prediction_length
+        if num_samples is None:
+            num_samples = self.config.num_samples
+        if temperature is None:
+            temperature = self.config.temperature
+        if top_k is None:
+            top_k = self.config.top_k
+        if top_p is None:
+            top_p = self.config.top_p
+
+        preds = self.model.generate(
+            input_ids=input_ids,
+            attention_mask=attention_mask.long(),  # int64 (long) type conversion needed for ONNX
+            generation_config=GenerationConfig(
+                min_new_tokens=prediction_length,
+                max_new_tokens=prediction_length,
+                do_sample=True,
+                num_return_sequences=num_samples,
+                eos_token_id=self.config.eos_token_id,
+                pad_token_id=self.config.pad_token_id,
+                temperature=temperature,
+                top_k=top_k,
+                top_p=top_p,
+            ),
+        )
+
+        if self.config.model_type == "seq2seq":
+            preds = preds[..., 1:]  # remove the decoder start token
+        else:
+            assert self.config.model_type == "causal"
+            assert preds.size(-1) == input_ids.size(-1) + prediction_length
+            preds = preds[..., -prediction_length:]
+
+        return preds.reshape(input_ids.size(0), num_samples, -1)
+
+
+def left_pad_and_stack_1D(tensors: List[torch.Tensor]):
+    max_len = max(len(c) for c in tensors)
+    padded = []
+    for c in tensors:
+        assert isinstance(c, torch.Tensor)
+        assert c.ndim == 1
+        padding = torch.full(size=(max_len - len(c),), fill_value=torch.nan, device=c.device)
+        padded.append(torch.concat((padding, c), dim=-1))
+    return torch.stack(padded)
+
+
+class ChronosPipeline:
+    """
+    A ``ChronosPipeline`` uses the given tokenizer and model to forecast
+    input time series.
+
+    Use the ``from_pretrained`` class method to load serialized models.
+    Use the ``predict`` method to get forecasts.
+
+    Parameters
+    ----------
+    tokenizer
+        The tokenizer object to use.
+    model
+        The model to use.
+    """
+
+    tokenizer: ChronosTokenizer
+    model: ChronosPretrainedModel
+
+    def __init__(self, tokenizer, model):
+        self.tokenizer = tokenizer
+        self.model = model
+
+    def predict(
+        self,
+        context: Union[torch.Tensor, List[torch.Tensor]],
+        prediction_length: Optional[int] = None,
+        num_samples: Optional[int] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        limit_prediction_length: bool = True,
+    ) -> torch.Tensor:
+        """
+        Get forecasts for the given time series.
+
+        Parameters
+        ----------
+        context
+            Input series. This is either a 1D tensor, or a list
+            of 1D tensors, or a 2D tensor whose first dimension
+            is batch. In the latter case, use left-padding with
+            ``torch.nan`` to align series of different lengths.
+        prediction_length
+            Time steps to predict. Defaults to what specified
+            in ``self.model.config``.
+        num_samples
+            Number of sample paths to predict. Defaults to what
+            specified in ``self.model.config``.
+        temperature
+            Temperature to use for generating sample tokens.
+            Defaults to what specified in ``self.model.config``.
+        top_k
+            Top-k parameter to use for generating sample tokens.
+            Defaults to what specified in ``self.model.config``.
+        top_p
+            Top-p parameter to use for generating sample tokens.
+            Defaults to what specified in ``self.model.config``.
+        limit_prediction_length
+            Force prediction length smaller or equal than the
+            built-in prediction length from the model. True by
+            default. When true, fail loudly if longer predictions
+            are requested, otherwise longer predictions are allowed.
+
+        Returns
+        -------
+        samples
+            Tensor of sample forecasts, of shape
+            (batch_size, num_samples, prediction_length).
+        """
+        if isinstance(context, list):
+            context = left_pad_and_stack_1D(context)
+        assert isinstance(context, torch.Tensor)
+        if context.ndim == 1:
+            context = context.unsqueeze(0)
+        assert context.ndim == 2
+
+        if prediction_length is None:
+            prediction_length = self.model.config.prediction_length
+
+        if prediction_length > self.model.config.prediction_length:
+            msg = (
+                f"We recommend keeping prediction length <= {self.model.config.prediction_length}. "
+                f"The quality of longer predictions may degrade since the model is not optimized for it. "
+            )
+            if limit_prediction_length:
+                msg += "You can turn off this check by setting `limit_prediction_length=False`."
+                raise ValueError(msg)
+            warnings.warn(msg, stacklevel=2)
+
+        predictions = []
+        remaining = prediction_length
+
+        while remaining > 0:
+            token_ids, attention_mask, scale = self.tokenizer.input_transform(context)
+            samples = self.model(
+                token_ids.to(self.model.device),
+                attention_mask.to(self.model.device),
+                min(remaining, self.model.config.prediction_length),
+                num_samples,
+                temperature,
+                top_k,
+                top_p,
+            )
+            prediction = self.tokenizer.output_transform(samples.to(scale.device), scale)
+
+            predictions.append(prediction)
+            remaining -= prediction.shape[-1]
+
+            if remaining <= 0:
+                break
+
+            context = torch.cat([context, prediction.median(dim=1).values], dim=-1)
+
+        return torch.cat(predictions, dim=-1)
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        """
+        Load the model, either from a local path or from the HuggingFace Hub.
+        Supports the same arguments as ``AutoConfig`` and ``AutoModel``
+        from ``transformers``.
+        """
+
+        config = AutoConfig.from_pretrained(*args, **kwargs)
+
+        assert hasattr(config, "chronos_config"), "Not a Chronos config file"
+
+        chronos_config = ChronosConfig(**config.chronos_config)
+
+        if chronos_config.model_type == "seq2seq":
+            inner_model = AutoModelForSeq2SeqLM.from_pretrained(*args, **kwargs)
+        else:
+            assert config.model_type == "causal"
+            inner_model = AutoModelForCausalLM.from_pretrained(*args, **kwargs)
+
+        return cls(
+            tokenizer=chronos_config.create_tokenizer(),
+            model=ChronosPretrainedModel(config=chronos_config, model=inner_model),
+        )
+
+
+class OptimizedChronosPipeline(ChronosPipeline):
+    """A wrapper around the ChronosPipeline object for CPU-optimized model classes from
+    HuggingFace optimum.
+    """
+
+    dtypes = {
+        "bfloat16": torch.bfloat16,
+        "float32": torch.float32,
+        "float64": torch.float64,
+    }
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        """
+        Load the model, either from a local path or from the HuggingFace Hub.
+        Supports the same arguments as ``AutoConfig`` and ``AutoModel``
+        from ``transformers``.
+        """
+        kwargs = kwargs.copy()
+
+        optimization_strategy = kwargs.pop("optimization_strategy", None)
+        context_length = kwargs.pop("context_length", None)
+
+        config = AutoConfig.from_pretrained(*args, **kwargs)
+        assert hasattr(config, "chronos_config"), "Not a Chronos config file"
+
+        if context_length is not None:
+            config.chronos_config["context_length"] = context_length
+        chronos_config = ChronosConfig(**config.chronos_config)
+
+        torch_dtype = kwargs.get("torch_dtype", "auto")
+        if torch_dtype != "auto" and isinstance(torch_dtype, str):
+            kwargs["torch_dtype"] = cls.dtypes[torch_dtype]
+
+        if chronos_config.model_type == "seq2seq":
+            if optimization_strategy is None:
+                inner_model = AutoModelForSeq2SeqLM.from_pretrained(*args, **kwargs)
+            else:
+                assert optimization_strategy in [
+                    "onnx",
+                    "openvino",
+                ], "optimization_strategy not recognized. Please provide one of `onnx` or `openvino`"
+                torch_dtype = kwargs.pop("torch_dtype", "auto")
+                if torch_dtype != "auto":
+                    logger.warning(
+                        f"\t`torch_dtype` will be ignored for optimization_strategy {optimization_strategy}"
+                    )
+
+                if optimization_strategy == "onnx":
+                    try:
+                        from optimum.onnxruntime import ORTModelForSeq2SeqLM
+                    except ImportError:
+                        raise ImportError(
+                            "Huggingface Optimum library must be installed with ONNX for using the `onnx` strategy"
+                        )
+
+                    assert kwargs.pop("device_map", "cpu") in ["cpu", "auto"], "ONNX mode only available on the CPU"
+                    with set_loggers_level(regex=r"^optimum.*", level=logging.ERROR):
+                        inner_model = ORTModelForSeq2SeqLM.from_pretrained(*args, **{**kwargs, "export": True})
+                elif optimization_strategy == "openvino":
+                    try:
+                        from optimum.intel import OVModelForSeq2SeqLM
+                    except ImportError:
+                        raise ImportError(
+                            "Huggingface Optimum library must be installed with OpenVINO for using the `openvino` strategy"
+                        )
+                    with set_loggers_level(regex=r"^optimum.*", level=logging.ERROR):
+                        inner_model = OVModelForSeq2SeqLM.from_pretrained(
+                            *args, **{**kwargs, "device_map": "cpu", "export": True}
+                        )
+        else:
+            assert config.model_type == "causal"
+            inner_model = AutoModelForCausalLM.from_pretrained(*args, **kwargs)
+
+        return cls(
+            tokenizer=chronos_config.create_tokenizer(),
+            model=ChronosPretrainedModel(config=chronos_config, model=inner_model),
+        )

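The pipeline can also be exercised directly, outside the AutoGluon wrapper. A minimal sketch (assuming this build is installed and the `amazon/chronos-t5-tiny` checkpoint can be fetched from the Hugging Face Hub):

import torch

from autogluon.timeseries.models.chronos.chronos import ChronosPipeline

pipeline = ChronosPipeline.from_pretrained("amazon/chronos-t5-tiny")

# One toy sine-wave series; predict() also accepts a list of 1D tensors
# or a left-padded 2D batch.
context = torch.sin(torch.arange(100) * 0.3)
samples = pipeline.predict(context, prediction_length=12, num_samples=20)

print(samples.shape)  # torch.Size([1, 20, 12])
median_forecast = samples.median(dim=1).values  # shape (1, 12)
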
src/autogluon/timeseries/models/chronos/model.py (new)
@@ -0,0 +1,319 @@
+import logging
+import os
+from typing import Any, Dict, Literal, Optional, Union
+
+import numpy as np
+import pandas as pd
+
+from autogluon.common.loaders import load_pkl
+from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
+from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
+from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
+from autogluon.timeseries.utils.warning_filters import warning_filter
+
+logger = logging.getLogger(__name__)
+
+
+# allowed HuggingFace model paths with custom parameter definitions
+MODEL_CONFIGS = {
+    "amazon/chronos-t5-tiny": {
+        "num_gpus": 0,  # minimum number of required GPUs
+    },
+    "amazon/chronos-t5-mini": {"num_gpus": 0},
+    "amazon/chronos-t5-small": {"num_gpus": 1},
+    "amazon/chronos-t5-base": {"num_gpus": 1},
+    "amazon/chronos-t5-large": {"num_gpus": 1},
+}
+
+
+MODEL_ALIASES = {
+    "tiny": "amazon/chronos-t5-tiny",
+    "mini": "amazon/chronos-t5-mini",
+    "small": "amazon/chronos-t5-small",
+    "base": "amazon/chronos-t5-base",
+    "large": "amazon/chronos-t5-large",
+}
+
+
+class ChronosInferenceDataset:
+    """A container for time series datasets that implements the ``torch.utils.data.Dataset`` interface"""
+
+    def __init__(
+        self,
+        target_df: TimeSeriesDataFrame,
+        context_length: int,
+        target_column: str = "target",
+    ):
+        assert context_length > 0
+        self.context_length = context_length
+        self.target_array = target_df[target_column].to_numpy(dtype=np.float32)
+        self.freq = target_df.freq
+
+        # store pointer to start:end of each time series
+        cum_sizes = target_df.num_timesteps_per_item().values.cumsum()
+        self.indptr = np.append(0, cum_sizes).astype(np.int32)
+
+    def __len__(self):
+        return len(self.indptr) - 1  # noqa
+
+    def _get_context(self, a: np.ndarray, pad_value=np.nan):
+        a = a[-self.context_length :]
+        pad_size = self.context_length - len(a)
+        if pad_size > 0:
+            pad = np.full(shape=(pad_size,), fill_value=pad_value)
+            a = np.concatenate((pad, a))
+        return a
+
+    def __getitem__(self, idx) -> np.ndarray:
+        start_idx = self.indptr[idx]
+        end_idx = self.indptr[idx + 1]
+
+        return self._get_context(self.target_array[start_idx:end_idx])
+
+
+class ChronosModel(AbstractTimeSeriesModel):
+    """Chronos pretrained time series forecasting models, based on the original
+    `ChronosModel <https://github.com/amazon-science/chronos-forecasting>`_ implementation.
+
+    Chronos is family of pretrained models, based on the T5 family, with number of parameters ranging between 8M and 710M.
+    The full collection of Chronos models is available on
+    `Hugging Face <https://huggingface.co/collections/amazon/chronos-models-65f1791d630a8d57cb718444>`_. For Chronos small,
+    base, and large variants a GPU is required to perform inference efficiently.
+
+    Chronos takes a minimalistic approach to pretraining time series models, by discretizing time series data directly into bins
+    which are treated as tokens, effectively performing regression by classification. This results in a simple and flexible framework
+    for using any language model in the context of time series forecasting. See [Ansari2024]_ for more information.
+
+    References
+    ----------
+    .. [Ansari2024] Ansari, Abdul Fatir, Stella, Lorenzo et al.
+        "Chronos: Learning the Language of Time Series."
+        http://arxiv.org/abs/2403.07815
+
+
+    Other Parameters
+    ----------------
+    model_path: str, default = "amazon/chronos-t5-small"
+        Model path used for the model, i.e., a HuggingFace transformers ``name_or_path``. Can be a
+        compatible model name on HuggingFace Hub or a local path to a model directory. Original
+        Chronos models (i.e., ``amazon/chronos-t5-{model_size}``) can be specified with aliases
+        ``tiny``, ``mini`` , ``small``, ``base``, and ``large``.
+    batch_size : int, default = 16
+        Size of batches used during inference
+    num_samples : int, default = 20
+        Number of samples used during inference
+    device : str, default = None
+        Device to use for inference. If None, model will use the GPU if available. For larger model sizes
+        `small`, `base`, and `large`; inference will fail if no GPU is available.
+    context_length : int or None, default = None
+        The context length to use in the model. Shorter context lengths will decrease model accuracy, but result
+        in faster inference. If None, the model will infer context length from the data set length at inference
+        time, but set it to a maximum of 512.
+    optimization_strategy : {None, "onnx", "openvino"}, default = None
+        Optimization strategy to use for inference on CPUs. If None, the model will use the default implementation.
+        If `onnx`, the model will be converted to ONNX and the inference will be performed using ONNX. If ``openvino``,
+        inference will be performed with the model compiled to OpenVINO.
+    torch_dtype : torch.dtype or {"auto", "bfloat16", "float32", "float64"}, default = "auto"
+        Torch data type for model weights, provided to ``from_pretrained`` method of Hugging Face AutoModels. If
+        original Chronos models are specified and the model size is ``small``, ``base``, or ``large``, the
+        ``torch_dtype`` will be set to ``bfloat16`` to enable inference on GPUs.
+    data_loader_num_workers : int, default = 0
+        Number of worker processes to be used in the data loader. See documentation on ``torch.utils.data.DataLoader``
+        for more information.
+    """
+
+    # default number of samples for prediction
+    default_num_samples: int = 20
+    default_batch_size: int = 16
+    default_model_path = "amazon/chronos-t5-small"
+    maximum_context_length = 512
+
+    def __init__(
+        self,
+        freq: Optional[str] = None,
+        prediction_length: int = 1,
+        path: Optional[str] = None,
+        name: Optional[str] = None,
+        eval_metric: str = None,
+        hyperparameters: Dict[str, Any] = None,
+        **kwargs,  # noqa
+    ):
+        hyperparameters = hyperparameters if hyperparameters is not None else {}
+
+        model_path_input = hyperparameters.get("model_path", self.default_model_path)
+        self.model_path = MODEL_ALIASES.get(model_path_input, model_path_input)
+
+        # TODO: automatically determine batch size based on GPU / memory availability
+        self.batch_size = hyperparameters.get("batch_size", self.default_batch_size)
+        self.num_samples = hyperparameters.get("num_samples", self.default_num_samples)
+        self.device = hyperparameters.get("device")
+
+        # if the model requires a GPU, set the torch dtype to bfloat16
+        self.torch_dtype = hyperparameters.get("torch_dtype", "auto" if self.min_num_gpus == 0 else "bfloat16")
+
+        self.data_loader_num_workers = hyperparameters.get("data_loader_num_workers", 0)
+        self.optimization_strategy: Optional[Literal["onnx", "openvino"]] = hyperparameters.get(
+            "optimization_strategy", None
+        )
+        self.context_length = hyperparameters.get("context_length")
+
+        if self.context_length is not None and self.context_length > self.maximum_context_length:
+            logger.warning(
+                f"\tContext length {self.context_length} exceeds maximum context length {self.maximum_context_length}."
+                f"Context length will be set to {self.maximum_context_length}."
+            )
+            self.context_length = self.maximum_context_length
+
+        model_path_safe = str.replace(model_path_input, "/", "__")
+        name = (name if name is not None else "Chronos") + f"[{model_path_safe}]"
+
+        super().__init__(
+            path=path,
+            freq=freq,
+            prediction_length=prediction_length,
+            name=name,
+            eval_metric=eval_metric,
+            hyperparameters=hyperparameters,
+            **kwargs,
+        )
+
+        self.model_pipeline: Optional[Any] = None  # of type OptimizedChronosPipeline
+
+    def save(self, path: str = None, verbose: bool = True) -> str:
+        pipeline = self.model_pipeline
+        self.model_pipeline = None
+        path = super().save(path=path, verbose=verbose)
+        self.model_pipeline = pipeline
+
+        return str(path)
+
+    @classmethod
+    def load(cls, path: str, reset_paths: bool = True, verbose: bool = True) -> "ChronosModel":
+        model = load_pkl.load(path=os.path.join(path, cls.model_file_name), verbose=verbose)
+        if reset_paths:
+            model.set_contexts(path)
+        return model
+
+    def _is_gpu_available(self) -> bool:
+        import torch.cuda
+
+        return torch.cuda.is_available()
+
+    @property
+    def min_num_gpus(self):
+        return MODEL_CONFIGS.get(self.model_path, {}).get("num_gpus", 0)
+
+    def get_minimum_resources(self, is_gpu_available: bool = False) -> Dict[str, Union[int, float]]:
+        minimum_resources = {"num_cpus": 1}
+        # if GPU is available, we train with 1 GPU per trial
+        if is_gpu_available:
+            minimum_resources["num_gpus"] = self.min_num_gpus
+        return minimum_resources
+
+    def load_model_pipeline(self, context_length: Optional[int] = None):
+        from .chronos import OptimizedChronosPipeline
+
+        gpu_available = self._is_gpu_available()
+
+        if not gpu_available and self.min_num_gpus > 0:
+            raise RuntimeError(
+                f"{self.name} requires a GPU to run, but no GPU was detected. "
+                "Please make sure that you are using a computer with a CUDA-compatible GPU and "
+                "`import torch; torch.cuda.is_available()` returns `True`."
+            )
+
+        device = self.device or ("cuda" if gpu_available else "auto")
+
+        pipeline = OptimizedChronosPipeline.from_pretrained(
+            self.model_path,
+            device_map=device,
+            optimization_strategy=self.optimization_strategy,
+            torch_dtype=self.torch_dtype,
+            context_length=context_length or self.context_length,
+        )
+
+        self.model_pipeline = pipeline
+
+    def _fit(
+        self,
+        train_data: TimeSeriesDataFrame,
+        val_data: Optional[TimeSeriesDataFrame] = None,
+        time_limit: int = None,
+        **kwargs,
+    ) -> None:
+        self._check_fit_params()
+
+    def _get_inference_data_loader(
+        self,
+        data: TimeSeriesDataFrame,
+        context_length: int,
+        num_workers: int = 0,
+    ):
+        import torch
+
+        chronos_dataset = ChronosInferenceDataset(
+            target_df=data,
+            target_column=self.target,
+            context_length=context_length,
+        )
+
+        return torch.utils.data.DataLoader(
+            chronos_dataset,
+            batch_size=self.batch_size,
+            shuffle=False,
+            num_workers=num_workers,
+        )
+
+    def _predict(
+        self,
+        data: TimeSeriesDataFrame,
+        known_covariates: Optional[TimeSeriesDataFrame] = None,
+        **kwargs,
+    ) -> TimeSeriesDataFrame:
+        # We defer initialization of the model pipeline. i.e., the model is only loaded to device memory
+        # during inference. We also infer the maximum length of the time series in the inference data set
+        # and use that to determine the context length of the model. If the context length is specified
+        # during initialization, this is always used. If not, the context length is set to the longest
+        # item length. The context length is always capped by self.maximum_context_length.
+        context_length = self.context_length or min(
+            data.num_timesteps_per_item().max(),
+            self.maximum_context_length,
+        )
+
+        with warning_filter(all_warnings=True):
+            import torch
+
+            # load model pipeline to device memory
+            self.load_model_pipeline(context_length=context_length)
+
+            self.model_pipeline.model.eval()
+            with torch.inference_mode():
+                prediction_samples = [
+                    self.model_pipeline.predict(
+                        batch,
+                        prediction_length=self.prediction_length,
+                        num_samples=self.num_samples,
+                        limit_prediction_length=False,
+                    )
+                    .detach()
+                    .cpu()
+                    .numpy()
+                    for batch in self._get_inference_data_loader(
+                        data=data,
+                        num_workers=self.data_loader_num_workers,
+                        context_length=context_length,
+                    )
+                ]
+
+        samples = np.concatenate(prediction_samples, axis=0).swapaxes(1, 2).reshape(-1, self.num_samples)
+
+        mean = samples.mean(axis=-1, keepdims=True)
+        quantiles = np.quantile(samples, self.quantile_levels, axis=-1).T
+
+        df = pd.DataFrame(
+            np.concatenate([mean, quantiles], axis=1),
+            columns=["mean"] + [str(q) for q in self.quantile_levels],
+            index=get_forecast_horizon_index_ts_dataframe(data, self.prediction_length),
+        )
+
+        return TimeSeriesDataFrame(df)

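Inside AutoGluon, the wrapper above is normally reached through the predictor's `hyperparameters` argument rather than instantiated directly. A hedged sketch with illustrative data (the `Chronos` key is registered in `MODEL_TYPES` in the presets.py hunk below):

import pandas as pd

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Illustrative long-format data with the default item_id/timestamp/target columns.
df = pd.DataFrame(
    {
        "item_id": ["A"] * 50,
        "timestamp": pd.date_range("2024-01-01", periods=50, freq="D"),
        "target": [float(i) for i in range(50)],
    }
)
train_data = TimeSeriesDataFrame.from_data_frame(df)

predictor = TimeSeriesPredictor(prediction_length=7).fit(
    train_data,
    # "tiny" resolves to "amazon/chronos-t5-tiny" via MODEL_ALIASES and runs on CPU.
    hyperparameters={"Chronos": {"model_path": "tiny"}},
)
forecasts = predictor.predict(train_data)
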
src/autogluon/timeseries/models/presets.py
@@ -14,6 +14,7 @@ from . import (
     AutoCESModel,
     AutoETSModel,
     AverageModel,
+    ChronosModel,
     CrostonSBAModel,
     DeepARModel,
     DirectTabularModel,
@@ -68,6 +69,7 @@ MODEL_TYPES = dict(
     ADIDA=ADIDAModel,
     CrostonSBA=CrostonSBAModel,
     IMAPA=IMAPAModel,
+    Chronos=ChronosModel,
 )
 
 DEFAULT_MODEL_NAMES = {v: k for k, v in MODEL_TYPES.items()}
@@ -85,6 +87,7 @@ DEFAULT_MODEL_PRIORITY = dict(
     AutoETS=80,
     AutoARIMA=70,
     RecursiveTabular=60,
+    Chronos=50,
     DirectTabular=50,
     DeepAR=40,
     TemporalFusionTransformer=30,

src/autogluon/timeseries/predictor.py
@@ -496,11 +496,23 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
 
             Available presets:
 
-            - ``"fast_training"``: fit simple statistical models (``ETS``, ``Theta``, ``Naive``, ``SeasonalNaive``) + fast tree-based models ``RecursiveTabular`` and ``DirectTabular``. These models are fast to train but may not be very accurate.
-            - ``"medium_quality"``: all models mentioned above + deep learning model ``TemporalFusionTransformer``. Default setting that produces good forecasts with reasonable training time.
-            - ``"high_quality"``: All ML models available in AutoGluon + additional statistical models (``NPTS``, ``AutoETS``, ``AutoARIMA``, ``CrostonSBA``, ``DynamicOptimizedTheta``). Much more accurate than ``medium_quality``, but takes longer to train.
+            - ``"fast_training"``: fit simple statistical models (``ETS``, ``Theta``, ``Naive``, ``SeasonalNaive``) + fast tree-based models ``RecursiveTabular``
+              and ``DirectTabular``. These models are fast to train but may not be very accurate.
+            - ``"medium_quality"``: all models mentioned above + deep learning model ``TemporalFusionTransformer``. Default setting that produces good forecasts
+              with reasonable training time.
+            - ``"high_quality"``: All ML models available in AutoGluon + additional statistical models (``NPTS``, ``AutoETS``, ``AutoARIMA``, ``CrostonSBA``,
+              ``DynamicOptimizedTheta``). Much more accurate than ``medium_quality``, but takes longer to train.
             - ``"best_quality"``: Same models as in ``"high_quality"``, but performs validation with multiple backtests. Usually better than ``high_quality``, but takes even longer to train.
 
+            Available presets with the `Chronos <https://github.com/amazon-science/chronos-forecasting>`_ model:
+
+            - ``"chronos_{model_size}"``: where model size is one of ``tiny,mini,small,base,large``. Uses the Chronos pretrained model for zero-shot forecasting.
+              See the documentation for ``ChronosModel`` or see `Hugging Face <https://huggingface.co/collections/amazon/chronos-models-65f1791d630a8d57cb718444>`_ for more information.
+              Note that a GPU is required for model sizes ``small``, ``base`` and ``large``.
+            - ``"chronos"``: alias for ``"chronos_small"``.
+            - ``"chronos_ensemble"``: builds an ensemble of the models specified in ``"high_quality"`` and ``"chronos_small"``.
+            - ``"chronos_large_ensemble"``: builds an ensemble of the models specified in ``"high_quality"`` and ``"chronos_large"``.
+
             Details for these presets can be found in ``autogluon/timeseries/configs/presets_configs.py``. If not
             provided, user-provided values for ``hyperparameters`` and ``hyperparameter_tune_kwargs`` will be used
             (defaulting to their default values specified below).

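For example, zero-shot forecasting with the new presets might look like this (reusing the illustrative `train_data` from the earlier sketch):

from autogluon.timeseries import TimeSeriesPredictor

predictor = TimeSeriesPredictor(prediction_length=24).fit(
    train_data,
    # "chronos" is an alias for "chronos_small", which expects a GPU;
    # "chronos_tiny" is the CPU-friendly choice.
    presets="chronos_tiny",
)
forecasts = predictor.predict(train_data)
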
src/autogluon/timeseries/utils/warning_filters.py
@@ -3,6 +3,7 @@ import functools
 import io
 import logging
 import os
+import re
 import sys
 import warnings
 
@@ -12,10 +13,13 @@ __all__ = ["warning_filter", "disable_root_logger", "disable_tqdm"]
 
 
 @contextlib.contextmanager
-def warning_filter():
+def warning_filter(all_warnings: bool = False):
+    categories = [RuntimeWarning, UserWarning, ConvergenceWarning, ValueWarning, FutureWarning]
+    if all_warnings:
+        categories.append(Warning)
     with warnings.catch_warnings():
         env_py_warnings = os.environ.get("PYTHONWARNINGS", "")
-        for warning_category in [RuntimeWarning, UserWarning, ConvergenceWarning, ValueWarning, FutureWarning]:
+        for warning_category in categories:
             warnings.simplefilter("ignore", category=warning_category)
         try:
             os.environ["PYTHONWARNINGS"] = "ignore"
@@ -25,14 +29,28 @@ def warning_filter():
 
 
 @contextlib.contextmanager
-def disable_root_logger():
+def disable_root_logger(root_log_level=logging.ERROR):
     try:
-        logging.getLogger().setLevel(logging.ERROR)
+        logging.getLogger().setLevel(root_log_level)
         yield
     finally:
         logging.getLogger().setLevel(logging.INFO)
 
 
+@contextlib.contextmanager
+def set_loggers_level(regex: str, level=logging.ERROR):
+    log_levels = {}
+    try:
+        for logger_name in logging.root.manager.loggerDict:
+            if re.match(regex, logger_name):
+                log_levels[logger_name] = logging.getLogger(logger_name).level
+                logging.getLogger(logger_name).setLevel(level)
+        yield
+    finally:
+        for logger_name, level in log_levels.items():
+            logging.getLogger(logger_name).setLevel(level)
+
+
 @contextlib.contextmanager
 def disable_tqdm():
     """monkey-patch tqdm to disable it within context"""

src/autogluon/timeseries/version.py
@@ -1,3 +1,3 @@
 """This is the autogluon version file."""
-__version__ = '1.0.1b20240323'
+__version__ = '1.0.1b20240325'
 __lite__ = False

src/autogluon.timeseries.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.0.1b20240323
+Version: 1.0.1b20240325
 Summary: AutoML for Image, Text, and Tabular Data
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -132,5 +132,6 @@ Classifier: Topic :: Scientific/Engineering :: Image Recognition
 Requires-Python: >=3.8, <3.12
 Description-Content-Type: text/markdown
 Provides-Extra: tests
-Provides-Extra: chronos-cpu
+Provides-Extra: chronos-openvino
+Provides-Extra: chronos-onnx
 Provides-Extra: all

src/autogluon.timeseries.egg-info/SOURCES.txt
@@ -29,6 +29,9 @@ src/autogluon/timeseries/models/abstract/model_trial.py
 src/autogluon/timeseries/models/autogluon_tabular/__init__.py
 src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py
 src/autogluon/timeseries/models/autogluon_tabular/utils.py
+src/autogluon/timeseries/models/chronos/__init__.py
+src/autogluon/timeseries/models/chronos/chronos.py
+src/autogluon/timeseries/models/chronos/model.py
 src/autogluon/timeseries/models/ensemble/__init__.py
 src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py
 src/autogluon/timeseries/models/ensemble/greedy_ensemble.py

src/autogluon.timeseries.egg-info/requires.txt
@@ -16,15 +16,19 @@ utilsforecast<0.0.11,>=0.0.10
 tqdm<5,>=4.38
 orjson~=3.9
 tensorboard<3,>=2.9
-autogluon.core[raytune]==1.0.1b20240323
-autogluon.common==1.0.1b20240323
-autogluon.tabular[catboost,lightgbm,xgboost]==1.0.1b20240323
+autogluon.core[raytune]==1.0.1b20240325
+autogluon.common==1.0.1b20240325
+autogluon.tabular[catboost,lightgbm,xgboost]==1.0.1b20240325
 
 [all]
-optimum[nncf,onnxruntime,openvino]<1.18,>=1.17
+optimum[nncf,openvino]<1.18,>=1.17
+optimum[onnxruntime]<1.18,>=1.17
 
-[chronos-cpu]
-optimum[nncf,onnxruntime,openvino]<1.18,>=1.17
+[chronos-onnx]
+optimum[onnxruntime]<1.18,>=1.17
+
+[chronos-openvino]
+optimum[nncf,openvino]<1.18,>=1.17
 
 [tests]
 pytest

src/autogluon/timeseries/configs/presets_configs.py (previous version, removed; superseded by the new file above)
@@ -1,11 +0,0 @@
-"""Preset configurations for autogluon.timeseries Predictors"""
-
-# TODO: change default HPO settings when other HPO strategies (e.g., Ray tune) are available
-# TODO: add refit_full arguments once refitting is available
-
-TIMESERIES_PRESETS_CONFIGS = dict(
-    best_quality={"hyperparameters": "default", "num_val_windows": 2},
-    high_quality={"hyperparameters": "default"},
-    medium_quality={"hyperparameters": "light"},
-    fast_training={"hyperparameters": "very_light"},
-)