autogluon.timeseries 1.4.1b20250907__py3-none-any.whl → 1.5.1b20260122__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Note: this version of autogluon.timeseries has been flagged as potentially problematic.
- autogluon/timeseries/configs/hyperparameter_presets.py +13 -28
- autogluon/timeseries/configs/predictor_presets.py +23 -39
- autogluon/timeseries/dataset/ts_dataframe.py +97 -86
- autogluon/timeseries/learner.py +70 -35
- autogluon/timeseries/metrics/__init__.py +4 -4
- autogluon/timeseries/metrics/abstract.py +8 -8
- autogluon/timeseries/metrics/point.py +9 -9
- autogluon/timeseries/metrics/quantile.py +5 -5
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +4 -1
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -50
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +8 -8
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +58 -62
- autogluon/timeseries/models/autogluon_tabular/per_step.py +27 -16
- autogluon/timeseries/models/autogluon_tabular/transforms.py +11 -9
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +395 -0
- autogluon/timeseries/models/chronos/model.py +127 -89
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +69 -37
- autogluon/timeseries/models/ensemble/__init__.py +36 -2
- autogluon/timeseries/models/ensemble/abstract.py +14 -46
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
- autogluon/timeseries/models/ensemble/{greedy.py → ensemble_selection.py} +41 -61
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
- autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +25 -22
- autogluon/timeseries/models/ensemble/weighted/greedy.py +64 -0
- autogluon/timeseries/models/gluonts/abstract.py +32 -31
- autogluon/timeseries/models/gluonts/dataset.py +11 -11
- autogluon/timeseries/models/gluonts/models.py +0 -7
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +15 -18
- autogluon/timeseries/models/local/naive.py +2 -2
- autogluon/timeseries/models/local/npts.py +7 -1
- autogluon/timeseries/models/local/statsforecast.py +13 -13
- autogluon/timeseries/models/multi_window/multi_window_model.py +39 -24
- autogluon/timeseries/models/registry.py +3 -4
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
- autogluon/timeseries/models/toto/model.py +249 -0
- autogluon/timeseries/predictor.py +541 -162
- autogluon/timeseries/regressor.py +27 -30
- autogluon/timeseries/splitter.py +3 -27
- autogluon/timeseries/trainer/ensemble_composer.py +444 -0
- autogluon/timeseries/trainer/model_set_builder.py +9 -9
- autogluon/timeseries/trainer/prediction_cache.py +16 -16
- autogluon/timeseries/trainer/trainer.py +300 -279
- autogluon/timeseries/trainer/utils.py +17 -0
- autogluon/timeseries/transforms/covariate_scaler.py +8 -8
- autogluon/timeseries/transforms/target_scaler.py +15 -15
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +1 -3
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/features.py +31 -14
- autogluon/timeseries/utils/forecast.py +6 -7
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.5.1b20260122-py3.11-nspkg.pth +1 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/METADATA +39 -22
- autogluon_timeseries-1.5.1b20260122.dist-info/RECORD +103 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/WHEEL +1 -1
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -580
- autogluon.timeseries-1.4.1b20250907-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.4.1b20250907.dist-info/RECORD +0 -75
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/chronos/pipeline/chronos.py (deleted)
@@ -1,544 +0,0 @@
-# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Original Source: https://github.com/amazon-science/chronos-forecasting
-# Authors: Lorenzo Stella <stellalo@amazon.com>, Abdul Fatir Ansari <ansarnd@amazon.com>
-
-import logging
-import warnings
-from dataclasses import dataclass
-from typing import Any, Literal, Optional, Union
-
-import torch
-import torch.nn as nn
-from transformers import AutoConfig, AutoModelForSeq2SeqLM, GenerationConfig, PreTrainedModel
-
-from .base import BaseChronosPipeline, ForecastType
-
-logger = logging.getLogger("autogluon.timeseries.models.chronos")
-
-
-__all__ = ["ChronosConfig", "ChronosPipeline"]
-
-
-@dataclass
-class ChronosConfig:
-    """
-    This class holds all the configuration parameters to be used
-    by ``ChronosTokenizer`` and ``ChronosPretrainedModel``.
-    """
-
-    tokenizer_class: str
-    tokenizer_kwargs: dict[str, Any]
-    n_tokens: int
-    n_special_tokens: int
-    pad_token_id: int
-    eos_token_id: int
-    use_eos_token: bool
-    model_type: Literal["seq2seq"]
-    context_length: int
-    prediction_length: int
-    num_samples: int
-    temperature: float
-    top_k: int
-    top_p: float
-
-    def __post_init__(self):
-        assert self.pad_token_id < self.n_special_tokens and self.eos_token_id < self.n_special_tokens, (
-            f"Special token id's must be smaller than {self.n_special_tokens=}"
-        )
-
-    def create_tokenizer(self) -> "ChronosTokenizer":
-        if self.tokenizer_class == "MeanScaleUniformBins":
-            return MeanScaleUniformBins(**self.tokenizer_kwargs, config=self)
-        raise ValueError
-
-
-class ChronosTokenizer:
-    """
-    A ``ChronosTokenizer`` defines how time series are mapped into token IDs
-    and back.
-
-    For details, see the ``input_transform`` and ``output_transform`` methods,
-    which concrete classes must implement.
-    """
-
-    def context_input_transform(
-        self,
-        context: torch.Tensor,
-    ) -> tuple:
-        """
-        Turn a batch of time series into token IDs, attention mask, and tokenizer_state.
-
-        Parameters
-        ----------
-        context
-            A tensor shaped (batch_size, time_length), containing the
-            timeseries to forecast. Use left-padding with ``torch.nan``
-            to align time series of different lengths.
-
-        Returns
-        -------
-        token_ids
-            A tensor of integers, shaped (batch_size, time_length + 1)
-            if ``config.use_eos_token`` and (batch_size, time_length)
-            otherwise, containing token IDs for the input series.
-        attention_mask
-            A boolean tensor, same shape as ``token_ids``, indicating
-            which input observations are not ``torch.nan`` (i.e. not
-            missing nor padding).
-        tokenizer_state
-            An object that can be passed to ``label_input_transform``
-            and ``output_transform``. Contains the relevant information
-            to decode output samples into real values,
-            such as location and scale parameters.
-        """
-        raise NotImplementedError()
-
-    def label_input_transform(self, label: torch.Tensor, tokenizer_state: Any) -> tuple:
-        """
-        Turn a batch of label slices of time series into token IDs and attention mask
-        using the ``tokenizer_state`` provided by ``context_input_transform``.
-
-        Parameters
-        ----------
-        label
-            A tensor shaped (batch_size, time_length), containing the
-            timeseries label, i.e., the ground-truth future values.
-        tokenizer_state
-            An object returned by ``context_input_transform`` containing
-            relevant information to preprocess data, such as location and
-            scale. The nature of this depends on the specific tokenizer.
-            This is used for tokenizing the label, in order to use the same
-            scaling used to tokenize the context.
-
-        Returns
-        -------
-        token_ids
-            A tensor of integers, shaped (batch_size, time_length + 1)
-            if ``config.use_eos_token`` and (batch_size, time_length)
-            otherwise, containing token IDs for the input series.
-        attention_mask
-            A boolean tensor, same shape as ``token_ids``, indicating
-            which input observations are not ``torch.nan`` (i.e. not
-            missing nor padding).
-        """
-        raise NotImplementedError()
-
-    def output_transform(self, samples: torch.Tensor, tokenizer_state: Any) -> torch.Tensor:
-        """
-        Turn a batch of sample token IDs into real values.
-
-        Parameters
-        ----------
-        samples
-            A tensor of integers, shaped (batch_size, num_samples, time_length),
-            containing token IDs of sample trajectories.
-        tokenizer_state
-            An object returned by ``input_transform`` containing
-            relevant context to decode samples, such as location and scale.
-            The nature of this depends on the specific tokenizer.
-
-        Returns
-        -------
-        forecasts
-            A real tensor, shaped (batch_size, num_samples, time_length),
-            containing forecasted sample paths.
-        """
-        raise NotImplementedError()
-
-
-class MeanScaleUniformBins(ChronosTokenizer):
-    """
-    A tokenizer that performs mean scaling and then quantizes the scaled time series into
-    uniformly-spaced bins between some bounds on the real line.
-    """
-
-    def __init__(self, low_limit: float, high_limit: float, config: ChronosConfig) -> None:
-        self.config = config
-        self.centers = torch.linspace(
-            low_limit,
-            high_limit,
-            config.n_tokens - config.n_special_tokens - 1,
-        )
-        self.boundaries = torch.concat(
-            (
-                torch.tensor([-1e20], device=self.centers.device),
-                (self.centers[1:] + self.centers[:-1]) / 2,
-                torch.tensor([1e20], device=self.centers.device),
-            )
-        )
-
-    def _input_transform(
-        self, context: torch.Tensor, scale: Optional[torch.Tensor] = None
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        attention_mask = ~torch.isnan(context)
-
-        if scale is None:
-            scale = torch.nansum(torch.abs(context) * attention_mask, dim=-1) / torch.nansum(attention_mask, dim=-1)
-            scale[~(scale > 0)] = 1.0
-
-        scaled_context = context / scale.unsqueeze(dim=-1)
-        token_ids = (
-            torch.bucketize(
-                input=scaled_context,
-                boundaries=self.boundaries,
-                # buckets are open to the right, see:
-                # https://pytorch.org/docs/2.1/generated/torch.bucketize.html#torch-bucketize
-                right=True,
-            )
-            + self.config.n_special_tokens
-        )
-        token_ids[~attention_mask] = self.config.pad_token_id
-        token_ids.clamp_(0, self.config.n_tokens - 1)
-
-        return token_ids, attention_mask, scale
-
-    def _append_eos_token(
-        self, token_ids: torch.Tensor, attention_mask: torch.Tensor
-    ) -> tuple[torch.Tensor, torch.Tensor]:
-        batch_size = token_ids.shape[0]
-        eos_tokens = torch.full((batch_size, 1), fill_value=self.config.eos_token_id)
-        token_ids = torch.concat((token_ids, eos_tokens), dim=1)
-        eos_mask = torch.full((batch_size, 1), fill_value=True)
-        attention_mask = torch.concat((attention_mask, eos_mask), dim=1)
-
-        return token_ids, attention_mask
-
-    def context_input_transform(self, context: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        length = context.shape[-1]
-
-        if length > self.config.context_length:
-            context = context[..., -self.config.context_length :]
-
-        token_ids, attention_mask, scale = self._input_transform(context=context)
-
-        if self.config.use_eos_token and self.config.model_type == "seq2seq":
-            token_ids, attention_mask = self._append_eos_token(token_ids=token_ids, attention_mask=attention_mask)
-
-        return token_ids, attention_mask, scale
-
-    def label_input_transform(self, label: torch.Tensor, scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
-        token_ids, attention_mask, _ = self._input_transform(context=label, scale=scale)
-
-        if self.config.use_eos_token:
-            token_ids, attention_mask = self._append_eos_token(token_ids=token_ids, attention_mask=attention_mask)
-
-        return token_ids, attention_mask
-
-    def output_transform(self, samples: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
-        scale_unsqueezed = scale.unsqueeze(-1).unsqueeze(-1)
-        indices = torch.clamp(
-            samples - self.config.n_special_tokens - 1,
-            min=0,
-            max=len(self.centers) - 1,
-        )
-        return self.centers[indices] * scale_unsqueezed
-
-
-class ChronosPretrainedModel(nn.Module):
-    """
-    A ``ChronosPretrainedModel`` wraps a ``PreTrainedModel`` object from ``transformers``
-    and uses it to predict sample paths for time series tokens.
-
-    Parameters
-    ----------
-    config
-        The configuration to use.
-    model
-        The pre-trained model to use.
-    """
-
-    def __init__(self, config: ChronosConfig, model: PreTrainedModel) -> None:
-        super().__init__()
-        self.config = config
-        self.model = model
-
-    @property
-    def device(self):
-        return self.model.device
-
-    def encode(
-        self,
-        input_ids: torch.Tensor,
-        attention_mask: torch.Tensor,
-    ):
-        """
-        Extract the encoder embedding for the given token sequences.
-
-        Parameters
-        ----------
-        input_ids
-            Tensor of indices of input sequence tokens in the vocabulary
-            with shape (batch_size, sequence_length).
-        attention_mask
-            A mask tensor of the same shape as input_ids to avoid attending
-            on padding or missing tokens.
-
-        Returns
-        -------
-        embedding
-            A tensor of encoder embeddings with shape
-            (batch_size, sequence_length, d_model).
-        """
-        assert self.config.model_type == "seq2seq", "Encoder embeddings are only supported for encoder-decoder models"
-        return self.model.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
-
-    def forward(
-        self,
-        input_ids: torch.Tensor,
-        attention_mask: torch.Tensor,
-        prediction_length: Optional[int] = None,
-        num_samples: Optional[int] = None,
-        temperature: Optional[float] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-    ) -> torch.Tensor:
-        """
-        Predict future sample tokens for the given token sequences.
-
-        Arguments ``prediction_length``, ``num_samples``, ``temperature``,
-        ``top_k``, ``top_p`` can be used to customize the model inference,
-        and default to the corresponding attributes in ``self.config`` if
-        not provided.
-
-        Returns
-        -------
-        samples
-            A tensor of integers, shaped (batch_size, num_samples, time_length),
-            containing forecasted sample paths.
-        """
-        if prediction_length is None:
-            prediction_length = self.config.prediction_length
-        if num_samples is None:
-            num_samples = self.config.num_samples
-        if temperature is None:
-            temperature = self.config.temperature
-        if top_k is None:
-            top_k = self.config.top_k
-        if top_p is None:
-            top_p = self.config.top_p
-
-        preds = self.model.generate(
-            input_ids=input_ids,
-            attention_mask=attention_mask.long(),  # int64 (long) type conversion needed for ONNX
-            generation_config=GenerationConfig(
-                min_new_tokens=prediction_length,
-                max_new_tokens=prediction_length,
-                do_sample=True,
-                num_return_sequences=num_samples,
-                eos_token_id=self.config.eos_token_id,
-                pad_token_id=self.config.pad_token_id,
-                temperature=temperature,
-                top_k=top_k,
-                top_p=top_p,
-            ),
-        )
-
-        if self.config.model_type == "seq2seq":
-            preds = preds[..., 1:]  # remove the decoder start token
-        else:
-            assert self.config.model_type == "causal"
-            assert preds.size(-1) == input_ids.size(-1) + prediction_length
-            preds = preds[..., -prediction_length:]
-
-        return preds.reshape(input_ids.size(0), num_samples, -1)
-
-
-class ChronosPipeline(BaseChronosPipeline):
-    """
-    A ``ChronosPipeline`` uses the given tokenizer and model to forecast
-    input time series.
-
-    Use the ``from_pretrained`` class method to load serialized models.
-    Use the ``predict`` method to get forecasts.
-
-    Parameters
-    ----------
-    tokenizer
-        The tokenizer object to use.
-    model
-        The model to use.
-    """
-
-    tokenizer: ChronosTokenizer
-    model: ChronosPretrainedModel
-    forecast_type: ForecastType = ForecastType.SAMPLES
-
-    def __init__(self, tokenizer, model):
-        super().__init__(inner_model=model.model)
-        self.tokenizer = tokenizer
-        self.model = model
-
-    @torch.no_grad()
-    def embed(self, context: Union[torch.Tensor, list[torch.Tensor]]) -> tuple[torch.Tensor, Any]:
-        """
-        Get encoder embeddings for the given time series.
-
-        Parameters
-        ----------
-        context
-            Input series. This is either a 1D tensor, or a list
-            of 1D tensors, or a 2D tensor whose first dimension
-            is batch. In the latter case, use left-padding with
-            ``torch.nan`` to align series of different lengths.
-
-        Returns
-        -------
-        embeddings, tokenizer_state
-            A tuple of two tensors: the encoder embeddings and the tokenizer_state,
-            e.g., the scale of the time series in the case of mean scaling.
-            The encoder embeddings are shaped (batch_size, context_length, d_model)
-            or (batch_size, context_length + 1, d_model), where context_length
-            is the size of the context along the time axis if a 2D tensor was provided
-            or the length of the longest time series, if a list of 1D tensors was
-            provided, and the extra 1 is for EOS.
-        """
-        context_tensor = self._prepare_and_validate_context(context=context)
-        token_ids, attention_mask, tokenizer_state = self.tokenizer.context_input_transform(context_tensor)
-        embeddings = self.model.encode(
-            input_ids=token_ids.to(self.model.device),
-            attention_mask=attention_mask.to(self.model.device),
-        ).cpu()
-        return embeddings, tokenizer_state
-
-    def predict(
-        self,
-        context: Union[torch.Tensor, list[torch.Tensor]],
-        prediction_length: Optional[int] = None,
-        num_samples: Optional[int] = None,
-        temperature: Optional[float] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        limit_prediction_length: bool = False,
-        **kwargs,
-    ) -> torch.Tensor:
-        """
-        Get forecasts for the given time series.
-
-        Parameters
-        ----------
-        context
-            Input series. This is either a 1D tensor, or a list
-            of 1D tensors, or a 2D tensor whose first dimension
-            is batch. In the latter case, use left-padding with
-            ``torch.nan`` to align series of different lengths.
-        prediction_length
-            Time steps to predict. Defaults to what specified
-            in ``self.model.config``.
-        num_samples
-            Number of sample paths to predict. Defaults to what
-            specified in ``self.model.config``.
-        temperature
-            Temperature to use for generating sample tokens.
-            Defaults to what specified in ``self.model.config``.
-        top_k
-            Top-k parameter to use for generating sample tokens.
-            Defaults to what specified in ``self.model.config``.
-        top_p
-            Top-p parameter to use for generating sample tokens.
-            Defaults to what specified in ``self.model.config``.
-        limit_prediction_length
-            Force prediction length smaller or equal than the
-            built-in prediction length from the model. True by
-            default. When true, fail loudly if longer predictions
-            are requested, otherwise longer predictions are allowed.
-
-        Returns
-        -------
-        samples
-            Tensor of sample forecasts, of shape
-            (batch_size, num_samples, prediction_length).
-        """
-        context_tensor = self._prepare_and_validate_context(context=context)
-        if prediction_length is None:
-            prediction_length = self.model.config.prediction_length
-
-        if prediction_length > self.model.config.prediction_length:
-            msg = (
-                f"We recommend keeping prediction length <= {self.model.config.prediction_length}. "
-                f"The quality of longer predictions may degrade since the model is not optimized for it. "
-            )
-            if limit_prediction_length:
-                msg += "You can turn off this check by setting `limit_prediction_length=False`."
-                raise ValueError(msg)
-            warnings.warn(msg, stacklevel=2)
-
-        predictions = []
-        remaining = prediction_length
-
-        while remaining > 0:
-            token_ids, attention_mask, scale = self.tokenizer.context_input_transform(context_tensor)
-            samples = self.model(
-                token_ids.to(self.model.device),
-                attention_mask.to(self.model.device),
-                min(remaining, self.model.config.prediction_length),
-                num_samples,
-                temperature,
-                top_k,
-                top_p,
-            )
-            prediction = self.tokenizer.output_transform(samples.to(scale.device), scale)
-
-            predictions.append(prediction)
-            remaining -= prediction.shape[-1]
-
-            if remaining <= 0:
-                break
-
-            context_tensor = torch.cat([context_tensor, prediction.median(dim=1).values], dim=-1)
-
-        return torch.cat(predictions, dim=-1)
-
-    def predict_quantiles(
-        self,
-        context: torch.Tensor,
-        prediction_length: int,
-        quantile_levels: list[float],
-        num_samples: Optional[int] = None,
-        **kwargs,
-    ) -> tuple[torch.Tensor, torch.Tensor]:
-        num_samples = num_samples or self.model.config.num_samples
-        prediction_samples = (
-            self.predict(
-                context,
-                prediction_length=prediction_length,
-                num_samples=num_samples,
-            )
-            .detach()
-            .cpu()
-            .swapaxes(1, 2)
-        )
-        mean = prediction_samples.mean(dim=-1, keepdim=True)
-        quantiles = torch.quantile(
-            prediction_samples,
-            q=torch.tensor(quantile_levels, dtype=prediction_samples.dtype),
-            dim=-1,
-        ).permute(1, 2, 0)
-
-        return quantiles, mean
-
-    @classmethod
-    def from_pretrained(cls, *args, **kwargs):
-        """
-        Load the model, either from a local path or from the HuggingFace Hub.
-        Supports the same arguments as ``AutoConfig`` and ``AutoModel``
-        from ``transformers``.
-        """
-        kwargs = kwargs.copy()
-
-        context_length = kwargs.pop("context_length", None)
-
-        config = AutoConfig.from_pretrained(*args, **kwargs)
-        assert hasattr(config, "chronos_config"), "Not a Chronos config file"
-
-        if context_length is not None:
-            config.chronos_config["context_length"] = context_length
-        chronos_config = ChronosConfig(**config.chronos_config)
-
-        assert chronos_config.model_type == "seq2seq"
-        inner_model = AutoModelForSeq2SeqLM.from_pretrained(*args, **kwargs)
-        return cls(
-            tokenizer=chronos_config.create_tokenizer(),
-            model=ChronosPretrainedModel(config=chronos_config, model=inner_model),
-        )