autogluon.timeseries 1.4.1b20250830-py3-none-any.whl → 1.4.1b20251116-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/timeseries/dataset/ts_dataframe.py +66 -53
- autogluon/timeseries/learner.py +5 -4
- autogluon/timeseries/metrics/quantile.py +1 -1
- autogluon/timeseries/metrics/utils.py +4 -4
- autogluon/timeseries/models/__init__.py +2 -0
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +28 -36
- autogluon/timeseries/models/autogluon_tabular/per_step.py +14 -5
- autogluon/timeseries/models/autogluon_tabular/transforms.py +9 -7
- autogluon/timeseries/models/chronos/model.py +104 -68
- autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +64 -32
- autogluon/timeseries/models/ensemble/__init__.py +29 -2
- autogluon/timeseries/models/ensemble/abstract.py +1 -37
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +247 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +50 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +10 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +87 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +133 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +141 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +41 -0
- autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +0 -10
- autogluon/timeseries/models/gluonts/abstract.py +2 -2
- autogluon/timeseries/models/gluonts/dataset.py +2 -2
- autogluon/timeseries/models/local/abstract_local_model.py +2 -2
- autogluon/timeseries/models/multi_window/multi_window_model.py +1 -1
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +197 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +94 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +306 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +119 -0
- autogluon/timeseries/models/toto/model.py +236 -0
- autogluon/timeseries/predictor.py +10 -26
- autogluon/timeseries/regressor.py +9 -7
- autogluon/timeseries/splitter.py +1 -25
- autogluon/timeseries/trainer/ensemble_composer.py +250 -0
- autogluon/timeseries/trainer/trainer.py +124 -193
- autogluon/timeseries/trainer/utils.py +18 -0
- autogluon/timeseries/transforms/covariate_scaler.py +1 -1
- autogluon/timeseries/transforms/target_scaler.py +7 -7
- autogluon/timeseries/utils/features.py +9 -5
- autogluon/timeseries/utils/forecast.py +5 -5
- autogluon/timeseries/version.py +1 -1
- autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth +1 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/METADATA +28 -13
- autogluon_timeseries-1.4.1b20251116.dist-info/RECORD +96 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/WHEEL +1 -1
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
- autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
- autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -530
- autogluon.timeseries-1.4.1b20250830-py3.9-nspkg.pth +0 -1
- autogluon.timeseries-1.4.1b20250830.dist-info/RECORD +0 -75
- /autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py} +0 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.4.1b20250830.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/zip-safe +0 -0

autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py
@@ -1,530 +0,0 @@
-# Implements Chronos with T5 architecture but with patched inputs instead of
-# per-time-step tokenization. a.k.a. Chronos-Bolt
-
-# Authors: Abdul Fatir Ansari <ansarnd@amazon.com>, Lorenzo Stella <stellalo@amazon.com>, Caner Turkmen <atturkm@amazon.com>
-
-import copy
-import logging
-import warnings
-from dataclasses import dataclass, fields
-from typing import Optional, Union
-
-import torch
-import torch.nn as nn
-from transformers import AutoConfig
-from transformers.models.t5.modeling_t5 import (
-    ACT2FN,
-    T5Config,
-    T5LayerNorm,
-    T5PreTrainedModel,
-    T5Stack,
-)
-from transformers.utils import ModelOutput
-
-from .base import BaseChronosPipeline, ForecastType
-
-logger = logging.getLogger("autogluon.timeseries.models.chronos")
-
-
-@dataclass
-class ChronosBoltConfig:
-    context_length: int
-    prediction_length: int
-    input_patch_size: int
-    input_patch_stride: int
-    quantiles: list[float]
-    use_reg_token: bool = False
-
-
-@dataclass
-class ChronosBoltOutput(ModelOutput):
-    loss: Optional[torch.Tensor] = None
-    quantile_preds: Optional[torch.Tensor] = None
-    attentions: Optional[torch.Tensor] = None
-    cross_attentions: Optional[torch.Tensor] = None
-
-
-class Patch(nn.Module):
-    def __init__(self, patch_size: int, patch_stride: int) -> None:
-        super().__init__()
-        self.patch_size = patch_size
-        self.patch_stride = patch_stride
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        length = x.shape[-1]
-
-        if length % self.patch_size != 0:
-            padding_size = (
-                *x.shape[:-1],
-                self.patch_size - (length % self.patch_size),
-            )
-            padding = torch.full(size=padding_size, fill_value=torch.nan, dtype=x.dtype, device=x.device)
-            x = torch.concat((padding, x), dim=-1)
-
-        x = x.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)
-        return x
-
-
-class InstanceNorm(nn.Module):
-    """
-    See, also, RevIN. Apply standardization along the last dimension.
-    """
-
-    def __init__(self, eps: float = 1e-5) -> None:
-        super().__init__()
-        self.eps = eps
-
-    def forward(
-        self,
-        x: torch.Tensor,
-        loc_scale: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
-    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
-        if loc_scale is None:
-            loc = torch.nan_to_num(torch.nanmean(x, dim=-1, keepdim=True), nan=0.0)
-            scale = torch.nan_to_num((x - loc).square().nanmean(dim=-1, keepdim=True).sqrt(), nan=1.0)
-            scale = torch.where(scale == 0, self.eps, scale)
-        else:
-            loc, scale = loc_scale
-
-        return (x - loc) / scale, (loc, scale)
-
-    def inverse(self, x: torch.Tensor, loc_scale: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
-        loc, scale = loc_scale
-        return x * scale + loc
-
-
-class ResidualBlock(nn.Module):
-    def __init__(
-        self,
-        in_dim: int,
-        h_dim: int,
-        out_dim: int,
-        act_fn_name: str,
-        dropout_p: float = 0.0,
-        use_layer_norm: bool = False,
-    ) -> None:
-        super().__init__()
-
-        self.dropout = nn.Dropout(dropout_p)
-        self.hidden_layer = nn.Linear(in_dim, h_dim)
-        self.act = ACT2FN[act_fn_name]
-        self.output_layer = nn.Linear(h_dim, out_dim)
-        self.residual_layer = nn.Linear(in_dim, out_dim)
-
-        self.use_layer_norm = use_layer_norm
-        if use_layer_norm:
-            self.layer_norm = T5LayerNorm(out_dim)
-
-    def forward(self, x: torch.Tensor):
-        hid = self.act(self.hidden_layer(x))
-        out = self.dropout(self.output_layer(hid))
-        res = self.residual_layer(x)
-
-        out = out + res
-
-        if self.use_layer_norm:
-            return self.layer_norm(out)
-        return out
-
-
-class ChronosBoltModelForForecasting(T5PreTrainedModel):
-    _keys_to_ignore_on_load_missing = [
-        r"input_patch_embedding\.",
-        r"output_patch_embedding\.",
-    ]
-    _keys_to_ignore_on_load_unexpected = [r"lm_head.weight"]
-    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
-
-    def __init__(self, config: T5Config):
-        assert hasattr(config, "chronos_config"), "Not a Chronos config file"
-
-        super().__init__(config)
-        self.model_dim = config.d_model
-
-        # TODO: remove filtering eventually, added for backward compatibility
-        config_fields = {f.name for f in fields(ChronosBoltConfig)}
-        self.chronos_config = ChronosBoltConfig(
-            **{k: v for k, v in config.chronos_config.items() if k in config_fields}
-        )
-
-        # Only decoder_start_id (and optionally REG token)
-        if self.chronos_config.use_reg_token:
-            config.reg_token_id = 1
-
-        config.vocab_size = 2 if self.chronos_config.use_reg_token else 1
-        self.shared = nn.Embedding(config.vocab_size, config.d_model)
-
-        # Input patch embedding layer
-        self.input_patch_embedding = ResidualBlock(
-            in_dim=self.chronos_config.input_patch_size * 2,
-            h_dim=config.d_ff,
-            out_dim=config.d_model,
-            act_fn_name=config.dense_act_fn,
-            dropout_p=config.dropout_rate,
-        )
-
-        # patching layer
-        self.patch = Patch(
-            patch_size=self.chronos_config.input_patch_size,
-            patch_stride=self.chronos_config.input_patch_stride,
-        )
-
-        # instance normalization, also referred to as "scaling" in Chronos and GluonTS
-        self.instance_norm = InstanceNorm()
-
-        encoder_config = copy.deepcopy(config)
-        encoder_config.is_decoder = False
-        encoder_config.use_cache = False
-        encoder_config.is_encoder_decoder = False
-        self.encoder = T5Stack(encoder_config, self.shared)
-
-        self._init_decoder(config)
-
-        self.num_quantiles = len(self.chronos_config.quantiles)
-        quantiles = torch.tensor(self.chronos_config.quantiles, dtype=self.dtype)
-        self.register_buffer("quantiles", quantiles, persistent=False)
-
-        self.output_patch_embedding = ResidualBlock(
-            in_dim=config.d_model,
-            h_dim=config.d_ff,
-            out_dim=self.num_quantiles * self.chronos_config.prediction_length,
-            act_fn_name=config.dense_act_fn,
-            dropout_p=config.dropout_rate,
-        )
-
-        # Initialize weights and apply final processing
-        self.post_init()
-
-        # Model parallel
-        self.model_parallel = False
-        self.device_map = None
-
-    def _init_weights(self, module):
-        super()._init_weights(module)
-        """Initialize the weights"""
-        factor = self.config.initializer_factor
-        if isinstance(module, (self.__class__)):
-            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
-        elif isinstance(module, ResidualBlock):
-            module.hidden_layer.weight.data.normal_(
-                mean=0.0,
-                std=factor * ((self.chronos_config.input_patch_size * 2) ** -0.5),
-            )
-            if hasattr(module.hidden_layer, "bias") and module.hidden_layer.bias is not None:
-                module.hidden_layer.bias.data.zero_()
-
-            module.residual_layer.weight.data.normal_(
-                mean=0.0,
-                std=factor * ((self.chronos_config.input_patch_size * 2) ** -0.5),
-            )
-            if hasattr(module.residual_layer, "bias") and module.residual_layer.bias is not None:
-                module.residual_layer.bias.data.zero_()
-
-            module.output_layer.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
-            if hasattr(module.output_layer, "bias") and module.output_layer.bias is not None:
-                module.output_layer.bias.data.zero_()
-
-    def forward(
-        self,
-        context: torch.Tensor,
-        mask: Optional[torch.Tensor] = None,
-        target: Optional[torch.Tensor] = None,
-        target_mask: Optional[torch.Tensor] = None,
-    ) -> ChronosBoltOutput:
-        mask = mask.to(context.dtype) if mask is not None else torch.isnan(context).logical_not().to(context.dtype)
-
-        batch_size, _ = context.shape
-        if context.shape[-1] > self.chronos_config.context_length:
-            context = context[..., -self.chronos_config.context_length :]
-            mask = mask[..., -self.chronos_config.context_length :]
-
-        # scaling
-        context, loc_scale = self.instance_norm(context)
-
-        # the scaling op above is done in 32-bit precision,
-        # then the context is moved to model's dtype
-        context = context.to(self.dtype)
-        mask = mask.to(self.dtype)
-
-        # patching
-        patched_context = self.patch(context)
-        patched_mask = torch.nan_to_num(self.patch(mask), nan=0.0)
-        patched_context[~(patched_mask > 0)] = 0.0
-        # concat context and mask along patch dim
-        patched_context = torch.cat([patched_context, patched_mask], dim=-1)
-
-        # attention_mask = 1 if at least one item in the patch is observed
-        attention_mask = patched_mask.sum(dim=-1) > 0  # (batch_size, patched_seq_length)
-
-        input_embeds = self.input_patch_embedding(patched_context)
-
-        if self.chronos_config.use_reg_token:
-            # Append [REG]
-            reg_input_ids = torch.full(
-                (batch_size, 1),
-                self.config.reg_token_id,
-                device=input_embeds.device,
-            )
-            reg_embeds = self.shared(reg_input_ids)
-            input_embeds = torch.cat([input_embeds, reg_embeds], dim=-2)
-            attention_mask = torch.cat([attention_mask, torch.ones_like(reg_input_ids)], dim=-1)
-
-        encoder_outputs = self.encoder(
-            attention_mask=attention_mask,
-            inputs_embeds=input_embeds,
-        )
-        hidden_states = encoder_outputs[0]
-
-        sequence_output = self.decode(input_embeds, attention_mask, hidden_states)
-
-        quantile_preds_shape = (
-            batch_size,
-            self.num_quantiles,
-            self.chronos_config.prediction_length,
-        )
-        quantile_preds = self.output_patch_embedding(sequence_output).view(*quantile_preds_shape)
-
-        loss = None
-        if target is not None:
-            # normalize target
-            target, _ = self.instance_norm(target, loc_scale)
-            target = target.unsqueeze(1)  # type: ignore
-            assert self.chronos_config.prediction_length >= target.shape[-1]
-
-            target = target.to(quantile_preds.device)
-            target_mask = (
-                target_mask.unsqueeze(1).to(quantile_preds.device) if target_mask is not None else ~torch.isnan(target)
-            )
-            target[~target_mask] = 0.0
-
-            # pad target and target_mask if they are shorter than model's prediction_length
-            if self.chronos_config.prediction_length > target.shape[-1]:
-                padding_shape = (*target.shape[:-1], self.chronos_config.prediction_length - target.shape[-1])
-                target = torch.cat([target, torch.zeros(padding_shape).to(target)], dim=-1)
-                target_mask = torch.cat([target_mask, torch.zeros(padding_shape).to(target_mask)], dim=-1)
-
-            loss = (
-                2
-                * torch.abs(
-                    (target - quantile_preds)
-                    * ((target <= quantile_preds).float() - self.quantiles.view(1, self.num_quantiles, 1))
-                )
-                * target_mask.float()
-            )
-            loss = loss.mean(dim=-2)  # Mean over prediction horizon
-            loss = loss.sum(dim=-1)  # Sum over quantile levels
-            loss = loss.mean()  # Mean over batch
-
-        # Unscale predictions
-        quantile_preds = self.instance_norm.inverse(
-            quantile_preds.view(batch_size, -1),
-            loc_scale,
-        ).view(*quantile_preds_shape)
-
-        return ChronosBoltOutput(
-            loss=loss,
-            quantile_preds=quantile_preds,
-        )
-
-    def _init_decoder(self, config):
-        decoder_config = copy.deepcopy(config)
-        decoder_config.is_decoder = True
-        decoder_config.is_encoder_decoder = False
-        decoder_config.num_layers = config.num_decoder_layers
-        self.decoder = T5Stack(decoder_config, self.shared)
-
-    def decode(
-        self,
-        input_embeds,
-        attention_mask,
-        hidden_states,
-        output_attentions=False,
-    ):
-        """
-        Parameters
-        ----------
-        input_embeds
-            Patched and embedded inputs. Shape (batch_size, patched_context_length, d_model)
-        attention_mask
-            Attention mask for the patched context. Shape (batch_size, patched_context_length), type: torch.int64
-        hidden_states
-            Hidden states returned by the encoder. Shape (batch_size, patched_context_length, d_model)
-
-        Returns
-        -------
-        last_hidden_state
-            Last hidden state returned by the decoder, of shape (batch_size, 1, d_model)
-        """
-        batch_size = input_embeds.shape[0]
-        decoder_input_ids = torch.full(
-            (batch_size, 1),
-            self.config.decoder_start_token_id,
-            device=input_embeds.device,
-        )
-        decoder_outputs = self.decoder(
-            input_ids=decoder_input_ids,
-            encoder_hidden_states=hidden_states,
-            encoder_attention_mask=attention_mask,
-            output_attentions=output_attentions,
-            return_dict=True,
-        )
-
-        return decoder_outputs.last_hidden_state  # sequence_outputs, b x 1 x d_model
-
-
-class ChronosBoltPipeline(BaseChronosPipeline):
-    forecast_type: ForecastType = ForecastType.QUANTILES
-    default_context_length: int = 2048
-    # register this class name with this alias for backward compatibility
-    _aliases = ["PatchedT5Pipeline"]
-
-    def __init__(self, model: ChronosBoltModelForForecasting):
-        super().__init__(inner_model=model)
-        self.model = model
-        self.model_context_length: int = self.model.config.chronos_config["context_length"]
-        self.model_prediction_length: int = self.model.config.chronos_config["prediction_length"]
-
-    @property
-    def quantiles(self) -> list[float]:
-        return self.model.config.chronos_config["quantiles"]
-
-    def predict(  # type: ignore[override]
-        self,
-        context: Union[torch.Tensor, list[torch.Tensor]],
-        prediction_length: Optional[int] = None,
-        limit_prediction_length: bool = False,
-    ):
-        context_tensor = self._prepare_and_validate_context(context=context)
-
-        if prediction_length is None:
-            prediction_length = self.model_prediction_length
-
-        if prediction_length > self.model_prediction_length:
-            msg = (
-                f"We recommend keeping prediction length <= {self.model_prediction_length}. "
-                "The quality of longer predictions may degrade since the model is not optimized for it. "
-            )
-            if limit_prediction_length:
-                msg += "You can turn off this check by setting `limit_prediction_length=False`."
-                raise ValueError(msg)
-            warnings.warn(msg)
-
-        predictions = []
-        remaining = prediction_length
-
-        # We truncate the context here because otherwise batches with very long
-        # context could take up large amounts of GPU memory unnecessarily.
-        if context_tensor.shape[-1] > self.model_context_length:
-            context_tensor = context_tensor[..., -self.model_context_length :]
-
-        context_tensor = context_tensor.to(device=self.model.device, dtype=torch.float32)
-        # First block prediction
-        with torch.no_grad():
-            prediction: torch.Tensor = self.model(context=context_tensor).quantile_preds.to(context_tensor)
-
-        predictions.append(prediction)
-        remaining -= prediction.shape[-1]
-
-        # NOTE: The following heuristic for better prediction intervals with long-horizon forecasts
-        # uses all quantiles generated by the model for the first `model_prediction_length` steps,
-        # concatenating each quantile with the context and generating the next `model_prediction_length` steps.
-        # The `num_quantiles * num_quantiles` "samples" thus generated are then reduced to `num_quantiles`
-        # by computing empirical quantiles. Note that this option scales the batch size by `num_quantiles`
-        # when the `prediction_length` is greater than `model_prediction_length`.
-
-        if remaining > 0:
-            # Expand the context along quantile axis
-            context_tensor = context_tensor.unsqueeze(1).repeat(1, len(self.quantiles), 1)
-
-        quantile_tensor = torch.tensor(self.quantiles, device=context_tensor.device)
-        while remaining > 0:
-            # Append the prediction to context
-            context_tensor = torch.cat([context_tensor, prediction], dim=-1)[..., -self.model_context_length :]
-            (batch_size, n_quantiles, context_length) = context_tensor.shape
-
-            with torch.no_grad():
-                # Reshape (batch, n_quantiles, context_length) -> (batch * n_quantiles, context_length)
-                prediction = self.model(
-                    context=context_tensor.reshape(batch_size * n_quantiles, context_length)
-                ).quantile_preds.to(context_tensor)
-                # Reshape predictions from (batch * n_quantiles, n_quantiles, model_prediction_length) to (batch, n_quantiles * n_quantiles, model_prediction_length)
-                prediction = prediction.reshape(batch_size, n_quantiles * n_quantiles, -1)
-                # Reduce `n_quantiles * n_quantiles` to n_quantiles and transpose back to (batch_size, n_quantiles, model_prediction_length)
-                prediction = torch.quantile(prediction, q=quantile_tensor, dim=1).transpose(0, 1)
-
-            predictions.append(prediction)
-            remaining -= prediction.shape[-1]
-
-        return torch.cat(predictions, dim=-1)[..., :prediction_length]
-
-    def predict_quantiles(
-        self, context: torch.Tensor, prediction_length: int, quantile_levels: list[float], **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor]:
-        # shape (batch_size, prediction_length, len(training_quantile_levels))
-        predictions = (
-            self.predict(
-                context,
-                prediction_length=prediction_length,
-            )
-            .detach()
-            .cpu()
-            .swapaxes(1, 2)
-        )
-
-        training_quantile_levels = self.quantiles
-
-        if set(quantile_levels).issubset(set(training_quantile_levels)):
-            # no need to perform intra/extrapolation
-            quantiles = predictions[..., [training_quantile_levels.index(q) for q in quantile_levels]]
-        else:
-            # we rely on torch for interpolating quantiles if quantiles that
-            # Chronos Bolt was trained on were not provided
-            if min(quantile_levels) < min(training_quantile_levels) or max(quantile_levels) > max(
-                training_quantile_levels
-            ):
-                logger.warning(
-                    f"\tQuantiles to be predicted ({quantile_levels}) are not within the range of "
-                    f"quantiles that Chronos-Bolt was trained on ({training_quantile_levels}). "
-                    "Quantile predictions will be set to the minimum/maximum levels at which Chronos-Bolt "
-                    "was trained on. This may significantly affect the quality of the predictions."
-                )
-
-            # TODO: this is a hack that assumes the model's quantiles during training (training_quantile_levels)
-            # made up an equidistant grid along the quantile dimension. i.e., they were (0.1, 0.2, ..., 0.9).
-            # While this holds for official Chronos-Bolt models, this may not be true in the future, and this
-            # function may have to be revised.
-            augmented_predictions = torch.cat(
-                [predictions[..., [0]], predictions, predictions[..., [-1]]],
-                dim=-1,
-            )
-            quantiles = torch.quantile(
-                augmented_predictions, q=torch.tensor(quantile_levels, dtype=augmented_predictions.dtype), dim=-1
-            ).permute(1, 2, 0)
-        mean = predictions[:, :, training_quantile_levels.index(0.5)]
-        return quantiles, mean
-
-    @classmethod
-    def from_pretrained(cls, *args, **kwargs):
-        """
-        Load the model, either from a local path or from the HuggingFace Hub.
-        Supports the same arguments as ``AutoConfig`` and ``AutoModel``
-        from ``transformers``.
-        """
-        config = AutoConfig.from_pretrained(*args, **kwargs)
-        assert hasattr(config, "chronos_config"), "Not a Chronos config file"
-
-        context_length = kwargs.pop("context_length", None)
-        if context_length is not None:
-            config.chronos_config["context_length"] = context_length
-
-        architecture = config.architectures[0]
-        class_ = globals().get(architecture)
-
-        # TODO: remove this once all models carry the correct architecture names in their configuration
-        # and raise an error instead.
-        if class_ is None:
-            logger.warning(f"Unknown architecture: {architecture}, defaulting to ChronosBoltModelForForecasting")
-            class_ = ChronosBoltModelForForecasting
-
-        model = class_.from_pretrained(*args, **kwargs)
-        return cls(model=model)

autogluon.timeseries-1.4.1b20250830-py3.9-nspkg.pth
@@ -1 +0,0 @@
-import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

autogluon.timeseries-1.4.1b20250830.dist-info/RECORD
@@ -1,75 +0,0 @@
-autogluon.timeseries-1.4.1b20250830-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
-autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
-autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
-autogluon/timeseries/learner.py,sha256=eQrqFVOmL-2JC85LgCMkbyoLpKS02Dilg1T8RUeS_LI,13887
-autogluon/timeseries/predictor.py,sha256=7X4YsWYa3Xk2RI1Irf2O-c3-I82Zqhg-cgj8cj_4AoA,88427
-autogluon/timeseries/regressor.py,sha256=lc8Qr3-8v4oxajtCnV3sxpUaW6vxXXJOA6Kr-qVne4k,11926
-autogluon/timeseries/splitter.py,sha256=8ACkuCXeUhQGUx4jz_Vv17q814WrHJQeKvq2v4-oE6s,3158
-autogluon/timeseries/version.py,sha256=1LLPRzXVZj4uays1y13gP-npP7vnQKZjr7gnSOkOTuo,91
-autogluon/timeseries/configs/__init__.py,sha256=wiLBwxZkDTQBJkSJ9-xz3p_yJxX0dbHe108dS1P5O6A,183
-autogluon/timeseries/configs/hyperparameter_presets.py,sha256=GbI2sd3uakWtaeaMyF7B5z_lmyfb6ToK6PZEUZTyG9w,2031
-autogluon/timeseries/configs/predictor_presets.py,sha256=B5HFHIelh91hhG0YYE5SJ7_14P7sylFAABgHX8n_53M,2712
-autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
-autogluon/timeseries/dataset/ts_dataframe.py,sha256=EwxKBScspwKnJTqIk2Icukk8vIrbKYObOMAkNIn4zc8,51760
-autogluon/timeseries/metrics/__init__.py,sha256=YJPXxsJ0tRDXq7p-sTZSLb0DuXMJH6sT1PgbZ3tMt30,3594
-autogluon/timeseries/metrics/abstract.py,sha256=6jbluvHXfLc_cuK1Fx0ZYle2sR4WGG6YxFQhkor46Q8,11545
-autogluon/timeseries/metrics/point.py,sha256=sS__n_Em7m4CUaBu3PNWQ_dHw1YCOHbEyC15fhytFL8,18308
-autogluon/timeseries/metrics/quantile.py,sha256=x0cq44fXRoMiuI4BVQ7mpWk1YgrK4OwLTlJAhCHQ7Xg,4634
-autogluon/timeseries/metrics/utils.py,sha256=HuDe1BNe8yJU4f_DKM913nNrUueoRaw6zhxm1-S20s0,910
-autogluon/timeseries/models/__init__.py,sha256=9YnqkOILtVEkbICk7J3VlMkMNySs-f5ErIUKrE5-fys,1294
-autogluon/timeseries/models/registry.py,sha256=8n7W04ql0ckNQUzKcAW7bxreLI8wTAUTymACgLklH9M,2158
-autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=97HOi7fRPxtx8Y9hq-xdJI-kLMp6Z-8LUSvcfBjXFsM,31978
-autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
-autogluon/timeseries/models/abstract/tunable.py,sha256=jA6p-FPZkMva67B-1foqvHK-1rr0IdEfp9RvGW1WS9I,7155
-autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=E5fZsdFPgVdyCVyj5bGmn_lQFlCMn2NvuRLBMcCFvhM,205
-autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=k3a0JqBeuLQfjCtZ8MA7UvS2eqHjwbw0-4kN_StMMUQ,37623
-autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=M5rhj_jjcQz27wPYm6NEBEE0aHgXe0Bl6HFc2NIuLdU,23159
-autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=aI1QJLJaOB5Xy2WA0jo6Jh25MRVyyZ8ONrqlV96kpw0,2735
-autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
-autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
-autogluon/timeseries/models/chronos/model.py,sha256=UYLI1HVwsW5KfA-jXqJdBel-7N6l6ggdBVrjLLaq9P0,32333
-autogluon/timeseries/models/chronos/pipeline/__init__.py,sha256=bkTR0LSKIxAaKFOr9A0HSkCtnRdikDPUPp810WOKgxE,247
-autogluon/timeseries/models/chronos/pipeline/base.py,sha256=Us-TUpHSN3mM3ut05IVc2a9Q6KYq1n9pTb7JZG7b6kA,5546
-autogluon/timeseries/models/chronos/pipeline/chronos.py,sha256=bgow5FkHG7y5qWBXcggqXemnistJUfrl0lWFXcGXg5g,20197
-autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py,sha256=KJYgxASTW2VhS0ObkP5DUQXyfnTRTXzjRD5Gm-FQFI4,21355
-autogluon/timeseries/models/chronos/pipeline/utils.py,sha256=WYeCKFP5dxs4u09XTncBI2486VV22O1DiM9a3ZvZ1OE,12790
-autogluon/timeseries/models/ensemble/__init__.py,sha256=x2Y6dWk15XugTEWNUKq8U5z6nIjelo3UjpI-TfS13OE,159
-autogluon/timeseries/models/ensemble/abstract.py,sha256=wvtXNZTwiYpIurPkOYSzsi3XTRRx5guJLMYLmXTdOeQ,5695
-autogluon/timeseries/models/ensemble/basic.py,sha256=aSQRYylUpFZVk_Lpv5GY8uYgmE0_ipLy_tx6ELTZyWc,3426
-autogluon/timeseries/models/ensemble/greedy.py,sha256=zXJFenn1XxNNvCp4TlmIq1Dx3pUDWjKG1K3HsejmDeY,7323
-autogluon/timeseries/models/gluonts/__init__.py,sha256=YfyNYOkhhNsloA4MAavfmqKO29_q6o4lwPoV7L4_h7M,355
-autogluon/timeseries/models/gluonts/abstract.py,sha256=fYXV5fQk79LtPtpa4uk8MzCUGZ5J6n47ClEYuYSBDLY,27770
-autogluon/timeseries/models/gluonts/dataset.py,sha256=iUVKZyec1efVW2-71AWn-m9cowXYYj5hJM1JWuioscA,5115
-autogluon/timeseries/models/gluonts/models.py,sha256=1Z3x3-jVoae5X4cSnDIgJMvTJ9_O94aDSW8HEnBaL5k,25907
-autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
-autogluon/timeseries/models/local/abstract_local_model.py,sha256=A3sNYMA67UbbEKIIN30BcBdE_NpwaBtcG22O5mVWS6k,11482
-autogluon/timeseries/models/local/naive.py,sha256=xur3WWhLaS9Iix_p_yfaStbr58nL5K4rV0dReTm3BQQ,7496
-autogluon/timeseries/models/local/npts.py,sha256=VRZk5tEJOIentt0tLM6lxyoU8US736nHOvhSAgagYMc,4203
-autogluon/timeseries/models/local/statsforecast.py,sha256=sZ6aEFzAyPNZX3rMULGWFht0Toapjb3EwHe5Rb76ZxA,33318
-autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
-autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=Hn-H2jLdeuB0_TxhAdununS8ti-iO-WSl3FOoxzcEJA,12369
-autogluon/timeseries/trainer/__init__.py,sha256=_tw3iioJfvtIV7wnjtEMv0yS8oabmCFxDnGRodYE7RI,72
-autogluon/timeseries/trainer/model_set_builder.py,sha256=s6tozfND3lLfst6Vxa_oP_wgCmDapyCJYFmCjkEn-es,10788
-autogluon/timeseries/trainer/prediction_cache.py,sha256=Vi6EbMiMheq_smA93U_MoMxYUV85RdPm0dvJFdsM8K4,5551
-autogluon/timeseries/trainer/trainer.py,sha256=LF2X5UNnrU8w5h_i09SphGWvGFvZ6KvPDq89Z3GzZZQ,54959
-autogluon/timeseries/transforms/__init__.py,sha256=fKlT4pkJ_8Gl7IUTc3uSDzt2Xow5iH5w6fPB3ePNrTg,127
-autogluon/timeseries/transforms/covariate_scaler.py,sha256=9lEfDS4wnVZohQNnm9OcAXr3voUl83RCnctKR3O66iU,7030
-autogluon/timeseries/transforms/target_scaler.py,sha256=kTQrXAsDHCnYuqfpaVuvefyTgyp_ylDpUIPz7pArjeY,6043
-autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/timeseries/utils/features.py,sha256=tdL7jZKeySO7dgB09FweR44wPCmfWg8-ZM5uVzeyvYQ,22593
-autogluon/timeseries/utils/forecast.py,sha256=yK1_eNtRUPYGs0R-VWMO4c81LrTGF57ih3yzsXVHyGY,2191
-autogluon/timeseries/utils/warning_filters.py,sha256=SroNhLU3kwbD8anM58vdxWq36Z8j_uiY42mEt0ya-JI,2589
-autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
-autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
-autogluon/timeseries/utils/datetime/lags.py,sha256=rjJtdBU0M41R1jwfmvCbo045s-6XBjhGVnGBQJ9-U1E,5997
-autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
-autogluon/timeseries/utils/datetime/time_features.py,sha256=kEOFls4Nzh8nO0Pcz1DwLsC_NA3hMI4JUlZI3kuvuts,2666
-autogluon.timeseries-1.4.1b20250830.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.timeseries-1.4.1b20250830.dist-info/METADATA,sha256=eStbpOycDQRMqTzu2HyK8t-mFPsjHfjUNaE0X9L5hKo,12463
-autogluon.timeseries-1.4.1b20250830.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.timeseries-1.4.1b20250830.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-autogluon.timeseries-1.4.1b20250830.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.4.1b20250830.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.4.1b20250830.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.timeseries-1.4.1b20250830.dist-info/RECORD,,

File without changes
File without changes
File without changes
File without changes
File without changes
File without changes