autogluon.timeseries 1.4.1b20251016__py3-none-any.whl → 1.4.1b20251218__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

This version of autogluon.timeseries might be problematic.

Files changed (90)
  1. autogluon/timeseries/configs/hyperparameter_presets.py +7 -21
  2. autogluon/timeseries/configs/predictor_presets.py +23 -39
  3. autogluon/timeseries/dataset/ts_dataframe.py +97 -86
  4. autogluon/timeseries/learner.py +70 -35
  5. autogluon/timeseries/metrics/__init__.py +4 -4
  6. autogluon/timeseries/metrics/abstract.py +8 -8
  7. autogluon/timeseries/metrics/point.py +9 -9
  8. autogluon/timeseries/metrics/quantile.py +5 -5
  9. autogluon/timeseries/metrics/utils.py +4 -4
  10. autogluon/timeseries/models/__init__.py +2 -1
  11. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -39
  12. autogluon/timeseries/models/abstract/model_trial.py +2 -1
  13. autogluon/timeseries/models/abstract/tunable.py +8 -8
  14. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +58 -62
  15. autogluon/timeseries/models/autogluon_tabular/per_step.py +26 -15
  16. autogluon/timeseries/models/autogluon_tabular/transforms.py +11 -9
  17. autogluon/timeseries/models/chronos/__init__.py +2 -1
  18. autogluon/timeseries/models/chronos/chronos2.py +395 -0
  19. autogluon/timeseries/models/chronos/model.py +126 -88
  20. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +69 -37
  21. autogluon/timeseries/models/ensemble/__init__.py +36 -2
  22. autogluon/timeseries/models/ensemble/abstract.py +14 -46
  23. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  24. autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
  25. autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
  26. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
  27. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
  28. autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
  29. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
  30. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
  31. autogluon/timeseries/models/ensemble/{greedy.py → ensemble_selection.py} +41 -61
  32. autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
  33. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  34. autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
  35. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +25 -22
  36. autogluon/timeseries/models/ensemble/weighted/greedy.py +62 -0
  37. autogluon/timeseries/models/gluonts/abstract.py +32 -31
  38. autogluon/timeseries/models/gluonts/dataset.py +11 -11
  39. autogluon/timeseries/models/gluonts/models.py +0 -7
  40. autogluon/timeseries/models/local/__init__.py +0 -7
  41. autogluon/timeseries/models/local/abstract_local_model.py +15 -18
  42. autogluon/timeseries/models/local/naive.py +2 -2
  43. autogluon/timeseries/models/local/npts.py +7 -1
  44. autogluon/timeseries/models/local/statsforecast.py +12 -12
  45. autogluon/timeseries/models/multi_window/multi_window_model.py +39 -24
  46. autogluon/timeseries/models/registry.py +3 -4
  47. autogluon/timeseries/models/toto/_internal/backbone/attention.py +3 -4
  48. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +6 -6
  49. autogluon/timeseries/models/toto/_internal/backbone/rope.py +4 -9
  50. autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
  51. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +2 -3
  52. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +10 -10
  53. autogluon/timeseries/models/toto/_internal/dataset.py +2 -2
  54. autogluon/timeseries/models/toto/_internal/forecaster.py +8 -8
  55. autogluon/timeseries/models/toto/dataloader.py +4 -4
  56. autogluon/timeseries/models/toto/hf_pretrained_model.py +97 -16
  57. autogluon/timeseries/models/toto/model.py +35 -20
  58. autogluon/timeseries/predictor.py +527 -155
  59. autogluon/timeseries/regressor.py +27 -30
  60. autogluon/timeseries/splitter.py +3 -27
  61. autogluon/timeseries/trainer/ensemble_composer.py +444 -0
  62. autogluon/timeseries/trainer/model_set_builder.py +9 -9
  63. autogluon/timeseries/trainer/prediction_cache.py +16 -16
  64. autogluon/timeseries/trainer/trainer.py +300 -278
  65. autogluon/timeseries/trainer/utils.py +17 -0
  66. autogluon/timeseries/transforms/covariate_scaler.py +8 -8
  67. autogluon/timeseries/transforms/target_scaler.py +15 -15
  68. autogluon/timeseries/utils/constants.py +10 -0
  69. autogluon/timeseries/utils/datetime/lags.py +1 -3
  70. autogluon/timeseries/utils/datetime/seasonality.py +1 -3
  71. autogluon/timeseries/utils/features.py +31 -14
  72. autogluon/timeseries/utils/forecast.py +6 -7
  73. autogluon/timeseries/utils/timer.py +173 -0
  74. autogluon/timeseries/version.py +1 -1
  75. autogluon.timeseries-1.4.1b20251218-py3.11-nspkg.pth +1 -0
  76. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/METADATA +39 -27
  77. autogluon_timeseries-1.4.1b20251218.dist-info/RECORD +103 -0
  78. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/WHEEL +1 -1
  79. autogluon/timeseries/evaluator.py +0 -6
  80. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  81. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  82. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  83. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -580
  84. autogluon.timeseries-1.4.1b20251016-py3.9-nspkg.pth +0 -1
  85. autogluon.timeseries-1.4.1b20251016.dist-info/RECORD +0 -90
  86. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info/licenses}/LICENSE +0 -0
  87. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info/licenses}/NOTICE +0 -0
  88. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/namespace_packages.txt +0 -0
  89. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/top_level.txt +0 -0
  90. {autogluon.timeseries-1.4.1b20251016.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/zip-safe +0 -0
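
The listing above can be reproduced locally: a wheel is a zip archive, so the member lists of the two releases can be diffed with the standard library. A minimal sketch, assuming both wheels have already been downloaded from the registry (the local paths are assumptions):

import zipfile

OLD = "autogluon.timeseries-1.4.1b20251016-py3-none-any.whl"  # assumed local path
NEW = "autogluon.timeseries-1.4.1b20251218-py3-none-any.whl"  # assumed local path

def members(path: str) -> set[str]:
    # A wheel is a zip archive; namelist() returns every packaged file.
    with zipfile.ZipFile(path) as whl:
        return set(whl.namelist())

old, new = members(OLD), members(NEW)
print("added:  ", sorted(new - old))
print("removed:", sorted(old - new))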
autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py
@@ -1,580 +0,0 @@
- # Implements Chronos with T5 architecture but with patched inputs instead of
- # per-time-step tokenization. a.k.a. Chronos-Bolt
-
- # Authors: Abdul Fatir Ansari <ansarnd@amazon.com>, Lorenzo Stella <stellalo@amazon.com>, Caner Turkmen <atturkm@amazon.com>
-
- import copy
- import logging
- import warnings
- from dataclasses import dataclass, fields
- from typing import Optional, Union
-
- import torch
- import torch.nn as nn
- from transformers import AutoConfig
- from transformers.models.t5.modeling_t5 import (
-     ACT2FN,
-     T5Config,
-     T5LayerNorm,
-     T5PreTrainedModel,
-     T5Stack,
- )
- from transformers.utils import ModelOutput
-
- from .base import BaseChronosPipeline, ForecastType
-
- logger = logging.getLogger("autogluon.timeseries.models.chronos")
-
-
- @dataclass
- class ChronosBoltConfig:
-     context_length: int
-     prediction_length: int
-     input_patch_size: int
-     input_patch_stride: int
-     quantiles: list[float]
-     use_reg_token: bool = False
-
-
- @dataclass
- class ChronosBoltOutput(ModelOutput):
-     loss: Optional[torch.Tensor] = None
-     quantile_preds: Optional[torch.Tensor] = None
-     attentions: Optional[torch.Tensor] = None
-     cross_attentions: Optional[torch.Tensor] = None
-
-
- class Patch(nn.Module):
-     def __init__(self, patch_size: int, patch_stride: int) -> None:
-         super().__init__()
-         self.patch_size = patch_size
-         self.patch_stride = patch_stride
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         length = x.shape[-1]
-
-         if length % self.patch_size != 0:
-             padding_size = (
-                 *x.shape[:-1],
-                 self.patch_size - (length % self.patch_size),
-             )
-             padding = torch.full(size=padding_size, fill_value=torch.nan, dtype=x.dtype, device=x.device)
-             x = torch.concat((padding, x), dim=-1)
-
-         x = x.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)
-         return x
-
-
- class InstanceNorm(nn.Module):
-     """
-     See, also, RevIN. Apply standardization along the last dimension.
-     """
-
-     def __init__(self, eps: float = 1e-5) -> None:
-         super().__init__()
-         self.eps = eps
-
-     def forward(
-         self,
-         x: torch.Tensor,
-         loc_scale: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
-     ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
-         if loc_scale is None:
-             loc = torch.nan_to_num(torch.nanmean(x, dim=-1, keepdim=True), nan=0.0)
-             scale = torch.nan_to_num((x - loc).square().nanmean(dim=-1, keepdim=True).sqrt(), nan=1.0)
-             scale = torch.where(scale == 0, self.eps, scale)
-         else:
-             loc, scale = loc_scale
-
-         return (x - loc) / scale, (loc, scale)
-
-     def inverse(self, x: torch.Tensor, loc_scale: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
-         loc, scale = loc_scale
-         return x * scale + loc
-
-
- class ResidualBlock(nn.Module):
-     def __init__(
-         self,
-         in_dim: int,
-         h_dim: int,
-         out_dim: int,
-         act_fn_name: str,
-         dropout_p: float = 0.0,
-         use_layer_norm: bool = False,
-     ) -> None:
-         super().__init__()
-
-         self.dropout = nn.Dropout(dropout_p)
-         self.hidden_layer = nn.Linear(in_dim, h_dim)
-         self.act = ACT2FN[act_fn_name]
-         self.output_layer = nn.Linear(h_dim, out_dim)
-         self.residual_layer = nn.Linear(in_dim, out_dim)
-
-         self.use_layer_norm = use_layer_norm
-         if use_layer_norm:
-             self.layer_norm = T5LayerNorm(out_dim)
-
-     def forward(self, x: torch.Tensor):
-         hid = self.act(self.hidden_layer(x))
-         out = self.dropout(self.output_layer(hid))
-         res = self.residual_layer(x)
-
-         out = out + res
-
-         if self.use_layer_norm:
-             return self.layer_norm(out)
-         return out
-
-
- class ChronosBoltModelForForecasting(T5PreTrainedModel):
-     _keys_to_ignore_on_load_missing = [
-         r"input_patch_embedding\.",
-         r"output_patch_embedding\.",
-     ]
-     _keys_to_ignore_on_load_unexpected = [r"lm_head.weight"]
-     _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
-
-     def __init__(self, config: T5Config):
-         assert hasattr(config, "chronos_config"), "Not a Chronos config file"
-
-         super().__init__(config)
-         self.model_dim = config.d_model
-
-         # TODO: remove filtering eventually, added for backward compatibility
-         config_fields = {f.name for f in fields(ChronosBoltConfig)}
-         self.chronos_config = ChronosBoltConfig(
-             **{k: v for k, v in config.chronos_config.items() if k in config_fields}
-         )
-
-         # Only decoder_start_id (and optionally REG token)
-         if self.chronos_config.use_reg_token:
-             config.reg_token_id = 1
-
-         config.vocab_size = 2 if self.chronos_config.use_reg_token else 1
-         self.shared = nn.Embedding(config.vocab_size, config.d_model)
-
-         # Input patch embedding layer
-         self.input_patch_embedding = ResidualBlock(
-             in_dim=self.chronos_config.input_patch_size * 2,
-             h_dim=config.d_ff,
-             out_dim=config.d_model,
-             act_fn_name=config.dense_act_fn,
-             dropout_p=config.dropout_rate,
-         )
-
-         # patching layer
-         self.patch = Patch(
-             patch_size=self.chronos_config.input_patch_size,
-             patch_stride=self.chronos_config.input_patch_stride,
-         )
-
-         # instance normalization, also referred to as "scaling" in Chronos and GluonTS
-         self.instance_norm = InstanceNorm()
-
-         encoder_config = copy.deepcopy(config)
-         encoder_config.is_decoder = False
-         encoder_config.use_cache = False
-         encoder_config.is_encoder_decoder = False
-         self.encoder = T5Stack(encoder_config, self.shared)
-
-         self._init_decoder(config)
-
-         self.num_quantiles = len(self.chronos_config.quantiles)
-         quantiles = torch.tensor(self.chronos_config.quantiles, dtype=self.dtype)
-         self.register_buffer("quantiles", quantiles, persistent=False)
-
-         self.output_patch_embedding = ResidualBlock(
-             in_dim=config.d_model,
-             h_dim=config.d_ff,
-             out_dim=self.num_quantiles * self.chronos_config.prediction_length,
-             act_fn_name=config.dense_act_fn,
-             dropout_p=config.dropout_rate,
-         )
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-         # Model parallel
-         self.model_parallel = False
-         self.device_map = None
-
-     def _init_weights(self, module):
-         super()._init_weights(module)
-         """Initialize the weights"""
-         factor = self.config.initializer_factor
-         if isinstance(module, (self.__class__)):
-             module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
-         elif isinstance(module, ResidualBlock):
-             module.hidden_layer.weight.data.normal_(
-                 mean=0.0,
-                 std=factor * ((self.chronos_config.input_patch_size * 2) ** -0.5),
-             )
-             if hasattr(module.hidden_layer, "bias") and module.hidden_layer.bias is not None:
-                 module.hidden_layer.bias.data.zero_()
-
-             module.residual_layer.weight.data.normal_(
-                 mean=0.0,
-                 std=factor * ((self.chronos_config.input_patch_size * 2) ** -0.5),
-             )
-             if hasattr(module.residual_layer, "bias") and module.residual_layer.bias is not None:
-                 module.residual_layer.bias.data.zero_()
-
-             module.output_layer.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
-             if hasattr(module.output_layer, "bias") and module.output_layer.bias is not None:
-                 module.output_layer.bias.data.zero_()
-
-     def forward(
-         self,
-         context: torch.Tensor,
-         mask: Optional[torch.Tensor] = None,
-         target: Optional[torch.Tensor] = None,
-         target_mask: Optional[torch.Tensor] = None,
-     ) -> ChronosBoltOutput:
-         mask = mask.to(context.dtype) if mask is not None else torch.isnan(context).logical_not().to(context.dtype)
-
-         batch_size, _ = context.shape
-         if context.shape[-1] > self.chronos_config.context_length:
-             context = context[..., -self.chronos_config.context_length :]
-             mask = mask[..., -self.chronos_config.context_length :]
-
-         # scaling
-         context, loc_scale = self.instance_norm(context)
-
-         # the scaling op above is done in 32-bit precision,
-         # then the context is moved to model's dtype
-         context = context.to(self.dtype)
-         mask = mask.to(self.dtype)
-
-         # patching
-         patched_context = self.patch(context)
-         patched_mask = torch.nan_to_num(self.patch(mask), nan=0.0)
-         patched_context[~(patched_mask > 0)] = 0.0
-         # concat context and mask along patch dim
-         patched_context = torch.cat([patched_context, patched_mask], dim=-1)
-
-         # attention_mask = 1 if at least one item in the patch is observed
-         attention_mask = patched_mask.sum(dim=-1) > 0  # (batch_size, patched_seq_length)
-
-         input_embeds = self.input_patch_embedding(patched_context)
-
-         if self.chronos_config.use_reg_token:
-             # Append [REG]
-             reg_input_ids = torch.full(
-                 (batch_size, 1),
-                 self.config.reg_token_id,
-                 device=input_embeds.device,
-             )
-             reg_embeds = self.shared(reg_input_ids)
-             input_embeds = torch.cat([input_embeds, reg_embeds], dim=-2)
-             attention_mask = torch.cat([attention_mask, torch.ones_like(reg_input_ids)], dim=-1)
-
-         encoder_outputs = self.encoder(
-             attention_mask=attention_mask,
-             inputs_embeds=input_embeds,
-         )
-         hidden_states = encoder_outputs[0]
-
-         sequence_output = self.decode(input_embeds, attention_mask, hidden_states)
-
-         quantile_preds_shape = (
-             batch_size,
-             self.num_quantiles,
-             self.chronos_config.prediction_length,
-         )
-         quantile_preds = self.output_patch_embedding(sequence_output).view(*quantile_preds_shape)
-
-         loss = None
-         if target is not None:
-             # normalize target
-             target, _ = self.instance_norm(target, loc_scale)
-             target = target.unsqueeze(1)  # type: ignore
-             assert self.chronos_config.prediction_length >= target.shape[-1]
-
-             target = target.to(quantile_preds.device)
-             target_mask = (
-                 target_mask.unsqueeze(1).to(quantile_preds.device) if target_mask is not None else ~torch.isnan(target)
-             )
-             target[~target_mask] = 0.0
-
-             # pad target and target_mask if they are shorter than model's prediction_length
-             if self.chronos_config.prediction_length > target.shape[-1]:
-                 padding_shape = (*target.shape[:-1], self.chronos_config.prediction_length - target.shape[-1])
-                 target = torch.cat([target, torch.zeros(padding_shape).to(target)], dim=-1)
-                 target_mask = torch.cat([target_mask, torch.zeros(padding_shape).to(target_mask)], dim=-1)
-
-             loss = (
-                 2
-                 * torch.abs(
-                     (target - quantile_preds)
-                     * ((target <= quantile_preds).float() - self.quantiles.view(1, self.num_quantiles, 1))
-                 )
-                 * target_mask.float()
-             )
-             loss = loss.mean(dim=-2)  # Mean over prediction horizon
-             loss = loss.sum(dim=-1)  # Sum over quantile levels
-             loss = loss.mean()  # Mean over batch
-
-         # Unscale predictions
-         quantile_preds = self.instance_norm.inverse(
-             quantile_preds.view(batch_size, -1),
-             loc_scale,
-         ).view(*quantile_preds_shape)
-
-         return ChronosBoltOutput(
-             loss=loss,
-             quantile_preds=quantile_preds,
-         )
-
-     def _init_decoder(self, config):
-         decoder_config = copy.deepcopy(config)
-         decoder_config.is_decoder = True
-         decoder_config.is_encoder_decoder = False
-         decoder_config.num_layers = config.num_decoder_layers
-         self.decoder = T5Stack(decoder_config, self.shared)
-
-     def decode(
-         self,
-         input_embeds,
-         attention_mask,
-         hidden_states,
-         output_attentions=False,
-     ):
-         """
-         Parameters
-         ----------
-         input_embeds
-             Patched and embedded inputs. Shape (batch_size, patched_context_length, d_model)
-         attention_mask
-             Attention mask for the patched context. Shape (batch_size, patched_context_length), type: torch.int64
-         hidden_states
-             Hidden states returned by the encoder. Shape (batch_size, patched_context_length, d_model)
-
-         Returns
-         -------
-         last_hidden_state
-             Last hidden state returned by the decoder, of shape (batch_size, 1, d_model)
-         """
-         batch_size = input_embeds.shape[0]
-         decoder_input_ids = torch.full(
-             (batch_size, 1),
-             self.config.decoder_start_token_id,
-             device=input_embeds.device,
-         )
-         decoder_outputs = self.decoder(
-             input_ids=decoder_input_ids,
-             encoder_hidden_states=hidden_states,
-             encoder_attention_mask=attention_mask,
-             output_attentions=output_attentions,
-             return_dict=True,
-         )
-
-         return decoder_outputs.last_hidden_state  # sequence_outputs, b x 1 x d_model
-
-     def update_output_quantiles(self, new_quantiles: list[float]) -> None:
-         """In-place updates model's output layer to support only the specified new quantiles by copying weights from closest existing quantiles."""
-         old_quantiles = self.chronos_config.quantiles
-         new_quantiles = sorted(new_quantiles)
-
-         if new_quantiles == old_quantiles:
-             return
-
-         self.chronos_config.quantiles = new_quantiles
-         self.num_quantiles = len(new_quantiles)
-         self.register_buffer("quantiles", torch.tensor(new_quantiles, dtype=self.dtype), persistent=False)
-
-         old_output_layer = self.output_patch_embedding
-         new_output_layer = ResidualBlock(
-             in_dim=self.config.d_model,
-             h_dim=self.config.d_ff,
-             out_dim=len(new_quantiles) * self.chronos_config.prediction_length,
-             act_fn_name=self.config.dense_act_fn,
-             dropout_p=self.config.dropout_rate,
-         )
-
-         # hidden_layer is shared across all quantiles
-         new_output_layer.hidden_layer.weight.data.copy_(old_output_layer.hidden_layer.weight.data)
-         if old_output_layer.hidden_layer.bias is not None:
-             new_output_layer.hidden_layer.bias.data.copy_(old_output_layer.hidden_layer.bias.data)
-
-         def copy_quantile_weights(src_idx: int, dst_idx: int):
-             """Copy weights for one quantile from src_idx to dst_idx"""
-             prediction_length = self.chronos_config.prediction_length
-             src_start, src_end = src_idx * prediction_length, (src_idx + 1) * prediction_length
-             dst_start, dst_end = dst_idx * prediction_length, (dst_idx + 1) * prediction_length
-
-             for layer_name in ["output_layer", "residual_layer"]:
-                 old_layer_attr = getattr(old_output_layer, layer_name)
-                 new_layer_attr = getattr(new_output_layer, layer_name)
-
-                 new_layer_attr.weight[dst_start:dst_end] = old_layer_attr.weight[src_start:src_end]
-                 if old_layer_attr.bias is not None:
-                     new_layer_attr.bias[dst_start:dst_end] = old_layer_attr.bias[src_start:src_end]
-
-         with torch.no_grad():
-             for new_idx, new_q in enumerate(new_quantiles):
-                 closest_q = min(old_quantiles, key=lambda x: abs(x - new_q))
-                 closest_idx = old_quantiles.index(closest_q)
-                 copy_quantile_weights(closest_idx, new_idx)
-
-         self.output_patch_embedding = new_output_layer
-         self.config.chronos_config["quantiles"] = new_quantiles
-         self.chronos_config.quantiles = new_quantiles
-
-
- class ChronosBoltPipeline(BaseChronosPipeline):
-     forecast_type: ForecastType = ForecastType.QUANTILES
-     default_context_length: int = 2048
-     # register this class name with this alias for backward compatibility
-     _aliases = ["PatchedT5Pipeline"]
-
-     def __init__(self, model: ChronosBoltModelForForecasting):
-         super().__init__(inner_model=model)
-         self.model = model
-         self.model_context_length: int = self.model.config.chronos_config["context_length"]
-         self.model_prediction_length: int = self.model.config.chronos_config["prediction_length"]
-
-     @property
-     def quantiles(self) -> list[float]:
-         return self.model.config.chronos_config["quantiles"]
-
-     def predict(  # type: ignore[override]
-         self,
-         context: Union[torch.Tensor, list[torch.Tensor]],
-         prediction_length: Optional[int] = None,
-         limit_prediction_length: bool = False,
-     ):
-         context_tensor = self._prepare_and_validate_context(context=context)
-
-         if prediction_length is None:
-             prediction_length = self.model_prediction_length
-
-         if prediction_length > self.model_prediction_length:
-             msg = (
-                 f"We recommend keeping prediction length <= {self.model_prediction_length}. "
-                 "The quality of longer predictions may degrade since the model is not optimized for it. "
-             )
-             if limit_prediction_length:
-                 msg += "You can turn off this check by setting `limit_prediction_length=False`."
-                 raise ValueError(msg)
-             warnings.warn(msg)
-
-         predictions = []
-         remaining = prediction_length
-
-         # We truncate the context here because otherwise batches with very long
-         # context could take up large amounts of GPU memory unnecessarily.
-         if context_tensor.shape[-1] > self.model_context_length:
-             context_tensor = context_tensor[..., -self.model_context_length :]
-
-         context_tensor = context_tensor.to(device=self.model.device, dtype=torch.float32)
-         # First block prediction
-         with torch.no_grad():
-             prediction: torch.Tensor = self.model(context=context_tensor).quantile_preds.to(context_tensor)
-
-         predictions.append(prediction)
-         remaining -= prediction.shape[-1]
-
-         # NOTE: The following heuristic for better prediction intervals with long-horizon forecasts
-         # uses all quantiles generated by the model for the first `model_prediction_length` steps,
-         # concatenating each quantile with the context and generating the next `model_prediction_length` steps.
-         # The `num_quantiles * num_quantiles` "samples" thus generated are then reduced to `num_quantiles`
-         # by computing empirical quantiles. Note that this option scales the batch size by `num_quantiles`
-         # when the `prediction_length` is greater than `model_prediction_length`.
-
-         if remaining > 0:
-             # Expand the context along quantile axis
-             context_tensor = context_tensor.unsqueeze(1).repeat(1, len(self.quantiles), 1)
-
-         quantile_tensor = torch.tensor(self.quantiles, device=context_tensor.device)
-         while remaining > 0:
-             # Append the prediction to context
-             context_tensor = torch.cat([context_tensor, prediction], dim=-1)[..., -self.model_context_length :]
-             (batch_size, n_quantiles, context_length) = context_tensor.shape
-
-             with torch.no_grad():
-                 # Reshape (batch, n_quantiles, context_length) -> (batch * n_quantiles, context_length)
-                 prediction = self.model(
-                     context=context_tensor.reshape(batch_size * n_quantiles, context_length)
-                 ).quantile_preds.to(context_tensor)
-                 # Reshape predictions from (batch * n_quantiles, n_quantiles, model_prediction_length) to (batch, n_quantiles * n_quantiles, model_prediction_length)
-                 prediction = prediction.reshape(batch_size, n_quantiles * n_quantiles, -1)
-                 # Reduce `n_quantiles * n_quantiles` to n_quantiles and transpose back to (batch_size, n_quantiles, model_prediction_length)
-                 prediction = torch.quantile(prediction, q=quantile_tensor, dim=1).transpose(0, 1)
-
-             predictions.append(prediction)
-             remaining -= prediction.shape[-1]
-
-         return torch.cat(predictions, dim=-1)[..., :prediction_length]
-
-     def predict_quantiles(
-         self, context: torch.Tensor, prediction_length: int, quantile_levels: list[float], **kwargs
-     ) -> tuple[torch.Tensor, torch.Tensor]:
-         # shape (batch_size, prediction_length, len(training_quantile_levels))
-         predictions = (
-             self.predict(
-                 context,
-                 prediction_length=prediction_length,
-             )
-             .detach()
-             .cpu()
-             .swapaxes(1, 2)
-         )
-
-         training_quantile_levels = self.quantiles
-
-         if set(quantile_levels).issubset(set(training_quantile_levels)):
-             # no need to perform intra/extrapolation
-             quantiles = predictions[..., [training_quantile_levels.index(q) for q in quantile_levels]]
-         else:
-             # we rely on torch for interpolating quantiles if quantiles that
-             # Chronos Bolt was trained on were not provided
-             if min(quantile_levels) < min(training_quantile_levels) or max(quantile_levels) > max(
-                 training_quantile_levels
-             ):
-                 logger.warning(
-                     f"\tQuantiles to be predicted ({quantile_levels}) are not within the range of "
-                     f"quantiles that Chronos-Bolt was trained on ({training_quantile_levels}). "
-                     "Quantile predictions will be set to the minimum/maximum levels at which Chronos-Bolt "
-                     "was trained on. This may significantly affect the quality of the predictions."
-                 )
-
-             # TODO: this is a hack that assumes the model's quantiles during training (training_quantile_levels)
-             # made up an equidistant grid along the quantile dimension. i.e., they were (0.1, 0.2, ..., 0.9).
-             # While this holds for official Chronos-Bolt models, this may not be true in the future, and this
-             # function may have to be revised.
-             augmented_predictions = torch.cat(
-                 [predictions[..., [0]], predictions, predictions[..., [-1]]],
-                 dim=-1,
-             )
-             quantiles = torch.quantile(
-                 augmented_predictions, q=torch.tensor(quantile_levels, dtype=augmented_predictions.dtype), dim=-1
-             ).permute(1, 2, 0)
-         mean = predictions[:, :, training_quantile_levels.index(0.5)]
-         return quantiles, mean
-
-     @classmethod
-     def from_pretrained(cls, *args, **kwargs):
-         """
-         Load the model, either from a local path or from the HuggingFace Hub.
-         Supports the same arguments as ``AutoConfig`` and ``AutoModel``
-         from ``transformers``.
-         """
-         config = AutoConfig.from_pretrained(*args, **kwargs)
-         assert hasattr(config, "chronos_config"), "Not a Chronos config file"
-
-         context_length = kwargs.pop("context_length", None)
-         if context_length is not None:
-             config.chronos_config["context_length"] = context_length
-
-         architecture = config.architectures[0]
-         class_ = globals().get(architecture)
-
-         # TODO: remove this once all models carry the correct architecture names in their configuration
-         # and raise an error instead.
-         if class_ is None:
-             logger.warning(f"Unknown architecture: {architecture}, defaulting to ChronosBoltModelForForecasting")
-             class_ = ChronosBoltModelForForecasting
-
-         model = class_.from_pretrained(*args, **kwargs)
-         return cls(model=model)
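
The training objective of the deleted model above is the quantile (pinball) loss, scaled by 2, computed per quantile level and masked over unobserved targets. The core computation can be reproduced in isolation; a minimal sketch with dummy tensors mirroring the shapes in forward(), none of whose names come from the AutoGluon API (the deleted code reduces the dimensions in a slightly different order, but the intent stated in its comments — mean over the horizon, sum over quantile levels, mean over the batch — is what this sketch follows):

import torch

batch, horizon = 4, 64
quantiles = torch.tensor([0.1, 0.5, 0.9])
num_q = len(quantiles)

target = torch.randn(batch, 1, horizon)              # broadcasts across the quantile dim
quantile_preds = torch.randn(batch, num_q, horizon)

# 2 * |(y - q_hat) * (1{y <= q_hat} - tau)| is the pinball loss scaled by 2.
loss = 2 * torch.abs(
    (target - quantile_preds)
    * ((target <= quantile_preds).float() - quantiles.view(1, num_q, 1))
)
loss = loss.mean(dim=-1)  # mean over the prediction horizon
loss = loss.sum(dim=-1)   # sum over quantile levels
loss = loss.mean()        # mean over the batch
print(loss)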
autogluon.timeseries-1.4.1b20251016-py3.9-nspkg.pth
@@ -1 +0,0 @@
- import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
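
The deleted .pth shim above is executed by site.py at interpreter startup and registers autogluon as a namespace package, so autogluon.timeseries can coexist with the other autogluon.* distributions. Unrolled into readable form (a sketch only: a real .pth file must keep the code on one line, sitedir is injected by site.py and read via sys._getframe(1).f_locals["sitedir"] in the original, and the has_mfs guard for Python > 3.5 is dropped here):

import importlib.machinery
import importlib.util
import os
import sys
import types

sitedir = "..."  # placeholder; supplied by site.py when the .pth file runs
p = os.path.join(sitedir, "autogluon")

# Prefer a real spec-based module; fall back to a bare module object.
spec = importlib.machinery.PathFinder.find_spec("autogluon", [os.path.dirname(p)])
if spec is not None:
    m = sys.modules.setdefault("autogluon", importlib.util.module_from_spec(spec))
else:
    m = sys.modules.setdefault("autogluon", types.ModuleType("autogluon"))

# Append this site dir to the namespace package's __path__ exactly once.
mp = m.__dict__.setdefault("__path__", [])
if p not in mp:
    mp.append(p)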
autogluon.timeseries-1.4.1b20251016.dist-info/RECORD
@@ -1,90 +0,0 @@
- autogluon.timeseries-1.4.1b20251016-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
- autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
- autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
- autogluon/timeseries/learner.py,sha256=eQrqFVOmL-2JC85LgCMkbyoLpKS02Dilg1T8RUeS_LI,13887
- autogluon/timeseries/predictor.py,sha256=7X4YsWYa3Xk2RI1Irf2O-c3-I82Zqhg-cgj8cj_4AoA,88427
- autogluon/timeseries/regressor.py,sha256=lc8Qr3-8v4oxajtCnV3sxpUaW6vxXXJOA6Kr-qVne4k,11926
- autogluon/timeseries/splitter.py,sha256=8ACkuCXeUhQGUx4jz_Vv17q814WrHJQeKvq2v4-oE6s,3158
- autogluon/timeseries/version.py,sha256=etlOgeNYmza14ltQktjXXl8r5n60lbmoeRf7BACKo74,91
- autogluon/timeseries/configs/__init__.py,sha256=wiLBwxZkDTQBJkSJ9-xz3p_yJxX0dbHe108dS1P5O6A,183
- autogluon/timeseries/configs/hyperparameter_presets.py,sha256=GbI2sd3uakWtaeaMyF7B5z_lmyfb6ToK6PZEUZTyG9w,2031
- autogluon/timeseries/configs/predictor_presets.py,sha256=B5HFHIelh91hhG0YYE5SJ7_14P7sylFAABgHX8n_53M,2712
- autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
- autogluon/timeseries/dataset/ts_dataframe.py,sha256=EwxKBScspwKnJTqIk2Icukk8vIrbKYObOMAkNIn4zc8,51760
- autogluon/timeseries/metrics/__init__.py,sha256=YJPXxsJ0tRDXq7p-sTZSLb0DuXMJH6sT1PgbZ3tMt30,3594
- autogluon/timeseries/metrics/abstract.py,sha256=6jbluvHXfLc_cuK1Fx0ZYle2sR4WGG6YxFQhkor46Q8,11545
- autogluon/timeseries/metrics/point.py,sha256=sS__n_Em7m4CUaBu3PNWQ_dHw1YCOHbEyC15fhytFL8,18308
- autogluon/timeseries/metrics/quantile.py,sha256=x0cq44fXRoMiuI4BVQ7mpWk1YgrK4OwLTlJAhCHQ7Xg,4634
- autogluon/timeseries/metrics/utils.py,sha256=HuDe1BNe8yJU4f_DKM913nNrUueoRaw6zhxm1-S20s0,910
- autogluon/timeseries/models/__init__.py,sha256=9NY9mqYaZe_7XB70M6psHARH-Lpkfroj4toUUPO9BmI,1339
- autogluon/timeseries/models/registry.py,sha256=8n7W04ql0ckNQUzKcAW7bxreLI8wTAUTymACgLklH9M,2158
- autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=97HOi7fRPxtx8Y9hq-xdJI-kLMp6Z-8LUSvcfBjXFsM,31978
- autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
- autogluon/timeseries/models/abstract/tunable.py,sha256=jA6p-FPZkMva67B-1foqvHK-1rr0IdEfp9RvGW1WS9I,7155
- autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=E5fZsdFPgVdyCVyj5bGmn_lQFlCMn2NvuRLBMcCFvhM,205
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=k3a0JqBeuLQfjCtZ8MA7UvS2eqHjwbw0-4kN_StMMUQ,37623
- autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=M5rhj_jjcQz27wPYm6NEBEE0aHgXe0Bl6HFc2NIuLdU,23159
- autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=aI1QJLJaOB5Xy2WA0jo6Jh25MRVyyZ8ONrqlV96kpw0,2735
- autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
- autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
- autogluon/timeseries/models/chronos/model.py,sha256=-z6Y5Fyo5_X-U8BCeSZBhqQqaJaGBCNIAYDd5y6WaMQ,32614
- autogluon/timeseries/models/chronos/pipeline/__init__.py,sha256=bkTR0LSKIxAaKFOr9A0HSkCtnRdikDPUPp810WOKgxE,247
- autogluon/timeseries/models/chronos/pipeline/base.py,sha256=Us-TUpHSN3mM3ut05IVc2a9Q6KYq1n9pTb7JZG7b6kA,5546
- autogluon/timeseries/models/chronos/pipeline/chronos.py,sha256=bgow5FkHG7y5qWBXcggqXemnistJUfrl0lWFXcGXg5g,20197
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py,sha256=5zM8G6K9id7qrWhRT37z_xoPVE-BJXwms1SwjF0TBG4,23949
- autogluon/timeseries/models/chronos/pipeline/utils.py,sha256=WYeCKFP5dxs4u09XTncBI2486VV22O1DiM9a3ZvZ1OE,12790
- autogluon/timeseries/models/ensemble/__init__.py,sha256=x2Y6dWk15XugTEWNUKq8U5z6nIjelo3UjpI-TfS13OE,159
- autogluon/timeseries/models/ensemble/abstract.py,sha256=wvtXNZTwiYpIurPkOYSzsi3XTRRx5guJLMYLmXTdOeQ,5695
- autogluon/timeseries/models/ensemble/basic.py,sha256=aSQRYylUpFZVk_Lpv5GY8uYgmE0_ipLy_tx6ELTZyWc,3426
- autogluon/timeseries/models/ensemble/greedy.py,sha256=zXJFenn1XxNNvCp4TlmIq1Dx3pUDWjKG1K3HsejmDeY,7323
- autogluon/timeseries/models/gluonts/__init__.py,sha256=YfyNYOkhhNsloA4MAavfmqKO29_q6o4lwPoV7L4_h7M,355
- autogluon/timeseries/models/gluonts/abstract.py,sha256=fYXV5fQk79LtPtpa4uk8MzCUGZ5J6n47ClEYuYSBDLY,27770
- autogluon/timeseries/models/gluonts/dataset.py,sha256=iUVKZyec1efVW2-71AWn-m9cowXYYj5hJM1JWuioscA,5115
- autogluon/timeseries/models/gluonts/models.py,sha256=1Z3x3-jVoae5X4cSnDIgJMvTJ9_O94aDSW8HEnBaL5k,25907
- autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
- autogluon/timeseries/models/local/abstract_local_model.py,sha256=A3sNYMA67UbbEKIIN30BcBdE_NpwaBtcG22O5mVWS6k,11482
- autogluon/timeseries/models/local/naive.py,sha256=xur3WWhLaS9Iix_p_yfaStbr58nL5K4rV0dReTm3BQQ,7496
- autogluon/timeseries/models/local/npts.py,sha256=VRZk5tEJOIentt0tLM6lxyoU8US736nHOvhSAgagYMc,4203
- autogluon/timeseries/models/local/statsforecast.py,sha256=sZ6aEFzAyPNZX3rMULGWFht0Toapjb3EwHe5Rb76ZxA,33318
- autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
- autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=Hn-H2jLdeuB0_TxhAdununS8ti-iO-WSl3FOoxzcEJA,12369
- autogluon/timeseries/models/toto/__init__.py,sha256=rQaVjZJV5ZsJGC0jhQ6CA4nYeXdV1KtlyDz2i2usQnY,54
- autogluon/timeseries/models/toto/dataloader.py,sha256=A5WHhnAe0J7fPo2KKG43hYLSrtUBGNweuqxMmClu3_A,3598
- autogluon/timeseries/models/toto/hf_pretrained_model.py,sha256=Q8bVUaSlQVE4xFn_v7H0h_NFTxzHiM1V17KFytc50jk,4783
- autogluon/timeseries/models/toto/model.py,sha256=eP0SAoUjv9l_ExK4eoPl9ZZHW_MXa-OVLYxhj3f1bl4,8809
- autogluon/timeseries/models/toto/_internal/__init__.py,sha256=tKkiux9bD2Xu0AuVyTEx_sNOZutcluC7-d7tn7wsmec,193
- autogluon/timeseries/models/toto/_internal/dataset.py,sha256=xuAEOhoQNJGMoCxkLVLrgpdoOJuukAYbrSrnrkwFob0,6103
- autogluon/timeseries/models/toto/_internal/forecaster.py,sha256=UXiohiySn_Gs8kLheeVcVCO8qoEtYlEfMH1tukAOHsk,18520
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py,sha256=hq5W62boH6HiEP8z3sHkI6_KM-Dd6TkDfWDm6DYE3J8,63
- autogluon/timeseries/models/toto/_internal/backbone/attention.py,sha256=HLUFoyqR8EqxUMT1BK-AjI4ClS8au35LcUo7Jx7Xhm0,9394
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py,sha256=HUjpY2ZWed74UYKjp31erXF2ZHf3mmQMw_5_cCFeJGg,10104
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py,sha256=8NXiaEVLuvjTW7L1t1RzooZFNERWv50zyLddbAwuYpo,2502
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py,sha256=QSVCrnbS2oD7wkJodZbP9XMVmrfCH6M3Zp44siF28Fg,5399
- autogluon/timeseries/models/toto/_internal/backbone/rope.py,sha256=Ghngo08DjHbwbyp6b-GXCyLeYR10dH-Y_RMOTYwIxPY,3527
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py,sha256=opqyhHIZ6mPdPlrr3gA0qt9FFogIAYNDSq-P7CyQiqE,13728
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py,sha256=5c-ngj4XHKlaedz1NkgdfQgqD2kUGkMn4mtGH_lTXsE,12410
- autogluon/timeseries/trainer/__init__.py,sha256=_tw3iioJfvtIV7wnjtEMv0yS8oabmCFxDnGRodYE7RI,72
- autogluon/timeseries/trainer/model_set_builder.py,sha256=s6tozfND3lLfst6Vxa_oP_wgCmDapyCJYFmCjkEn-es,10788
- autogluon/timeseries/trainer/prediction_cache.py,sha256=Vi6EbMiMheq_smA93U_MoMxYUV85RdPm0dvJFdsM8K4,5551
- autogluon/timeseries/trainer/trainer.py,sha256=LF2X5UNnrU8w5h_i09SphGWvGFvZ6KvPDq89Z3GzZZQ,54959
- autogluon/timeseries/transforms/__init__.py,sha256=fKlT4pkJ_8Gl7IUTc3uSDzt2Xow5iH5w6fPB3ePNrTg,127
- autogluon/timeseries/transforms/covariate_scaler.py,sha256=9lEfDS4wnVZohQNnm9OcAXr3voUl83RCnctKR3O66iU,7030
- autogluon/timeseries/transforms/target_scaler.py,sha256=kTQrXAsDHCnYuqfpaVuvefyTgyp_ylDpUIPz7pArjeY,6043
- autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/timeseries/utils/features.py,sha256=tdL7jZKeySO7dgB09FweR44wPCmfWg8-ZM5uVzeyvYQ,22593
- autogluon/timeseries/utils/forecast.py,sha256=yK1_eNtRUPYGs0R-VWMO4c81LrTGF57ih3yzsXVHyGY,2191
- autogluon/timeseries/utils/warning_filters.py,sha256=SroNhLU3kwbD8anM58vdxWq36Z8j_uiY42mEt0ya-JI,2589
- autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
- autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
- autogluon/timeseries/utils/datetime/lags.py,sha256=rjJtdBU0M41R1jwfmvCbo045s-6XBjhGVnGBQJ9-U1E,5997
- autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
- autogluon/timeseries/utils/datetime/time_features.py,sha256=kEOFls4Nzh8nO0Pcz1DwLsC_NA3hMI4JUlZI3kuvuts,2666
- autogluon.timeseries-1.4.1b20251016.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
- autogluon.timeseries-1.4.1b20251016.dist-info/METADATA,sha256=Qa4lnLJDLImIueXA46HcrrCnGjNSkez3atZfoFpqhIk,12702
- autogluon.timeseries-1.4.1b20251016.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
- autogluon.timeseries-1.4.1b20251016.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- autogluon.timeseries-1.4.1b20251016.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.timeseries-1.4.1b20251016.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.timeseries-1.4.1b20251016.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
- autogluon.timeseries-1.4.1b20251016.dist-info/RECORD,,
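
Each RECORD row has the form path,algorithm=digest,size: the digest is the file's sha256 hash in URL-safe base64 with the trailing "=" padding stripped (per the wheel spec and PEP 376), and the RECORD file itself is listed with empty hash and size fields. A minimal sketch for verifying entries against an unpacked wheel (the verify helper is illustrative, not part of any package):

import base64
import csv
import hashlib
import pathlib

def verify(record_path: str, root: str = ".") -> None:
    with open(record_path, newline="") as f:
        for path, hash_spec, size in csv.reader(f):
            if not hash_spec:  # the RECORD file lists itself without a hash
                continue
            algo, _, expected = hash_spec.partition("=")
            data = pathlib.Path(root, path).read_bytes()
            digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode()
            ok = digest == expected and len(data) == int(size)
            print("ok" if ok else "MISMATCH", path)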