autogluon.timeseries 1.1.2b20241109__py3-none-any.whl → 1.1.2b20241112__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- autogluon/timeseries/dataset/ts_dataframe.py +5 -1
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +73 -5
- autogluon/timeseries/models/chronos/model.py +67 -38
- autogluon/timeseries/models/chronos/pipeline/__init__.py +11 -0
- autogluon/timeseries/models/chronos/pipeline/base.py +146 -0
- autogluon/timeseries/models/chronos/{pipeline.py → pipeline/chronos.py} +66 -102
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +511 -0
- autogluon/timeseries/models/chronos/{utils.py → pipeline/utils.py} +37 -1
- autogluon/timeseries/models/gluonts/abstract_gluonts.py +1 -0
- autogluon/timeseries/models/gluonts/torch/models.py +3 -0
- autogluon/timeseries/models/local/abstract_local_model.py +4 -1
- autogluon/timeseries/models/local/statsforecast.py +3 -0
- autogluon/timeseries/models/multi_window/multi_window_model.py +5 -0
- autogluon/timeseries/predictor.py +1 -1
- autogluon/timeseries/regressor.py +146 -0
- autogluon/timeseries/transforms/scaler.py +1 -1
- autogluon/timeseries/utils/warning_filters.py +20 -0
- autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/METADATA +5 -5
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/RECORD +27 -23
- /autogluon.timeseries-1.1.2b20241109-py3.8-nspkg.pth → /autogluon.timeseries-1.1.2b20241112-py3.8-nspkg.pth +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/LICENSE +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/NOTICE +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/WHEEL +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.1.2b20241109.dist-info → autogluon.timeseries-1.1.2b20241112.dist-info}/zip-safe +0 -0
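The headline change is the refactor of the single `chronos/pipeline.py` module into a `pipeline/` subpackage: a shared `BaseChronosPipeline` interface (`pipeline/base.py`), the existing T5-based pipeline (moved to `pipeline/chronos.py`), and a new Chronos-Bolt implementation (`pipeline/chronos_bolt.py`). A minimal import sketch of the resulting layout; that `pipeline/__init__.py` re-exports all three names (and the `ChronosBoltPipeline` class name itself) is an assumption, not something this diff shows:

    # Hypothetical usage; assumes these names are re-exported by pipeline/__init__.py.
    from autogluon.timeseries.models.chronos.pipeline import (
        BaseChronosPipeline,  # new shared interface (pipeline/base.py)
        ChronosPipeline,      # moved from pipeline.py to pipeline/chronos.py
        ChronosBoltPipeline,  # new in pipeline/chronos_bolt.py
    )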
autogluon/timeseries/models/chronos/{pipeline.py → pipeline/chronos.py}

@@ -11,14 +11,16 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
 import torch
 import torch.nn as nn
-from transformers import AutoConfig,
+from transformers import AutoConfig, AutoModelForSeq2SeqLM, GenerationConfig, PreTrainedModel
 
 from autogluon.timeseries.utils.warning_filters import set_loggers_level
 
-
+from .base import BaseChronosPipeline, ForecastType
 
+logger = logging.getLogger("autogluon.timeseries.models.chronos")
 
-
+
+__all__ = ["ChronosConfig", "ChronosPipeline"]
 
 
 @dataclass
@@ -35,7 +37,7 @@ class ChronosConfig:
     pad_token_id: int
     eos_token_id: int
     use_eos_token: bool
-    model_type: Literal["
+    model_type: Literal["seq2seq"]
     context_length: int
     prediction_length: int
     num_samples: int
@@ -279,18 +281,7 @@ class ChronosPretrainedModel(nn.Module):
         return preds.reshape(input_ids.size(0), num_samples, -1)
 
 
-def left_pad_and_stack_1D(tensors: List[torch.Tensor]) -> torch.Tensor:
-    max_len = max(len(c) for c in tensors)
-    padded = []
-    for c in tensors:
-        assert isinstance(c, torch.Tensor)
-        assert c.ndim == 1
-        padding = torch.full(size=(max_len - len(c),), fill_value=torch.nan, device=c.device)
-        padded.append(torch.concat((padding, c), dim=-1))
-    return torch.stack(padded)
-
-
-class ChronosPipeline:
+class ChronosPipeline(BaseChronosPipeline):
     """
     A ``ChronosPipeline`` uses the given tokenizer and model to forecast
     input time series.
@@ -308,21 +299,12 @@ class ChronosPipeline:
 
     tokenizer: ChronosTokenizer
    model: ChronosPretrainedModel
+    forecast_type: ForecastType = ForecastType.SAMPLES
 
     def __init__(self, tokenizer, model):
         self.tokenizer = tokenizer
         self.model = model
 
-    def _prepare_and_validate_context(self, context: Union[torch.Tensor, List[torch.Tensor]]):
-        if isinstance(context, list):
-            context = left_pad_and_stack_1D(context)
-        assert isinstance(context, torch.Tensor)
-        if context.ndim == 1:
-            context = context.unsqueeze(0)
-        assert context.ndim == 2
-
-        return context
-
     @torch.no_grad()
     def embed(self, context: Union[torch.Tensor, List[torch.Tensor]]) -> Tuple[torch.Tensor, Any]:
         """
@@ -363,7 +345,7 @@ class ChronosPipeline:
         temperature: Optional[float] = None,
         top_k: Optional[int] = None,
         top_p: Optional[float] = None,
-        limit_prediction_length: bool =
+        limit_prediction_length: bool = False,
     ) -> torch.Tensor:
         """
         Get forecasts for the given time series.
@@ -442,42 +424,33 @@ class ChronosPipeline:
 
         return torch.cat(predictions, dim=-1)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            inner_model = AutoModelForCausalLM.from_pretrained(*args, **kwargs)
-
-        return cls(
-            tokenizer=chronos_config.create_tokenizer(),
-            model=ChronosPretrainedModel(config=chronos_config, model=inner_model),
+    def predict_quantiles(
+        self,
+        context: torch.Tensor,
+        prediction_length: int,
+        quantile_levels: List[float],
+        num_samples: Optional[int] = None,
+        **kwargs,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        num_samples = num_samples or self.model.config.num_samples
+        prediction_samples = (
+            self.predict(
+                context,
+                prediction_length=prediction_length,
+                num_samples=num_samples,
+            )
+            .detach()
+            .cpu()
+            .swapaxes(1, 2)
         )
+        mean = prediction_samples.mean(axis=-1, keepdims=True)
+        quantiles = torch.quantile(
+            prediction_samples,
+            q=torch.tensor(quantile_levels, dtype=prediction_samples.dtype),
+            dim=-1,
+        ).permute(1, 2, 0)
 
-
-class OptimizedChronosPipeline(ChronosPipeline):
-    """A wrapper around the ChronosPipeline object for CPU-optimized model classes from
-    HuggingFace optimum.
-    """
-
-    dtypes = {
-        "bfloat16": torch.bfloat16,
-        "float32": torch.float32,
-        "float64": torch.float64,
-    }
+        return quantiles, mean
 
     @classmethod
     def from_pretrained(cls, *args, **kwargs):
@@ -498,49 +471,40 @@ class OptimizedChronosPipeline(ChronosPipeline):
         config.chronos_config["context_length"] = context_length
         chronos_config = ChronosConfig(**config.chronos_config)
 
-
-        if
-
-
-
-
-
-
-
-
-
-
-
-
-
+        assert chronos_config.model_type == "seq2seq"
+        if optimization_strategy is None:
+            inner_model = AutoModelForSeq2SeqLM.from_pretrained(*args, **kwargs)
+        else:
+            assert optimization_strategy in [
+                "onnx",
+                "openvino",
+            ], "optimization_strategy not recognized. Please provide one of `onnx` or `openvino`"
+            torch_dtype = kwargs.pop("torch_dtype", "auto")
+            if torch_dtype != "auto":
+                logger.warning(f"\t`torch_dtype` will be ignored for optimization_strategy {optimization_strategy}")
+
+            if optimization_strategy == "onnx":
+                try:
+                    from optimum.onnxruntime import ORTModelForSeq2SeqLM
+                except ImportError:
+                    raise ImportError(
+                        "Huggingface Optimum library must be installed with ONNX for using the `onnx` strategy"
                     )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                except ImportError:
-                    raise ImportError(
-                        "Huggingface Optimum library must be installed with OpenVINO for using the `openvino` strategy"
-                    )
-                with set_loggers_level(regex=r"^optimum.*", level=logging.ERROR):
-                    inner_model = OVModelForSeq2SeqLM.from_pretrained(
-                        *args, **{**kwargs, "device_map": "cpu", "export": True}
-                    )
-        else:
-            assert config.model_type == "causal"
-            inner_model = AutoModelForCausalLM.from_pretrained(*args, **kwargs)
+                assert kwargs.pop("device_map", "cpu") in ["cpu", "auto"], "ONNX mode only available on the CPU"
+                with set_loggers_level(regex=r"^optimum.*", level=logging.ERROR):
+                    inner_model = ORTModelForSeq2SeqLM.from_pretrained(*args, **{**kwargs, "export": True})
+            elif optimization_strategy == "openvino":
+                try:
+                    from optimum.intel import OVModelForSeq2SeqLM
+                except ImportError:
+                    raise ImportError(
+                        "Huggingface Optimum library must be installed with OpenVINO for using the `openvino` strategy"
+                    )
+                with set_loggers_level(regex=r"^optimum.*", level=logging.ERROR):
+                    inner_model = OVModelForSeq2SeqLM.from_pretrained(
+                        *args, **{**kwargs, "device_map": "cpu", "export": True}
+                    )
 
         return cls(
             tokenizer=chronos_config.create_tokenizer(),