autogluon.timeseries 1.4.1b20250907-py3-none-any.whl → 1.5.1b20260122-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of autogluon.timeseries has been flagged as potentially problematic.

Files changed (95)
  1. autogluon/timeseries/configs/hyperparameter_presets.py +13 -28
  2. autogluon/timeseries/configs/predictor_presets.py +23 -39
  3. autogluon/timeseries/dataset/ts_dataframe.py +97 -86
  4. autogluon/timeseries/learner.py +70 -35
  5. autogluon/timeseries/metrics/__init__.py +4 -4
  6. autogluon/timeseries/metrics/abstract.py +8 -8
  7. autogluon/timeseries/metrics/point.py +9 -9
  8. autogluon/timeseries/metrics/quantile.py +5 -5
  9. autogluon/timeseries/metrics/utils.py +4 -4
  10. autogluon/timeseries/models/__init__.py +4 -1
  11. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -50
  12. autogluon/timeseries/models/abstract/model_trial.py +2 -1
  13. autogluon/timeseries/models/abstract/tunable.py +8 -8
  14. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +58 -62
  15. autogluon/timeseries/models/autogluon_tabular/per_step.py +27 -16
  16. autogluon/timeseries/models/autogluon_tabular/transforms.py +11 -9
  17. autogluon/timeseries/models/chronos/__init__.py +2 -1
  18. autogluon/timeseries/models/chronos/chronos2.py +395 -0
  19. autogluon/timeseries/models/chronos/model.py +127 -89
  20. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +69 -37
  21. autogluon/timeseries/models/ensemble/__init__.py +36 -2
  22. autogluon/timeseries/models/ensemble/abstract.py +14 -46
  23. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  24. autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
  25. autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
  26. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
  27. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
  28. autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
  29. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
  30. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
  31. autogluon/timeseries/models/ensemble/{greedy.py → ensemble_selection.py} +41 -61
  32. autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
  33. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  34. autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
  35. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +25 -22
  36. autogluon/timeseries/models/ensemble/weighted/greedy.py +64 -0
  37. autogluon/timeseries/models/gluonts/abstract.py +32 -31
  38. autogluon/timeseries/models/gluonts/dataset.py +11 -11
  39. autogluon/timeseries/models/gluonts/models.py +0 -7
  40. autogluon/timeseries/models/local/__init__.py +0 -7
  41. autogluon/timeseries/models/local/abstract_local_model.py +15 -18
  42. autogluon/timeseries/models/local/naive.py +2 -2
  43. autogluon/timeseries/models/local/npts.py +7 -1
  44. autogluon/timeseries/models/local/statsforecast.py +13 -13
  45. autogluon/timeseries/models/multi_window/multi_window_model.py +39 -24
  46. autogluon/timeseries/models/registry.py +3 -4
  47. autogluon/timeseries/models/toto/__init__.py +3 -0
  48. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  49. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  50. autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
  51. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  52. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  53. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  54. autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
  55. autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
  56. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
  57. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  58. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  59. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  60. autogluon/timeseries/models/toto/dataloader.py +108 -0
  61. autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
  62. autogluon/timeseries/models/toto/model.py +249 -0
  63. autogluon/timeseries/predictor.py +541 -162
  64. autogluon/timeseries/regressor.py +27 -30
  65. autogluon/timeseries/splitter.py +3 -27
  66. autogluon/timeseries/trainer/ensemble_composer.py +444 -0
  67. autogluon/timeseries/trainer/model_set_builder.py +9 -9
  68. autogluon/timeseries/trainer/prediction_cache.py +16 -16
  69. autogluon/timeseries/trainer/trainer.py +300 -279
  70. autogluon/timeseries/trainer/utils.py +17 -0
  71. autogluon/timeseries/transforms/covariate_scaler.py +8 -8
  72. autogluon/timeseries/transforms/target_scaler.py +15 -15
  73. autogluon/timeseries/utils/constants.py +10 -0
  74. autogluon/timeseries/utils/datetime/lags.py +1 -3
  75. autogluon/timeseries/utils/datetime/seasonality.py +1 -3
  76. autogluon/timeseries/utils/features.py +31 -14
  77. autogluon/timeseries/utils/forecast.py +6 -7
  78. autogluon/timeseries/utils/timer.py +173 -0
  79. autogluon/timeseries/version.py +1 -1
  80. autogluon.timeseries-1.5.1b20260122-py3.11-nspkg.pth +1 -0
  81. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/METADATA +39 -22
  82. autogluon_timeseries-1.5.1b20260122.dist-info/RECORD +103 -0
  83. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/WHEEL +1 -1
  84. autogluon/timeseries/evaluator.py +0 -6
  85. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  86. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  87. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  88. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -580
  89. autogluon.timeseries-1.4.1b20250907-py3.9-nspkg.pth +0 -1
  90. autogluon.timeseries-1.4.1b20250907.dist-info/RECORD +0 -75
  91. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info/licenses}/LICENSE +0 -0
  92. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info/licenses}/NOTICE +0 -0
  93. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/namespace_packages.txt +0 -0
  94. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/top_level.txt +0 -0
  95. {autogluon.timeseries-1.4.1b20250907.dist-info → autogluon_timeseries-1.5.1b20260122.dist-info}/zip-safe +0 -0

autogluon/timeseries/models/toto/hf_pretrained_model.py
@@ -0,0 +1,200 @@
+ import json
+ import logging
+ import os
+ from pathlib import Path
+
+ from transformers import PretrainedConfig, PreTrainedModel
+
+ from ._internal.backbone import TotoBackbone
+
+
+ class TotoConfig(PretrainedConfig):
+     model_type = "toto"
+
+     def __init__(
+         self,
+         dropout: float = 0.0,
+         embed_dim: int = 768,
+         num_heads: int = 12,
+         num_layers: int = 12,
+         output_distribution_classes: list[str] | None = None,
+         output_distribution_kwargs: dict | None = None,
+         patch_size: int = 64,
+         scale_factor_exponent: float = 10.0,
+         spacewise_every_n_layers: int = 12,
+         spacewise_first: bool = False,
+         stabilize_with_global: bool = True,
+         stride: int = 64,
+         transformers_version: str = "4.49.0",
+         use_memory_efficient_attention: bool = False,
+         **kwargs,
+     ):
+         self.dropout = dropout
+         self.embed_dim = embed_dim
+         self.num_heads = num_heads
+         self.num_layers = num_layers
+         self.output_distribution_classes = output_distribution_classes or ["MixtureOfStudentTsOutput"]
+         self.output_distribution_kwargs = output_distribution_kwargs or {"k_components": 24}
+         self.patch_size = patch_size
+         self.scale_factor_exponent = scale_factor_exponent
+         self.spacewise_every_n_layers = spacewise_every_n_layers
+         self.spacewise_first = spacewise_first
+         self.stabilize_with_global = stabilize_with_global
+         self.stride = stride
+         self.transformers_version = transformers_version
+         self.use_memory_efficient_attention = use_memory_efficient_attention
+
+         super().__init__(**kwargs)
+
+
+ class TotoPretrainedModel(PreTrainedModel):
+     config_class = TotoConfig
+     base_model_prefix = "model"  # optional, used for weight naming conventions
+
+     def __init__(self, config: TotoConfig):
+         super().__init__(config)
+         self.model = TotoBackbone(
+             patch_size=config.patch_size,
+             stride=config.stride,
+             embed_dim=config.embed_dim,
+             num_layers=config.num_layers,
+             num_heads=config.num_heads,
+             mlp_hidden_dim=getattr(config, "mlp_hidden_dim", 3072),
+             dropout=config.dropout,
+             spacewise_every_n_layers=config.spacewise_every_n_layers,
+             scaler_cls=getattr(config, "scaler_cls", "model.scaler.CausalPatchStdMeanScaler"),
+             output_distribution_classes=config.output_distribution_classes,
+             spacewise_first=config.spacewise_first,
+             output_distribution_kwargs=config.output_distribution_kwargs,
+             use_memory_efficient_attention=False,
+             stabilize_with_global=config.stabilize_with_global,
+             scale_factor_exponent=config.scale_factor_exponent,
+             **getattr(config, "extra_kwargs", {}),
+         )
+         self.post_init()
+
+     @staticmethod
+     def _remap_state_dict_keys(state_dict):
+         remap = {
+             "mlp.0.w12.weight": "mlp.0.weight",
+             "mlp.0.w12.bias": "mlp.0.bias",
+             "mlp.0.w3.weight": "mlp.2.weight",
+             "mlp.0.w3.bias": "mlp.2.bias",
+         }
+
+         new_state = {}
+         keys_to_remap = []
+         for key in list(state_dict.keys()):
+             for old, new in remap.items():
+                 if old in key:
+                     new_key = key.replace(old, new)
+                     keys_to_remap.append((key, new_key))
+                     break
+
+         new_state = state_dict.copy()
+         for old_key, new_key in keys_to_remap:
+             new_state[new_key] = new_state.pop(old_key)
+
+         return new_state
+
+     @classmethod
+     def load_from_checkpoint(
+         cls,
+         checkpoint_path,
+         device_map: str = "cpu",
+         strict=True,
+         **model_kwargs,
+     ):
+         """
+         Custom checkpoint loading. Used to load a local
+         safetensors checkpoint with an optional config.json file.
+         """
+         import safetensors.torch as safetorch
+
+         if os.path.isdir(checkpoint_path):
+             safetensors_file = os.path.join(checkpoint_path, "model.safetensors")
+         else:
+             safetensors_file = checkpoint_path
+
+         if os.path.exists(safetensors_file):
+             model_state = safetorch.load_file(safetensors_file, device=device_map)
+         else:
+             raise FileNotFoundError(f"Model checkpoint not found at: {safetensors_file}")
+
+         # Load configuration from config.json if it exists.
+         config_file = os.path.join(checkpoint_path, "config.json")
+         config = {}
+         if os.path.exists(config_file):
+             with open(config_file, "r") as f:
+                 config = json.load(f)
+
+         # Merge any extra kwargs into the configuration.
+         config.update(model_kwargs)
+
+         remapped_state_dict = cls._remap_state_dict_keys(model_state)
+
+         instance = cls(**config)
+
+         # Filter out unexpected keys
+         filtered_remapped_state_dict = {
+             k: v
+             for k, v in remapped_state_dict.items()
+             if k in instance.state_dict() and not k.endswith("rotary_emb.freqs")
+         }
+
+         instance.load_state_dict(filtered_remapped_state_dict, strict=strict)
+         instance.to(device_map)  # type: ignore
+
+         return instance
+
+     @classmethod
+     def from_pretrained(
+         cls,
+         *,
+         model_id: str,
+         revision: str | None = None,
+         cache_dir: Path | str | None = None,
+         force_download: bool = False,
+         proxies: dict | None = None,
+         resume_download: bool | None = None,
+         local_files_only: bool = False,
+         token: str | bool | None = None,
+         device_map: str = "cpu",
+         strict: bool = False,
+         **model_kwargs,
+     ):
+         """Load Pytorch pretrained weights and return the loaded model."""
+         from huggingface_hub import constants, hf_hub_download
+
+         transformers_logger = logging.getLogger("transformers.modeling_utils")
+         original_level = transformers_logger.level
+
+         try:
+             # Here we suppress transformers logger's "some weights were not initialized" error since the
+             # remapping hook is only called after the initial model loading.
+             transformers_logger.setLevel(logging.ERROR)
+
+             if os.path.isdir(model_id):
+                 print("Loading weights from local directory")
+                 model_file = os.path.join(model_id, constants.SAFETENSORS_SINGLE_FILE)
+                 model = cls.load_from_checkpoint(model_file, device_map, strict, **model_kwargs)
+             else:
+                 model_file = hf_hub_download(
+                     repo_id=model_id,
+                     filename=constants.SAFETENSORS_SINGLE_FILE,
+                     revision=revision,
+                     cache_dir=cache_dir,
+                     force_download=force_download,
+                     proxies=proxies,
+                     resume_download=resume_download,
+                     token=token,
+                     local_files_only=local_files_only,
+                 )
+                 model = cls.load_from_checkpoint(model_file, device_map, strict, **model_kwargs)
+         finally:
+             transformers_logger.setLevel(original_level)
+
+         return model
+
+     def forward(self, *args, **kwargs):
+         return self.model(*args, **kwargs)
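
hf_pretrained_model.py is internal plumbing, but it shows how the new Toto weights are fetched and assembled. As a rough sketch only (it assumes a CUDA GPU, network access to the Hugging Face Hub, and that these internal import paths stay stable), the loading path added above could be exercised directly; TotoModel.load_forecaster() in the next file makes essentially the same calls:

from autogluon.timeseries.models.toto.hf_pretrained_model import TotoConfig, TotoPretrainedModel

# Downloads model.safetensors from the Hub, remaps the MLP weight names,
# and builds the TotoBackbone that TotoPretrainedModel wraps.
backbone_model = TotoPretrainedModel.from_pretrained(
    model_id="Datadog/Toto-Open-Base-1.0",
    config=TotoConfig.from_pretrained("Datadog/Toto-Open-Base-1.0"),
    device_map="cuda",
)
print(type(backbone_model.model))  # TotoBackbone, later wrapped by TotoForecaster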

autogluon/timeseries/models/toto/model.py
@@ -0,0 +1,249 @@
+ import logging
+ import os
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ import numpy as np
+ import pandas as pd
+ from typing_extensions import Self
+
+ from autogluon.common.loaders import load_pkl
+ from autogluon.timeseries import TimeSeriesDataFrame
+ from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
+ from autogluon.timeseries.utils.features import CovariateMetadata
+
+ if TYPE_CHECKING:
+     from ._internal import TotoForecaster
+
+ logger = logging.getLogger(__name__)
+
+
+ class TotoModel(AbstractTimeSeriesModel):
+     """Toto (Time-Series-Optimized Transformer for Observability) [CohenKhwajaetal2025]_ pretrained time series forecasting model.
+
+     Toto is a 151M parameter model trained on over 1T data points from DataDog's internal observability systems, as well as
+     the GIFT-eval pretrain, Chronos pretraining, and synthetically generated time series corpora. It is a decoder-only
+     architecture that autoregressively outputs parametric distribution forecasts. More details can be found on
+     `Hugging Face <https://huggingface.co/Datadog/Toto-Open-Base-1.0>`_ and `GitHub <https://github.com/DataDog/toto>`_.
+
+     The AutoGluon implementation of Toto is based on a port of the original implementation. AutoGluon supports Toto for
+     **inference only**, i.e., the model will not be trained or fine-tuned on the provided training data. Toto is optimized
+     for easy maintenance with the rest of the AutoGluon model zoo, and does not feature some important optimizations such
+     as xformers and flash-attention available in the original model repository. The AutoGluon implementation of Toto
+     requires a CUDA-compatible GPU.
+
+     References
+     ----------
+     .. [CohenKhwajaetal2025] Cohen, Ben, Khwaja, Emaad et al.
+         "This Time is Different: An Observability Perspective on Time Series Foundation Models."
+         https://arxiv.org/abs/2505.14766
+
+
+     Other Parameters
+     ----------------
+     model_path : str, default = "Datadog/Toto-Open-Base-1.0"
+         Model path used for the model, i.e., a HuggingFace transformers ``name_or_path``. Can be a
+         compatible model name on HuggingFace Hub or a local path to a model directory.
+     batch_size : int, default = 24
+         Size of batches used during inference.
+     num_samples : int, default = 256
+         Number of samples used during inference.
+     device : str, default = "cuda"
+         Device to use for inference. Toto requires a CUDA-compatible GPU to run.
+     context_length : int or None, default = 4096
+         The context length to use in the model. Shorter context lengths will decrease model accuracy, but result
+         in faster inference.
+     compile_model : bool, default = True
+         Whether to compile the model using torch.compile() for faster inference. May increase initial loading time
+         but can provide speedups during inference.
+     """
+
+     default_model_path: str = "Datadog/Toto-Open-Base-1.0"
+
+     def __init__(
+         self,
+         path: str | None = None,
+         name: str | None = None,
+         hyperparameters: dict[str, Any] | None = None,
+         freq: str | None = None,
+         prediction_length: int = 1,
+         covariate_metadata: CovariateMetadata | None = None,
+         target: str = "target",
+         quantile_levels: Sequence[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
+         eval_metric: Any = None,
+     ):
+         hyperparameters = hyperparameters if hyperparameters is not None else {}
+
+         self.model_path = hyperparameters.get("model_path", self.default_model_path)
+
+         super().__init__(
+             path=path,
+             name=name,
+             hyperparameters=hyperparameters,
+             freq=freq,
+             prediction_length=prediction_length,
+             covariate_metadata=covariate_metadata,
+             target=target,
+             quantile_levels=quantile_levels,
+             eval_metric=eval_metric,
+         )
+
+         self._forecaster: TotoForecaster | None = None
+
+     def save(self, path: str | None = None, verbose: bool = True) -> str:
+         forecaster = self._forecaster
+         self._forecaster = None
+         path = super().save(path=path, verbose=verbose)
+         self._forecaster = forecaster
+
+         return str(path)
+
+     @classmethod
+     def load(cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True) -> Self:
+         model = load_pkl.load(path=os.path.join(path, cls.model_file_name), verbose=verbose)
+         if reset_paths:
+             model.set_contexts(path)
+
+         return model
+
+     def _is_gpu_available(self) -> bool:
+         import torch.cuda
+
+         return torch.cuda.is_available()
+
+     def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, int | float]:
+         return {"num_cpus": 1, "num_gpus": 1}
+
+     def load_forecaster(self):
+         from ._internal import TotoForecaster
+         from .hf_pretrained_model import TotoConfig, TotoPretrainedModel
+
+         if not self._is_gpu_available():
+             raise RuntimeError(
+                 f"{self.name} requires a GPU to run, but no GPU was detected. "
+                 "Please make sure that you are using a computer with a CUDA-compatible GPU and "
+                 "`import torch; torch.cuda.is_available()` returns `True`."
+             )
+
+         hyperparameters = self.get_hyperparameters()
+         pretrained_model = TotoPretrainedModel.from_pretrained(
+             model_id=self.model_path,
+             config=TotoConfig.from_pretrained(self.model_path),
+             device_map=hyperparameters["device"],
+         )
+
+         if hyperparameters["compile_model"]:
+             pretrained_model.model.compile()
+
+         self._forecaster = TotoForecaster(model=pretrained_model.model)
+
+     def persist(self) -> Self:
+         if self._forecaster is None:
+             self.load_forecaster()
+         return self
+
+     def _get_default_hyperparameters(self) -> dict:
+         return {
+             "batch_size": 24,
+             "num_samples": 256,
+             "device": "cuda",
+             "context_length": 4096,
+             "compile_model": False,
+         }
+
+     def _get_sample_batch_size(self) -> int:
+         num_samples = self.get_hyperparameter("num_samples")
+         batch_size = num_samples
+         while batch_size > 32:
+             for factor in range(2, int(batch_size**0.5) + 1):
+                 if batch_size % factor == 0:
+                     batch_size //= factor
+                     break
+             else:  # batch_size is prime
+                 return batch_size
+         return batch_size
+
+     @property
+     def allowed_hyperparameters(self) -> list[str]:
+         return super().allowed_hyperparameters + [
+             "model_path",
+             "batch_size",
+             "num_samples",
+             "device",
+             "context_length",
+             "compile_model",
+         ]
+
+     def _more_tags(self) -> dict:
+         return {
+             "allow_nan": True,
+             "can_use_train_data": False,
+             "can_use_val_data": False,
+         }
+
+     def _fit(
+         self,
+         train_data: TimeSeriesDataFrame,
+         val_data: TimeSeriesDataFrame | None = None,
+         time_limit: float | None = None,
+         num_cpus: int | None = None,
+         num_gpus: int | None = None,
+         verbosity: int = 2,
+         **kwargs,
+     ) -> None:
+         self._check_fit_params()
+         self.load_forecaster()
+
+     def _predict(
+         self, data: TimeSeriesDataFrame, known_covariates: TimeSeriesDataFrame | None = None, **kwargs
+     ) -> TimeSeriesDataFrame:
+         import torch
+
+         from .dataloader import TotoDataLoader, TotoInferenceDataset
+
+         hyperparameters = self.get_hyperparameters()
+
+         if self._forecaster is None:
+             self.load_forecaster()
+         assert self._forecaster, "Toto model failed to load"
+         device = self._forecaster.model.device
+
+         dataset = TotoInferenceDataset(
+             target_df=data.fill_missing_values("auto"),
+             max_context_length=hyperparameters["context_length"],
+             target_column=self.target,
+         )
+         loader = TotoDataLoader(
+             dataset,
+             freq=self.freq,
+             batch_size=hyperparameters["batch_size"],
+             time_limit=kwargs.get("time_limit"),
+             device=device,
+         )
+
+         batch_means, batch_quantiles = [], []
+         with torch.inference_mode():
+             for masked_timeseries in loader:
+                 forecast = self._forecaster.forecast(
+                     masked_timeseries,
+                     prediction_length=self.prediction_length,
+                     num_samples=hyperparameters["num_samples"],
+                     samples_per_batch=self._get_sample_batch_size(),
+                 )
+
+                 batch_means.append(forecast.mean.cpu().numpy())
+                 qs = np.array([forecast.quantile(q).cpu().numpy() for q in self.quantile_levels])
+                 batch_quantiles.append(qs.squeeze(2).transpose(1, 2, 0))
+
+         df = pd.DataFrame(
+             np.concatenate(
+                 [
+                     np.concatenate(batch_means, axis=0).reshape(-1, 1),
+                     np.concatenate(batch_quantiles, axis=0).reshape(-1, len(self.quantile_levels)),
+                 ],
+                 axis=1,
+             ),
+             columns=["mean"] + [str(q) for q in self.quantile_levels],
+             index=self.get_forecast_horizon_index(data),
+         )
+
+         return TimeSeriesDataFrame(df)
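
The docstring above lists the model's user-facing hyperparameters, so the natural entry point is the TimeSeriesPredictor API rather than the model class itself. A minimal usage sketch, assuming the model is registered under the key "Toto" (by analogy with "Chronos"; the registry key is not visible in this diff) and reusing the documented hyperparameter names:

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Placeholder path; any long-format dataset with item_id/timestamp/target works.
train_data = TimeSeriesDataFrame.from_path("train.csv")

predictor = TimeSeriesPredictor(prediction_length=48).fit(
    train_data,
    hyperparameters={
        "Toto": {  # assumed registry key for TotoModel
            "model_path": "Datadog/Toto-Open-Base-1.0",
            "device": "cuda",        # a CUDA-compatible GPU is required
            "context_length": 2048,  # shorter context trades accuracy for speed
            "compile_model": True,   # torch.compile() the backbone before inference
        }
    },
)
predictions = predictor.predict(train_data)

Because Toto is inference-only (can_use_train_data and can_use_val_data are both False in _more_tags), _fit() only validates the setup and loads the pretrained forecaster; the actual work happens inside _predict().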