autogluon.timeseries 1.4.1b20250906__py3-none-any.whl → 1.4.1b20251210__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (93)
  1. autogluon/timeseries/configs/hyperparameter_presets.py +2 -2
  2. autogluon/timeseries/dataset/ts_dataframe.py +97 -86
  3. autogluon/timeseries/learner.py +68 -35
  4. autogluon/timeseries/metrics/__init__.py +4 -4
  5. autogluon/timeseries/metrics/abstract.py +8 -8
  6. autogluon/timeseries/metrics/point.py +9 -9
  7. autogluon/timeseries/metrics/quantile.py +5 -5
  8. autogluon/timeseries/metrics/utils.py +4 -4
  9. autogluon/timeseries/models/__init__.py +4 -1
  10. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -39
  11. autogluon/timeseries/models/abstract/model_trial.py +2 -1
  12. autogluon/timeseries/models/abstract/tunable.py +8 -8
  13. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +58 -62
  14. autogluon/timeseries/models/autogluon_tabular/per_step.py +26 -15
  15. autogluon/timeseries/models/autogluon_tabular/transforms.py +11 -9
  16. autogluon/timeseries/models/chronos/__init__.py +2 -1
  17. autogluon/timeseries/models/chronos/chronos2.py +361 -0
  18. autogluon/timeseries/models/chronos/model.py +125 -87
  19. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +68 -36
  20. autogluon/timeseries/models/ensemble/__init__.py +34 -2
  21. autogluon/timeseries/models/ensemble/abstract.py +5 -42
  22. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  23. autogluon/timeseries/models/ensemble/array_based/abstract.py +236 -0
  24. autogluon/timeseries/models/ensemble/array_based/models.py +73 -0
  25. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
  26. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
  27. autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +167 -0
  28. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
  29. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
  30. autogluon/timeseries/models/ensemble/{greedy.py → ensemble_selection.py} +41 -61
  31. autogluon/timeseries/models/ensemble/per_item_greedy.py +162 -0
  32. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  33. autogluon/timeseries/models/ensemble/weighted/abstract.py +40 -0
  34. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +6 -16
  35. autogluon/timeseries/models/ensemble/weighted/greedy.py +57 -0
  36. autogluon/timeseries/models/gluonts/abstract.py +25 -25
  37. autogluon/timeseries/models/gluonts/dataset.py +11 -11
  38. autogluon/timeseries/models/local/__init__.py +0 -7
  39. autogluon/timeseries/models/local/abstract_local_model.py +15 -18
  40. autogluon/timeseries/models/local/naive.py +2 -2
  41. autogluon/timeseries/models/local/npts.py +1 -1
  42. autogluon/timeseries/models/local/statsforecast.py +12 -12
  43. autogluon/timeseries/models/multi_window/multi_window_model.py +39 -24
  44. autogluon/timeseries/models/registry.py +3 -4
  45. autogluon/timeseries/models/toto/__init__.py +3 -0
  46. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  47. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  48. autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
  49. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  50. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  51. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  52. autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
  53. autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
  54. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
  55. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  56. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  57. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  58. autogluon/timeseries/models/toto/dataloader.py +108 -0
  59. autogluon/timeseries/models/toto/hf_pretrained_model.py +118 -0
  60. autogluon/timeseries/models/toto/model.py +236 -0
  61. autogluon/timeseries/predictor.py +301 -103
  62. autogluon/timeseries/regressor.py +27 -30
  63. autogluon/timeseries/splitter.py +3 -27
  64. autogluon/timeseries/trainer/ensemble_composer.py +439 -0
  65. autogluon/timeseries/trainer/model_set_builder.py +9 -9
  66. autogluon/timeseries/trainer/prediction_cache.py +16 -16
  67. autogluon/timeseries/trainer/trainer.py +300 -275
  68. autogluon/timeseries/trainer/utils.py +17 -0
  69. autogluon/timeseries/transforms/covariate_scaler.py +8 -8
  70. autogluon/timeseries/transforms/target_scaler.py +15 -15
  71. autogluon/timeseries/utils/constants.py +10 -0
  72. autogluon/timeseries/utils/datetime/lags.py +1 -3
  73. autogluon/timeseries/utils/datetime/seasonality.py +1 -3
  74. autogluon/timeseries/utils/features.py +18 -14
  75. autogluon/timeseries/utils/forecast.py +6 -7
  76. autogluon/timeseries/utils/timer.py +173 -0
  77. autogluon/timeseries/version.py +1 -1
  78. autogluon.timeseries-1.4.1b20251210-py3.11-nspkg.pth +1 -0
  79. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/METADATA +39 -22
  80. autogluon_timeseries-1.4.1b20251210.dist-info/RECORD +103 -0
  81. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/WHEEL +1 -1
  82. autogluon/timeseries/evaluator.py +0 -6
  83. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  84. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  85. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  86. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -580
  87. autogluon.timeseries-1.4.1b20250906-py3.9-nspkg.pth +0 -1
  88. autogluon.timeseries-1.4.1b20250906.dist-info/RECORD +0 -75
  89. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/LICENSE +0 -0
  90. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/NOTICE +0 -0
  91. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/namespace_packages.txt +0 -0
  92. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/top_level.txt +0 -0
  93. {autogluon.timeseries-1.4.1b20250906.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/zip-safe +0 -0

autogluon/timeseries/models/multi_window/multi_window_model.py
@@ -4,13 +4,13 @@ import logging
 import math
 import os
 import time
-from typing import Any, Optional, Type, Union
+from typing import Any, Type

 import numpy as np
 from typing_extensions import Self

 import autogluon.core as ag
-from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
+from autogluon.timeseries.dataset import TimeSeriesDataFrame
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel
 from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
@@ -38,8 +38,8 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):

     def __init__(
         self,
-        model_base: Union[AbstractTimeSeriesModel, Type[AbstractTimeSeriesModel]],
-        model_base_kwargs: Optional[dict[str, Any]] = None,
+        model_base: AbstractTimeSeriesModel | Type[AbstractTimeSeriesModel],
+        model_base_kwargs: dict[str, Any] | None = None,
         **kwargs,
     ):
         if inspect.isclass(model_base) and issubclass(model_base, AbstractTimeSeriesModel):
@@ -58,8 +58,8 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         self.model_base_type = type(self.model_base)
         self.info_per_val_window = []

-        self.most_recent_model: Optional[AbstractTimeSeriesModel] = None
-        self.most_recent_model_folder: Optional[str] = None
+        self.most_recent_model: AbstractTimeSeriesModel | None = None
+        self.most_recent_model_folder: str | None = None
         super().__init__(**kwargs)

     @property
@@ -83,19 +83,19 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def _is_gpu_available(self) -> bool:
         return self._get_model_base()._is_gpu_available()

-    def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, Union[int, float]]:
+    def get_minimum_resources(self, is_gpu_available: bool = False) -> dict[str, int | float]:
         return self._get_model_base().get_minimum_resources(is_gpu_available)

     def _fit(
         self,
         train_data: TimeSeriesDataFrame,
-        val_data: Optional[TimeSeriesDataFrame] = None,
-        time_limit: Optional[float] = None,
-        num_cpus: Optional[int] = None,
-        num_gpus: Optional[int] = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        time_limit: float | None = None,
+        num_cpus: int | None = None,
+        num_gpus: int | None = None,
         verbosity: int = 2,
-        val_splitter: Optional[AbstractWindowSplitter] = None,
-        refit_every_n_windows: Optional[int] = 1,
+        val_splitter: AbstractWindowSplitter | None = None,
+        refit_every_n_windows: int | None = 1,
         **kwargs,
     ):
         # TODO: use incremental training for GluonTS models?
@@ -109,9 +109,9 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         if refit_every_n_windows is None:
             refit_every_n_windows = val_splitter.num_val_windows + 1  # only fit model for the first window

-        oof_predictions_per_window = []
+        oof_predictions_per_window: list[TimeSeriesDataFrame] = []
         global_fit_start_time = time.time()
-        model: Optional[AbstractTimeSeriesModel] = None
+        model: AbstractTimeSeriesModel | None = None

         for window_index, (train_fold, val_fold) in enumerate(val_splitter.split(train_data)):
             logger.debug(f"\tWindow {window_index}")
@@ -142,6 +142,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
                 train_data=train_fold,
                 val_data=val_fold,
                 time_limit=time_left_for_window,
+                verbosity=verbosity,
                 **kwargs,
             )
             model.fit_time = time.time() - model_fit_start_time
@@ -182,8 +183,9 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         self.most_recent_model_folder = most_recent_refit_window  # type: ignore
         self.predict_time = self.most_recent_model.predict_time
         self.fit_time = time.time() - global_fit_start_time - self.predict_time  # type: ignore
-        self._oof_predictions = oof_predictions_per_window
-        self.val_score = np.mean([info["val_score"] for info in self.info_per_val_window])  # type: ignore
+        self.cache_oof_predictions(oof_predictions_per_window)
+
+        self.val_score = float(np.mean([info["val_score"] for info in self.info_per_val_window]))

     def get_info(self) -> dict:
         info = super().get_info()
@@ -198,7 +200,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def _predict(
         self,
         data: TimeSeriesDataFrame,
-        known_covariates: Optional[TimeSeriesDataFrame] = None,
+        known_covariates: TimeSeriesDataFrame | None = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
         if self.most_recent_model is None:
@@ -212,12 +214,25 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         store_predict_time: bool = False,
         **predict_kwargs,
     ) -> None:
-        # self.val_score, self.predict_time, self._oof_predictions already saved during _fit()
-        assert self._oof_predictions is not None
-        if store_val_score:
-            assert self.val_score is not None
+        if self._oof_predictions is None or self.most_recent_model is None:
+            raise ValueError(f"{self.name} must be fit before calling score_and_cache_oof")
+
+        # Score on val_data using the most recent model
+        past_data, known_covariates = val_data.get_model_inputs_for_scoring(
+            prediction_length=self.prediction_length, known_covariates_names=self.covariate_metadata.known_covariates
+        )
+        predict_start_time = time.time()
+        val_predictions = self.most_recent_model.predict(
+            past_data, known_covariates=known_covariates, **predict_kwargs
+        )
+
+        self._oof_predictions.append(val_predictions)
+
         if store_predict_time:
-            assert self.predict_time is not None
+            self.predict_time = time.time() - predict_start_time
+
+        if store_val_score:
+            self.val_score = self._score_with_predictions(val_data, val_predictions)

     def _get_search_space(self):
         return self.model_base._get_search_space()
@@ -234,7 +249,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         train_fn_kwargs["init_params"]["model_base_kwargs"] = self.get_params()
         return train_fn_kwargs

-    def save(self, path: Optional[str] = None, verbose: bool = True) -> str:
+    def save(self, path: str | None = None, verbose: bool = True) -> str:
         most_recent_model = self.most_recent_model
         self.most_recent_model = None
         save_path = super().save(path, verbose)
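
The score_and_cache_oof change above is the behavioral core of this file's diff: instead of asserting that _fit already cached everything, the model now re-predicts on val_data with the most recently fitted window model and appends that forecast to the out-of-fold cache. For orientation, here is a minimal sketch of the window-splitting contract this class relies on; it assumes the public ExpandingWindowSplitter(prediction_length=..., num_val_windows=...) constructor and TimeSeriesDataFrame.from_data_frame helper, and the toy data below is purely illustrative:

    import pandas as pd
    from autogluon.timeseries import TimeSeriesDataFrame
    from autogluon.timeseries.splitter import ExpandingWindowSplitter

    # Toy dataset: one item with 100 hourly observations.
    df = pd.DataFrame(
        {
            "item_id": ["A"] * 100,
            "timestamp": pd.date_range("2024-01-01", periods=100, freq="h"),
            "target": range(100),
        }
    )
    train_data = TimeSeriesDataFrame.from_data_frame(df)

    # Same contract as the _fit loop above: split() yields one
    # (train_fold, val_fold) pair per validation window.
    splitter = ExpandingWindowSplitter(prediction_length=24, num_val_windows=3)
    for window_index, (train_fold, val_fold) in enumerate(splitter.split(train_data)):
        # The base model is refit every refit_every_n_windows windows, and its
        # forecasts on each val_fold are collected as out-of-fold predictions.
        print(window_index, len(train_fold), len(val_fold))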

autogluon/timeseries/models/registry.py
@@ -1,7 +1,6 @@
 from abc import ABCMeta
 from dataclasses import dataclass
 from inspect import isabstract
-from typing import Union


 @dataclass
@@ -44,7 +43,7 @@ class ModelRegistry(ABCMeta):
         cls.REGISTRY[alias] = record

     @classmethod
-    def _get_model_record(cls, alias: Union[str, type]) -> ModelRecord:
+    def _get_model_record(cls, alias: str | type) -> ModelRecord:
         if isinstance(alias, type):
             alias = alias.__name__
         alias = alias.removesuffix("Model")
@@ -53,11 +52,11 @@ class ModelRegistry(ABCMeta):
         return cls.REGISTRY[alias]

     @classmethod
-    def get_model_class(cls, alias: Union[str, type]) -> type:
+    def get_model_class(cls, alias: str | type) -> type:
         return cls._get_model_record(alias).model_class

     @classmethod
-    def get_model_priority(cls, alias: Union[str, type]) -> int:
+    def get_model_priority(cls, alias: str | type) -> int:
         return cls._get_model_record(alias).ag_priority

     @classmethod
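
Since _get_model_record strips the trailing "Model" before the REGISTRY lookup, a class object and its short alias string resolve to the same record. A small usage sketch, assuming (as appears to be the case for the built-in models) that each concrete model registers under its class name minus the "Model" suffix:

    from autogluon.timeseries.models import DeepARModel
    from autogluon.timeseries.models.registry import ModelRegistry

    # Expected to hold if DeepAR is registered under its stripped class name:
    # the string alias, the full class name, and the class itself all resolve
    # to the same registry record.
    assert ModelRegistry.get_model_class("DeepAR") is DeepARModel
    assert ModelRegistry.get_model_class(DeepARModel) is DeepARModel

    # ag_priority is the record's ordering hint used when scheduling models.
    print(ModelRegistry.get_model_priority("DeepAR"))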

autogluon/timeseries/models/toto/__init__.py
@@ -0,0 +1,3 @@
+from .model import TotoModel
+
+__all__ = ["TotoModel"]

autogluon/timeseries/models/toto/_internal/__init__.py
@@ -0,0 +1,9 @@
+from .backbone import TotoBackbone
+from .dataset import MaskedTimeseries
+from .forecaster import TotoForecaster
+
+__all__ = [
+    "MaskedTimeseries",
+    "TotoBackbone",
+    "TotoForecaster",
+]

autogluon/timeseries/models/toto/_internal/backbone/__init__.py
@@ -0,0 +1,3 @@
+from .backbone import TotoBackbone
+
+__all__ = ["TotoBackbone"]

autogluon/timeseries/models/toto/_internal/backbone/attention.py
@@ -0,0 +1,196 @@
+# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+#
+# This product includes software developed at Datadog (https://www.datadoghq.com/)
+# Copyright 2025 Datadog, Inc.
+
+import logging
+from enum import Enum
+
+import torch
+from einops import rearrange
+from torch.nn.functional import scaled_dot_product_attention
+
+from .rope import TimeAwareRotaryEmbedding
+
+log = logging.getLogger(__name__)
+
+
+class AttentionAxis(Enum):
+    TIME = 1
+    SPACE = 2
+
+
+class BaseMultiheadAttention(torch.nn.Module):
+    def __init__(
+        self,
+        embed_dim: int,
+        num_heads: int,
+        dropout: float,
+        rotary_emb: TimeAwareRotaryEmbedding | None,
+        use_memory_efficient_attention: bool,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        self.num_heads = num_heads
+        assert embed_dim % num_heads == 0, "Embedding dimension must be divisible by number of heads."
+        self.head_dim = embed_dim // num_heads
+        self.rotary_emb = rotary_emb
+
+        # We allocate a single tensor for the q, k, and v projection matrices,
+        # multiply them with the inputs, and then split the projected tensors into q, k, and v using unbind.
+        # This reduces overhead a bit vs. having multiple separate Linear layers,
+        # which need to be initialized, tracked by the optimizer, etc.
+        self.wQKV = torch.nn.Linear(embed_dim, embed_dim * 3)
+        self.dropout = dropout
+        self.use_memory_efficient_attention = use_memory_efficient_attention
+        self.wO = torch.nn.Linear(embed_dim, embed_dim)
+
+        assert not self.use_memory_efficient_attention, (
+            "xformers is not available, so use_memory_efficient_attention must be False"
+        )
+
+        if not hasattr(self, "attention_axis") or self.attention_axis not in (AttentionAxis.TIME, AttentionAxis.SPACE):
+            raise ValueError("Child class must define attention_axis as AttentionAxis.TIME or AttentionAxis.SPACE.")
+
+    def rearrange_inputs(self, inputs: torch.Tensor) -> torch.Tensor:
+        pattern = (
+            "batch variate seq_len embed_dim -> (batch variate) seq_len embed_dim"
+            if self.attention_axis == AttentionAxis.TIME
+            else "batch variate seq_len embed_dim -> (batch seq_len) variate embed_dim"
+        )
+
+        return rearrange(inputs, pattern)
+
+    def get_qkv(
+        self,
+        inputs: torch.Tensor,
+    ) -> tuple[torch.Tensor, ...]:
+        pattern: str = ""
+        if self.attention_axis == AttentionAxis.TIME and self.use_memory_efficient_attention:
+            pattern = "batch_X_variate seq_len (qkv head_dim n_heads) -> qkv batch_X_variate seq_len n_heads head_dim"
+        elif self.attention_axis == AttentionAxis.TIME and not self.use_memory_efficient_attention:
+            pattern = "batch_X_variate seq_len (qkv head_dim n_heads) -> qkv batch_X_variate n_heads seq_len head_dim"
+        elif self.attention_axis == AttentionAxis.SPACE and self.use_memory_efficient_attention:
+            pattern = "batch_X_seq_len variate (qkv head_dim n_heads) -> qkv batch_X_seq_len variate n_heads head_dim"
+        elif self.attention_axis == AttentionAxis.SPACE and not self.use_memory_efficient_attention:
+            pattern = "batch_X_seq_len variate (qkv head_dim n_heads) -> qkv batch_X_seq_len n_heads variate head_dim"
+
+        assert pattern
+        qkv = self.wQKV(inputs.contiguous())
+        return rearrange(qkv, pattern, qkv=3, head_dim=self.head_dim, n_heads=self.num_heads).unbind(dim=0)
+
+    def positional_embedding(self, q, k, v, kv_cache, layer_idx):
+        # Apply the rotary embeddings
+        seq_pos_offset = 0
+        if self.rotary_emb is not None and self.attention_axis == AttentionAxis.TIME:
+            if kv_cache is not None:
+                seq_pos_offset = kv_cache.seq_len(layer_idx)
+
+            # We need to permute because rotary embeddings expect the sequence dimension to be the second-to-last dimension
+            q, k = self.rotary_emb.rotate_queries_and_keys(q, k, seq_pos_offset=seq_pos_offset)
+
+        if kv_cache is not None and self.attention_axis == AttentionAxis.TIME:
+            # First, we append the current input key and value tensors to the cache.
+            # This concatenates the current key and value tensors to the existing key and value tensors
+            kv_cache.append(layer_idx, (k, v))
+            # Then, we retrieve the key and value tensors from the cache.
+            # This includes all the key and value tensors from previous time steps
+            # as well as the current time step.
+            k, v = kv_cache[layer_idx]
+
+        q = q.contiguous()
+        k = k.contiguous().to(q.dtype)  # Ensure k is the same dtype as q; this is necessary when using mixed precision
+        v = v.contiguous().to(q.dtype)  # Ensure v is the same dtype as q; this is necessary when using mixed precision
+
+        return q, k, v, seq_pos_offset
+
+    def rearrange_output(self, output: torch.Tensor, batch: int, variate: int, seq_len: int) -> torch.Tensor:
+        if self.attention_axis == AttentionAxis.TIME and self.use_memory_efficient_attention:
+            pattern = "(batch variate) seq_len n_heads head_dim -> batch variate seq_len (n_heads head_dim)"
+        elif self.attention_axis == AttentionAxis.TIME and not self.use_memory_efficient_attention:
+            pattern = "(batch variate) n_heads seq_len head_dim -> batch variate seq_len (n_heads head_dim)"
+        elif self.attention_axis == AttentionAxis.SPACE and self.use_memory_efficient_attention:
+            pattern = "(batch seq_len) variate n_heads head_dim -> batch variate seq_len (n_heads head_dim)"
+        elif self.attention_axis == AttentionAxis.SPACE and not self.use_memory_efficient_attention:
+            pattern = "(batch seq_len) n_heads variate head_dim -> batch variate seq_len (n_heads head_dim)"
+
+        return rearrange(output, pattern, batch=batch, variate=variate, seq_len=seq_len)  # type: ignore
+
+    def run_attention(self, attention_mask, q, k, v, seq_pos_offset, dropout, seq_len, variate):
+        # Determine dimension ranges for attention
+        # Ensure the last query vector index is used from the cache
+        q_dim_start, q_dim_end = seq_pos_offset, seq_pos_offset + seq_len
+        kv_dim_start, kv_dim_end = 0, v.shape[1] if self.use_memory_efficient_attention else v.shape[2]
+        if self.attention_axis == AttentionAxis.TIME:
+            attention_mask = (
+                attention_mask[..., q_dim_start:q_dim_end, kv_dim_start:kv_dim_end]
+                if torch.is_tensor(attention_mask)
+                else None
+            )
+            return scaled_dot_product_attention(
+                q,
+                k,
+                v,
+                attn_mask=attention_mask,
+                dropout_p=dropout,
+                is_causal=(attention_mask is None and seq_pos_offset == 0),
+            )
+        elif self.attention_axis == AttentionAxis.SPACE:
+            # We don't use causal masking for space-wise attention
+            attention_mask = (
+                attention_mask[..., kv_dim_start:kv_dim_end, kv_dim_start:kv_dim_end]
+                if torch.is_tensor(attention_mask)
+                else None
+            )
+            return scaled_dot_product_attention(q, k, v, attn_mask=attention_mask, dropout_p=dropout, is_causal=False)
+        else:
+            raise ValueError("Invalid attention axis")
+
+    def forward(
+        self,
+        layer_idx: int,
+        inputs: torch.Tensor,
+        attention_mask: torch.Tensor | None = None,
+        kv_cache=None,
+    ) -> torch.Tensor:
+        batch_size, variate, seq_len, _ = inputs.shape
+        dropout = self.dropout if self.training else 0.0
+
+        rearranged_inputs = self.rearrange_inputs(inputs)
+        q, k, v = self.get_qkv(rearranged_inputs)
+
+        q, k, v, seq_pos_offset = self.positional_embedding(q, k, v, kv_cache, layer_idx)
+
+        output = self.run_attention(attention_mask, q, k, v, seq_pos_offset, dropout, seq_len, variate)
+
+        output = self.rearrange_output(output, batch_size, variate, seq_len)
+        return self.wO(output)
+
+
+class TimeWiseMultiheadAttention(BaseMultiheadAttention):
+    """
+    Computes standard multihead causal attention over the time axis.
+    It does this by flattening out the variates along the batch dimension.
+    It also applies rotary position embeddings to the query and key matrices
+    in order to incorporate relative positional information.
+    """
+
+    attention_axis = AttentionAxis.TIME
+
+
+class SpaceWiseMultiheadAttention(BaseMultiheadAttention):
+    """
+    Computes bidirectional multihead attention over the space axis (i.e. across variates within
+    a multi-variate time series). This is done by flattening out the time axis along the batch dimension.
+    This allows the model to attend to different variates at the same time point. By alternating
+    between time-wise and space-wise attention, the model can learn both temporal and cross-variate
+    dependencies in the data.
+
+    Unlike with time-wise attention, don't apply rotary embeddings here
+    because we want cross-variate attention to be invariant to the order of the variates.
+    """
+
+    attention_axis = AttentionAxis.SPACE
+
+
+MultiHeadAttention = TimeWiseMultiheadAttention | SpaceWiseMultiheadAttention
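
The two concrete subclasses above differ only in which axis rearrange_inputs folds into the batch dimension before calling scaled_dot_product_attention. This standalone snippet (toy shapes, independent of the Toto code) shows the effect of the two einops patterns:

    import torch
    from einops import rearrange

    x = torch.randn(2, 3, 5, 8)  # (batch, variate, seq_len, embed_dim)

    # Time-wise attention: variates fold into the batch axis, so each variate
    # attends (causally) over its own time steps.
    time_view = rearrange(x, "batch variate seq_len embed_dim -> (batch variate) seq_len embed_dim")
    assert time_view.shape == (2 * 3, 5, 8)

    # Space-wise attention: time steps fold into the batch axis, so the variates
    # at a single time step attend to each other (no causal mask).
    space_view = rearrange(x, "batch variate seq_len embed_dim -> (batch seq_len) variate embed_dim")
    assert space_view.shape == (2 * 5, 3, 8)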

autogluon/timeseries/models/toto/_internal/backbone/backbone.py
@@ -0,0 +1,262 @@
+# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+#
+# This product includes software developed at Datadog (https://www.datadoghq.com/)
+# Copyright 2025 Datadog, Inc.
+
+import math
+from typing import NamedTuple
+
+import torch
+
+from .distribution import MixtureOfStudentTsOutput
+from .kvcache import KVCache
+from .scaler import CausalPatchStdMeanScaler
+from .transformer import Transformer
+
+
+class TotoOutput(NamedTuple):
+    """
+    Output of the Toto model. Contains the output distribution, the location parameters,
+    and the scale parameters.
+    """
+
+    distribution: torch.distributions.Distribution
+    loc: torch.Tensor
+    scale: torch.Tensor
+
+
+def patchify_id_mask(id_mask: torch.Tensor, patch_size: int) -> torch.Tensor:
+    patched_id_mask = id_mask.unfold(dimension=-1, size=patch_size, step=patch_size)
+    patched_id_mask_min = patched_id_mask.min(-1).values
+    patched_id_mask_max = patched_id_mask.max(-1).values
+    assert torch.eq(patched_id_mask_min, patched_id_mask_max).all(), "Patches cannot span multiple datasets"
+    return patched_id_mask_min
+
+
+class PatchEmbedding(torch.nn.Module):
+    """
+    Multivariate time series patch embedding.
+    Patchifies each variate separately.
+    """
+
+    def __init__(self, patch_size: int, stride: int, embed_dim: int):
+        super().__init__()
+        self.patch_size = patch_size
+        self.embed_dim = embed_dim
+        self.stride = stride
+        self.projection = torch.nn.Linear(self.patch_size, self.embed_dim)
+
+    def _patchify(self, x: torch.Tensor) -> torch.Tensor:
+        return x.unfold(dimension=-1, size=self.patch_size, step=self.stride)
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        id_mask: torch.Tensor,
+    ) -> tuple[torch.Tensor, torch.Tensor]:
+        assert x.shape[-1] % self.patch_size == 0, (
+            f"Series length ({x.shape=}) must be divisible by ({self.patch_size=})"
+        )
+        x_patched: torch.Tensor = self._patchify(x)
+        id_mask_patched: torch.Tensor = self._patchify(id_mask)
+
+        assert torch.eq(id_mask_patched.min(-1).values, id_mask_patched.max(-1).values).all(), (
+            "Patches cannot span multiple datasets"
+        )
+
+        return (
+            self.projection(x_patched),
+            id_mask_patched.min(-1).values,
+        )
+
+
+class TotoBackbone(torch.nn.Module):
+    """
+    Toto (Timeseries-Optimized Transformer for Observability) is a transformer-based model for multivariate
+    time series forecasting. It applies a patch embedding to the input data, followed by a transformer
+    that alternates between time-wise and space-wise attention. The transformer is followed by a linear projection
+    that maps the transformer output to the output distribution.
+
+    The output distribution can be a single distribution (e.g. Gaussian) or a mixture of distributions.
+    If a mixture of distributions is used, the model will learn to predict the mixture weights
+    as well as the parameters of the individual distributions.
+
+    Parameters
+    ----------
+    patch_size
+        Size of the patch to use for the patch embedding.
+    stride
+        Stride to use for the patch embedding.
+    embed_dim
+        Dimension of the model's latent space.
+    num_layers
+        Number of transformer layers to use.
+    num_heads
+        Number of attention heads to use in each self-attention layer.
+    mlp_hidden_dim
+        Dimension of the hidden layer in the feedforward network.
+    dropout
+        Dropout rate to use in the model.
+    spacewise_every_n_layers
+        How many time-wise transformer layers to apply between each space-wise transformer layer.
+    spacewise_first
+        Whether to apply space-wise attention before time-wise attention.
+    scaler_cls
+        Class to use for scaling the input data.
+    output_distribution_classes
+        List of classes to use for the output distribution. If a single class is provided, the model
+        will output a single distribution. If multiple classes are provided, the model will output a
+        learned mixture of distributions.
+    output_distribution_kwargs
+        Keyword arguments to pass to the output distribution class. Note: this currently only works
+        with a single output distribution class.
+    use_memory_efficient_attention:
+        Whether to use memory-efficient attention. If True, the model will use the memory-efficient from xFormers.
+    stabilize_with_global:
+        Whether to use global statistics to stabilize causal statistics by clamping extreme values. Only applies to causal scalers.
+    scale_factor_exponent:
+        Exponent that controls the allowed range of deviation from global scale for causal scalers.
+    """
+
+    def __init__(
+        self,
+        patch_size: int,
+        stride: int,
+        embed_dim: int,
+        num_layers: int,
+        num_heads: int,
+        mlp_hidden_dim: int,
+        dropout: float,
+        spacewise_every_n_layers: int,
+        scaler_cls: str,
+        output_distribution_classes: list[str],
+        spacewise_first: bool = True,
+        output_distribution_kwargs: dict | None = None,
+        use_memory_efficient_attention: bool = True,
+        stabilize_with_global: bool = True,
+        scale_factor_exponent: float = 10.0,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        # strings are used when loading a safetensors checkpoint
+        # Initialize patch-based scalers with the correct patch_size
+
+        self.scaler = CausalPatchStdMeanScaler(
+            patch_size=patch_size,
+            stabilize_with_global=stabilize_with_global,
+            scale_factor_exponent=scale_factor_exponent,
+        )
+        self.patch_embed = PatchEmbedding(patch_size, stride, embed_dim)
+        self.dropout = dropout
+        self.num_layers = num_layers
+        self.use_memory_efficient_attention = use_memory_efficient_attention
+        self.transformer = Transformer(
+            embed_dim=embed_dim,
+            num_heads=num_heads,
+            num_layers=self.num_layers,
+            mlp_hidden_dim=mlp_hidden_dim,
+            dropout=dropout,
+            spacewise_every_n_layers=spacewise_every_n_layers,
+            spacewise_first=spacewise_first,
+            use_memory_efficient_attention=self.use_memory_efficient_attention,
+        )
+        self.unembed = torch.nn.Linear(embed_dim, embed_dim * patch_size)
+
+        # TODO[BEN] this doesn't need to be a list
+        output_distribution_classes_ = [MixtureOfStudentTsOutput]
+        self.output_distribution = output_distribution_classes_[0](embed_dim, **(output_distribution_kwargs or {}))
+
+    def allocate_kv_cache(
+        self,
+        batch_size: int,
+        num_variates: int,
+        max_time_steps: int,
+        device: torch.device,
+        dtype: torch.dtype,
+    ) -> KVCache:
+        return KVCache(
+            batch_size=batch_size,
+            num_variates=num_variates,
+            transformer_layers=list(self.transformer.layers),
+            num_layers=self.num_layers,
+            embed_dim=self.embed_dim,
+            num_heads=self.transformer.layers[0].num_heads,  # type: ignore
+            max_seq_len=math.ceil(max_time_steps / self.patch_embed.stride),
+            device=device,
+            dtype=dtype,
+            use_memory_efficient_attention=self.use_memory_efficient_attention,
+        )
+
+    def backbone(
+        self,
+        inputs: torch.Tensor,
+        input_padding_mask: torch.Tensor,
+        id_mask: torch.Tensor,
+        kv_cache: KVCache | None = None,
+        scaling_prefix_length: int | None = None,
+    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        scaled_inputs: torch.Tensor
+        loc: torch.Tensor
+        scale: torch.Tensor
+
+        # Standard scaling operation, same API but without ID mask.
+        scaled_inputs, loc, scale = self.scaler(
+            inputs,
+            weights=torch.ones_like(inputs, device=inputs.device),
+            padding_mask=input_padding_mask,
+            prefix_length=scaling_prefix_length,
+        )
+
+        if kv_cache is not None:
+            prefix_len = self.patch_embed.stride * kv_cache.current_len(0)
+
+            # Truncate inputs so that the transformer only processes
+            # the last patch in the sequence. We'll use the KVCache
+            # for the earlier patches.
+            scaled_inputs = scaled_inputs[:, :, prefix_len:]
+
+            # As a simplification, when using kv cache we only allow decoding
+            # one step at a time after the initial forward pass.
+            assert (prefix_len == 0) or (scaled_inputs.shape[-1] == self.patch_embed.stride), (
+                "Must decode one step at a time."
+            )
+
+            input_padding_mask = input_padding_mask[:, :, prefix_len:]
+            id_mask = id_mask[:, :, prefix_len:]
+
+        embeddings: torch.Tensor
+        reduced_id_mask: torch.Tensor
+
+        embeddings, reduced_id_mask = self.patch_embed(scaled_inputs, id_mask)
+
+        # Apply the transformer on the embeddings
+        transformed: torch.Tensor = self.transformer(embeddings, reduced_id_mask, kv_cache)
+
+        # Unembed and flatten the sequence
+        unembedded = self.unembed(transformed)
+        batch_size, num_variates, seq_len = unembedded.shape[:3]
+        patch_size = unembedded.shape[-1] // self.embed_dim
+        flattened = unembedded.view(batch_size, num_variates, seq_len * patch_size, self.embed_dim)
+        return flattened, loc, scale
+
+    def forward(
+        self,
+        inputs: torch.Tensor,
+        input_padding_mask: torch.Tensor,
+        id_mask: torch.Tensor,
+        kv_cache: KVCache | None = None,
+        scaling_prefix_length: int | None = None,
+    ) -> TotoOutput:
+        flattened, loc, scale = self.backbone(
+            inputs,
+            input_padding_mask,
+            id_mask,
+            kv_cache,
+            scaling_prefix_length,
+        )
+
+        return TotoOutput(self.output_distribution(flattened), loc, scale)
+
+    @property
+    def device(self):
+        return next(self.parameters()).device
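
PatchEmbedding above cuts each variate into patches with Tensor.unfold; the divisibility assert in its forward exists because unfold silently drops a trailing partial window. A toy illustration of the shape contract (plain PyTorch, outside the Toto code):

    import torch

    x = torch.arange(12.0).reshape(1, 1, 12)  # (batch, variate, time_steps)

    # Non-overlapping patches, as in TotoBackbone when stride == patch_size:
    patches = x.unfold(dimension=-1, size=4, step=4)
    assert patches.shape == (1, 1, 3, 4)  # 3 patches of 4 time steps each

    # With a length not divisible by the patch size, unfold drops the
    # remainder silently: hence the assert in PatchEmbedding.forward.
    y = torch.arange(10.0).reshape(1, 1, 10)
    assert y.unfold(dimension=-1, size=4, step=4).shape == (1, 1, 2, 4)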