autogluon.timeseries 1.3.2b20250712__py3-none-any.whl → 1.4.1b20251116__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. autogluon/timeseries/configs/__init__.py +3 -2
  2. autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
  3. autogluon/timeseries/configs/predictor_presets.py +84 -0
  4. autogluon/timeseries/dataset/ts_dataframe.py +98 -72
  5. autogluon/timeseries/learner.py +19 -18
  6. autogluon/timeseries/metrics/__init__.py +5 -5
  7. autogluon/timeseries/metrics/abstract.py +17 -17
  8. autogluon/timeseries/metrics/point.py +1 -1
  9. autogluon/timeseries/metrics/quantile.py +2 -2
  10. autogluon/timeseries/metrics/utils.py +4 -4
  11. autogluon/timeseries/models/__init__.py +4 -0
  12. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -75
  13. autogluon/timeseries/models/abstract/tunable.py +6 -6
  14. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +72 -76
  15. autogluon/timeseries/models/autogluon_tabular/per_step.py +104 -46
  16. autogluon/timeseries/models/autogluon_tabular/transforms.py +9 -7
  17. autogluon/timeseries/models/chronos/model.py +115 -78
  18. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +76 -44
  19. autogluon/timeseries/models/ensemble/__init__.py +29 -2
  20. autogluon/timeseries/models/ensemble/abstract.py +16 -52
  21. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  22. autogluon/timeseries/models/ensemble/array_based/abstract.py +247 -0
  23. autogluon/timeseries/models/ensemble/array_based/models.py +50 -0
  24. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +10 -0
  25. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +87 -0
  26. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +133 -0
  27. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +141 -0
  28. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  29. autogluon/timeseries/models/ensemble/weighted/abstract.py +41 -0
  30. autogluon/timeseries/models/ensemble/{basic.py → weighted/basic.py} +8 -18
  31. autogluon/timeseries/models/ensemble/{greedy.py → weighted/greedy.py} +13 -13
  32. autogluon/timeseries/models/gluonts/abstract.py +26 -26
  33. autogluon/timeseries/models/gluonts/dataset.py +4 -4
  34. autogluon/timeseries/models/gluonts/models.py +27 -12
  35. autogluon/timeseries/models/local/abstract_local_model.py +14 -14
  36. autogluon/timeseries/models/local/naive.py +4 -0
  37. autogluon/timeseries/models/local/npts.py +1 -0
  38. autogluon/timeseries/models/local/statsforecast.py +30 -14
  39. autogluon/timeseries/models/multi_window/multi_window_model.py +34 -23
  40. autogluon/timeseries/models/registry.py +65 -0
  41. autogluon/timeseries/models/toto/__init__.py +3 -0
  42. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  43. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  44. autogluon/timeseries/models/toto/_internal/backbone/attention.py +197 -0
  45. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  46. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  47. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  48. autogluon/timeseries/models/toto/_internal/backbone/rope.py +94 -0
  49. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +306 -0
  50. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  51. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  52. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  53. autogluon/timeseries/models/toto/dataloader.py +108 -0
  54. autogluon/timeseries/models/toto/hf_pretrained_model.py +119 -0
  55. autogluon/timeseries/models/toto/model.py +236 -0
  56. autogluon/timeseries/predictor.py +94 -107
  57. autogluon/timeseries/regressor.py +31 -27
  58. autogluon/timeseries/splitter.py +7 -31
  59. autogluon/timeseries/trainer/__init__.py +3 -0
  60. autogluon/timeseries/trainer/ensemble_composer.py +250 -0
  61. autogluon/timeseries/trainer/model_set_builder.py +256 -0
  62. autogluon/timeseries/trainer/prediction_cache.py +149 -0
  63. autogluon/timeseries/{trainer.py → trainer/trainer.py} +182 -307
  64. autogluon/timeseries/trainer/utils.py +18 -0
  65. autogluon/timeseries/transforms/covariate_scaler.py +4 -4
  66. autogluon/timeseries/transforms/target_scaler.py +14 -14
  67. autogluon/timeseries/utils/datetime/lags.py +2 -2
  68. autogluon/timeseries/utils/datetime/time_features.py +2 -2
  69. autogluon/timeseries/utils/features.py +41 -37
  70. autogluon/timeseries/utils/forecast.py +5 -5
  71. autogluon/timeseries/utils/warning_filters.py +3 -1
  72. autogluon/timeseries/version.py +1 -1
  73. autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth +1 -0
  74. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/METADATA +32 -17
  75. autogluon_timeseries-1.4.1b20251116.dist-info/RECORD +96 -0
  76. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/WHEEL +1 -1
  77. autogluon/timeseries/configs/presets_configs.py +0 -79
  78. autogluon/timeseries/evaluator.py +0 -6
  79. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -10
  80. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  81. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -544
  82. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -530
  83. autogluon/timeseries/models/presets.py +0 -358
  84. autogluon.timeseries-1.3.2b20250712-py3.9-nspkg.pth +0 -1
  85. autogluon.timeseries-1.3.2b20250712.dist-info/RECORD +0 -71
  86. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/LICENSE +0 -0
  87. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info/licenses}/NOTICE +0 -0
  88. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/namespace_packages.txt +0 -0
  89. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/top_level.txt +0 -0
  90. {autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/zip-safe +0 -0
autogluon/timeseries/trainer/utils.py
@@ -0,0 +1,18 @@
+ import logging
+ from typing import Optional
+
+ logger = logging.getLogger("autogluon.timeseries.trainer")
+
+
+ def log_scores_and_times(
+     val_score: Optional[float],
+     fit_time: Optional[float],
+     predict_time: Optional[float],
+     eval_metric_name: str,
+ ):
+     if val_score is not None:
+         logger.info(f"\t{val_score:<7.4f}".ljust(15) + f"= Validation score ({eval_metric_name})")
+     if fit_time is not None:
+         logger.info(f"\t{fit_time:<7.2f} s".ljust(15) + "= Training runtime")
+     if predict_time is not None:
+         logger.info(f"\t{predict_time:<7.2f} s".ljust(15) + "= Validation (prediction) runtime")
autogluon/timeseries/transforms/covariate_scaler.py
@@ -1,12 +1,12 @@
  import logging
- from typing import Dict, List, Literal, Optional, Protocol, overload, runtime_checkable
+ from typing import Literal, Optional, Protocol, overload, runtime_checkable

  import numpy as np
  import pandas as pd
  from sklearn.compose import ColumnTransformer
  from sklearn.preprocessing import QuantileTransformer, StandardScaler

- from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
  from autogluon.timeseries.utils.features import CovariateMetadata
  from autogluon.timeseries.utils.warning_filters import warning_filter

@@ -53,7 +53,7 @@ class GlobalCovariateScaler(CovariateScaler):
          self.use_past_covariates = use_past_covariates
          self.use_static_features = use_static_features
          self.skew_threshold = skew_threshold
-         self._column_transformers: Optional[Dict[Literal["known", "past", "static"], ColumnTransformer]] = None
+         self._column_transformers: Optional[dict[Literal["known", "past", "static"], ColumnTransformer]] = None

      def is_fit(self) -> bool:
          return self._column_transformers is not None
@@ -117,7 +117,7 @@ class GlobalCovariateScaler(CovariateScaler):
          known_covariates[columns] = self._column_transformers["known"].transform(known_covariates[columns])
          return known_covariates

-     def _get_transformer_for_columns(self, df: pd.DataFrame, columns: List[str]) -> ColumnTransformer:
+     def _get_transformer_for_columns(self, df: pd.DataFrame, columns: list[str]) -> ColumnTransformer:
          """Passthrough bool features, use QuantileTransform for skewed features, and use StandardScaler for the rest.

          The preprocessing logic is similar to the TORCH_NN model from Tabular.
autogluon/timeseries/transforms/target_scaler.py
@@ -1,10 +1,10 @@
- from typing import Literal, Optional, Protocol, Tuple, Union, overload
+ from typing import Literal, Optional, Protocol, Union, overload

  import numpy as np
  import pandas as pd
  from typing_extensions import Self

- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame


  class TargetScaler(Protocol):
@@ -30,7 +30,7 @@ class LocalTargetScaler(TargetScaler):
          self.loc: Optional[pd.Series] = None
          self.scale: Optional[pd.Series] = None

-     def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[Optional[pd.Series], Optional[pd.Series]]:
+     def _compute_loc_scale(self, target_series: pd.Series) -> tuple[Optional[pd.Series], Optional[pd.Series]]:
          raise NotImplementedError

      def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
@@ -45,7 +45,7 @@ class LocalTargetScaler(TargetScaler):
          self.scale = self.scale.clip(lower=self.min_scale).replace([np.inf, -np.inf], np.nan).fillna(1.0)
          return self

-     def _reindex_loc_scale(self, item_index: pd.Index) -> Tuple[Union[np.ndarray, float], Union[np.ndarray, float]]:
+     def _reindex_loc_scale(self, item_index: pd.Index) -> tuple[Union[np.ndarray, float], Union[np.ndarray, float]]:
          """Reindex loc and scale parameters for the given item_ids and convert them to an array-like."""
          if self.loc is not None:
              loc = self.loc.reindex(item_index).to_numpy()
@@ -59,12 +59,12 @@ class LocalTargetScaler(TargetScaler):

      def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
          """Apply scaling to the target column in the dataframe."""
-         loc, scale = self._reindex_loc_scale(item_index=data.index.get_level_values(ITEMID))
+         loc, scale = self._reindex_loc_scale(item_index=data.index.get_level_values(TimeSeriesDataFrame.ITEMID))
          return data.assign(**{self.target: (data[self.target] - loc) / scale})

      def inverse_transform(self, predictions: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
          """Apply inverse scaling to all columns in the predictions dataframe."""
-         loc, scale = self._reindex_loc_scale(item_index=predictions.index.get_level_values(ITEMID))
+         loc, scale = self._reindex_loc_scale(item_index=predictions.index.get_level_values(TimeSeriesDataFrame.ITEMID))
          return predictions.assign(**{col: predictions[col] * scale + loc for col in predictions.columns})


@@ -74,16 +74,16 @@ class LocalStandardScaler(LocalTargetScaler):
      The resulting affine transformation is (x - loc) / scale, where scale = std(x), loc = mean(x).
      """

-     def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
-         stats = target_series.groupby(level=ITEMID, sort=False).agg(["mean", "std"])
+     def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
+         stats = target_series.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg(["mean", "std"])
          return stats["mean"], stats["std"]


  class LocalMeanAbsScaler(LocalTargetScaler):
      """Applies mean absolute scaling to each time series in the dataset."""

-     def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[Optional[pd.Series], pd.Series]:
-         scale = target_series.abs().groupby(level=ITEMID, sort=False).agg("mean")
+     def _compute_loc_scale(self, target_series: pd.Series) -> tuple[Optional[pd.Series], pd.Series]:
+         scale = target_series.abs().groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg("mean")
          return None, scale


@@ -93,8 +93,8 @@ class LocalMinMaxScaler(LocalTargetScaler):
      The resulting affine transformation is (x - loc) / scale, where scale = max(x) - min(x), loc = min(x) / scale.
      """

-     def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
-         stats = target_series.abs().groupby(level=ITEMID, sort=False).agg(["min", "max"])
+     def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
+         stats = target_series.abs().groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg(["min", "max"])
          scale = (stats["max"] - stats["min"]).clip(lower=self.min_scale)
          loc = stats["min"]
          return loc, scale
@@ -117,8 +117,8 @@ class LocalRobustScaler(LocalTargetScaler):
          self.q_max = 0.75
          assert 0 < self.q_min < self.q_max < 1

-     def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
-         grouped = target_series.groupby(level=ITEMID, sort=False)
+     def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
+         grouped = target_series.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False)
          loc = grouped.median()
          lower = grouped.quantile(self.q_min)
          upper = grouped.quantile(self.q_max)
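For orientation (not part of the diff): a small self-contained pandas sketch of the per-item affine scaling pattern these scaler classes implement, grouping on the item level of the index exactly as the updated `_compute_loc_scale` code does. Index names and data here are hypothetical.

```python
import numpy as np
import pandas as pd

# Hypothetical long-format target series with an (item_id, timestamp) MultiIndex.
index = pd.MultiIndex.from_product(
    [["item_A", "item_B"], pd.date_range("2024-01-01", periods=4, freq="D")],
    names=["item_id", "timestamp"],
)
target = pd.Series(np.arange(8, dtype="float64"), index=index)

# Per-item mean/std, mirroring groupby(level=<item level>, sort=False).agg(["mean", "std"]).
stats = target.groupby(level="item_id", sort=False).agg(["mean", "std"])
loc = stats["mean"].reindex(target.index.get_level_values("item_id")).to_numpy()
scale = stats["std"].reindex(target.index.get_level_values("item_id")).to_numpy()

# The affine transform described in the docstrings: (x - loc) / scale.
scaled = (target - loc) / scale
print(scaled.groupby(level="item_id").std())  # approximately 1.0 per item
```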
autogluon/timeseries/utils/datetime/lags.py
@@ -2,7 +2,7 @@
  Generate lag indices based on frequency string. Adapted from gluonts.time_feature.lag.
  """

- from typing import List, Optional
+ from typing import Optional

  import numpy as np
  import pandas as pd
@@ -72,7 +72,7 @@ def get_lags_for_frequency(
      lag_ub: int = 1200,
      num_lags: Optional[int] = None,
      num_default_lags: int = 7,
- ) -> List[int]:
+ ) -> list[int]:
      """
      Generates a list of lags that that are appropriate for the given frequency
      string.
autogluon/timeseries/utils/datetime/time_features.py
@@ -2,7 +2,7 @@
  Generate time features based on frequency string. Adapted from gluonts.time_feature.time_feature.
  """

- from typing import Callable, List
+ from typing import Callable

  import numpy as np
  import pandas as pd
@@ -56,7 +56,7 @@ def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:
      return _normalize(index.second, num=60)


- def get_time_features_for_frequency(freq) -> List[Callable]:
+ def get_time_features_for_frequency(freq) -> list[Callable]:
      features_by_offset_name = {
          "YE": [],
          "QE": [quarter_of_year],
autogluon/timeseries/utils/features.py
@@ -2,7 +2,7 @@ import logging
  import reprlib
  import time
  from dataclasses import asdict, dataclass, field
- from typing import Any, Dict, List, Literal, Optional, Tuple
+ from typing import Any, Literal, Optional

  import numpy as np
  import pandas as pd
@@ -14,7 +14,7 @@ from autogluon.features.generators import (
      IdentityFeatureGenerator,
      PipelineFeatureGenerator,
  )
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
  from autogluon.timeseries.utils.warning_filters import warning_filter

  logger = logging.getLogger(__name__)
@@ -24,50 +24,50 @@ logger = logging.getLogger(__name__)
  class CovariateMetadata:
      """Provides mapping from different covariate types to columns in the dataset."""

-     static_features_cat: List[str] = field(default_factory=list)
-     static_features_real: List[str] = field(default_factory=list)
-     known_covariates_real: List[str] = field(default_factory=list)
-     known_covariates_cat: List[str] = field(default_factory=list)
-     past_covariates_real: List[str] = field(default_factory=list)
-     past_covariates_cat: List[str] = field(default_factory=list)
+     static_features_cat: list[str] = field(default_factory=list)
+     static_features_real: list[str] = field(default_factory=list)
+     known_covariates_real: list[str] = field(default_factory=list)
+     known_covariates_cat: list[str] = field(default_factory=list)
+     past_covariates_real: list[str] = field(default_factory=list)
+     past_covariates_cat: list[str] = field(default_factory=list)

      @property
-     def static_features(self) -> List[str]:
+     def static_features(self) -> list[str]:
          return self.static_features_cat + self.static_features_real

      @property
-     def known_covariates(self) -> List[str]:
+     def known_covariates(self) -> list[str]:
          return self.known_covariates_cat + self.known_covariates_real

      @property
-     def past_covariates(self) -> List[str]:
+     def past_covariates(self) -> list[str]:
          return self.past_covariates_cat + self.past_covariates_real

      @property
-     def covariates(self) -> List[str]:
+     def covariates(self) -> list[str]:
          return self.known_covariates + self.past_covariates

      @property
-     def covariates_real(self) -> List[str]:
+     def covariates_real(self) -> list[str]:
          return self.known_covariates_real + self.past_covariates_real

      @property
-     def covariates_cat(self) -> List[str]:
+     def covariates_cat(self) -> list[str]:
          return self.known_covariates_cat + self.past_covariates_cat

      @property
-     def real_features(self) -> List[str]:
+     def real_features(self) -> list[str]:
          return self.static_features_real + self.covariates_real

      @property
-     def cat_features(self) -> List[str]:
+     def cat_features(self) -> list[str]:
          return self.static_features_cat + self.covariates_cat

      @property
-     def all_features(self) -> List[str]:
+     def all_features(self) -> list[str]:
          return self.static_features + self.covariates

-     def to_dict(self) -> Dict[str, Any]:
+     def to_dict(self) -> dict[str, Any]:
          return asdict(self)


@@ -120,13 +120,13 @@ class TimeSeriesFeatureGenerator:

      Parameters
      ----------
-     target : str
+     target
          Name of the target column.
-     known_covariates_names : List[str]
+     known_covariates_names
          Columns that contain covariates that are known into the future.
-     float_dtype : str, default = "float32"
+     float_dtype
          Numpy float dtype to which all numeric columns (float, int, bool) will be converted both in static & dynamic dfs.
-     num_samples : int or None, default = 20_000
+     num_samples
          Number of rows sampled from the training dataset to speed up computation of the median (used later for imputation).
          If set to `None`, median will be computed using all rows.
      """
@@ -134,7 +134,7 @@ class TimeSeriesFeatureGenerator:
      def __init__(
          self,
          target: str,
-         known_covariates_names: List[str],
+         known_covariates_names: list[str],
          float_dtype: str = "float32",
          num_samples: Optional[int] = 20_000,
      ):
@@ -143,8 +143,8 @@ class TimeSeriesFeatureGenerator:
          self.num_samples = num_samples

          self._is_fit = False
-         self.known_covariates_names: List[str] = list(known_covariates_names)
-         self.past_covariates_names: List[str] = []
+         self.known_covariates_names: list[str] = list(known_covariates_names)
+         self.past_covariates_names: list[str] = []
          self.known_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
          self.past_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
          # Cat features with cat_count=1 are fine in static_features since they are repeated for all time steps in a TS
@@ -154,7 +154,7 @@ class TimeSeriesFeatureGenerator:
          self._train_static_real_median: Optional[pd.Series] = None

      @property
-     def required_column_names(self) -> List[str]:
+     def required_column_names(self) -> list[str]:
          return [self.target] + list(self.known_covariates_names) + list(self.past_covariates_names)

      @property
@@ -262,13 +262,13 @@ class TimeSeriesFeatureGenerator:
          return self._impute_covariates(ts_df, column_names=self.covariate_metadata.covariates_real)

      @staticmethod
-     def _concat_dfs(dfs_to_concat: List[pd.DataFrame]) -> pd.DataFrame:
+     def _concat_dfs(dfs_to_concat: list[pd.DataFrame]) -> pd.DataFrame:
          if len(dfs_to_concat) == 1:
              return dfs_to_concat[0]
          else:
              return pd.concat(dfs_to_concat, axis=1, copy=False)

-     def _impute_covariates(self, ts_df: TimeSeriesDataFrame, column_names: List[str]) -> TimeSeriesDataFrame:
+     def _impute_covariates(self, ts_df: TimeSeriesDataFrame, column_names: list[str]) -> TimeSeriesDataFrame:
          """Impute missing values in selected columns with ffill, bfill, and median imputation."""
          if len(column_names) > 0:
              # ffill + bfill covariates that have at least some observed values
@@ -346,10 +346,10 @@ class TimeSeriesFeatureGenerator:
          return None

      @staticmethod
-     def _detect_and_log_column_types(transformed_df: pd.DataFrame) -> Tuple[List[str], List[str]]:
+     def _detect_and_log_column_types(transformed_df: pd.DataFrame) -> tuple[list[str], list[str]]:
          """Log & return names of categorical and real-valued columns in the DataFrame."""
-         cat_column_names: List[str] = []
-         real_column_names: List[str] = []
+         cat_column_names: list[str] = []
+         real_column_names: list[str] = []
          for column_name, column_dtype in transformed_df.dtypes.items():
              if isinstance(column_dtype, pd.CategoricalDtype):
                  cat_column_names.append(str(column_name))
@@ -362,9 +362,9 @@ class TimeSeriesFeatureGenerator:

      @staticmethod
      def _check_required_columns_are_present(
-         data: TimeSeriesDataFrame, required_column_names: List[str], data_frame_name: str
+         data: TimeSeriesDataFrame, required_column_names: list[str], data_frame_name: str
      ) -> None:
-         missing_columns = pd.Index(required_column_names).difference(data.columns)
+         missing_columns = pd.Index(required_column_names).difference(data.columns)  # type: ignore
          if len(missing_columns) > 0:
              raise ValueError(
                  f"{len(missing_columns)} columns are missing from {data_frame_name}: {reprlib.repr(missing_columns.to_list())}"
@@ -415,7 +415,9 @@ class AbstractFeatureImportanceTransform:
          if feature_name in self.covariate_metadata.past_covariates:
              # we'll have to work on the history of the data alone
              data[feature_name] = data[feature_name].copy()
-             feature_data = data[feature_name].groupby(level=ITEMID, sort=False).head(-self.prediction_length)
+             feature_data = (
+                 data[feature_name].groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).head(-self.prediction_length)
+             )
              # Silence spurious FutureWarning raised by DataFrame.update https://github.com/pandas-dev/pandas/issues/57124
              with warning_filter():
                  data[feature_name].update(self._transform_series(feature_data, is_categorical=is_categorical))
@@ -455,7 +457,7 @@ class PermutationFeatureImportanceTransform(AbstractFeatureImportanceTransform):
          rng = np.random.RandomState(self.random_seed)

          if self.shuffle_type == "itemwise":
-             return feature_data.groupby(level=ITEMID, sort=False).transform(
+             return feature_data.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).transform(
                  lambda x: x.sample(frac=1, random_state=rng).values
              )
          elif self.shuffle_type == "naive":
@@ -483,6 +483,8 @@ class ConstantReplacementFeatureImportanceTransform(AbstractFeatureImportanceTransform):

      def _transform_series(self, feature_data: pd.Series, is_categorical: bool) -> pd.Series:
          if is_categorical:
-             return feature_data.groupby(level=ITEMID, sort=False).transform(lambda x: x.mode()[0])
+             return feature_data.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).transform(lambda x: x.mode()[0])
          else:
-             return feature_data.groupby(level=ITEMID, sort=False).transform(self.real_value_aggregation)
+             return feature_data.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).transform(
+                 self.real_value_aggregation
+             )  # type: ignore
autogluon/timeseries/utils/forecast.py
@@ -5,7 +5,7 @@ import numpy as np
  import pandas as pd

  from autogluon.common.utils.deprecated_utils import Deprecated
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame


  def get_forecast_horizon_index_single_time_series(
@@ -16,7 +16,7 @@ def get_forecast_horizon_index_single_time_series(
      if offset is None:
          raise ValueError(f"Invalid frequency: {freq}")
      start_ts = past_timestamps.max() + 1 * offset
-     return pd.date_range(start=start_ts, periods=prediction_length, freq=freq, name=TIMESTAMP)
+     return pd.date_range(start=start_ts, periods=prediction_length, freq=freq, name=TimeSeriesDataFrame.TIMESTAMP)


  @Deprecated(
@@ -37,14 +37,14 @@ def make_future_data_frame(
      """
      indptr = ts_dataframe.get_indptr()
      last = ts_dataframe.index[indptr[1:] - 1].to_frame(index=False)
-     item_ids = np.repeat(last[ITEMID].to_numpy(), prediction_length)
+     item_ids = np.repeat(last[TimeSeriesDataFrame.ITEMID].to_numpy(), prediction_length)

      if freq is None:
          freq = ts_dataframe.freq
      offset = pd.tseries.frequencies.to_offset(freq)
-     last_ts = pd.DatetimeIndex(last[TIMESTAMP])
+     last_ts = pd.DatetimeIndex(last[TimeSeriesDataFrame.TIMESTAMP])
      # Non-vectorized offsets like BusinessDay may produce a PerformanceWarning - we filter them
      with warnings.catch_warnings():
          warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning)
          timestamps = np.dstack([last_ts + step * offset for step in range(1, prediction_length + 1)]).ravel()  # type: ignore[operator]
-     return pd.DataFrame({ITEMID: item_ids, TIMESTAMP: timestamps})
+     return pd.DataFrame({TimeSeriesDataFrame.ITEMID: item_ids, TimeSeriesDataFrame.TIMESTAMP: timestamps})
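For orientation (not part of the diff): a small standalone sketch of the forecast-horizon index construction that `get_forecast_horizon_index_single_time_series` performs, using only pandas. The input timestamps, frequency, and index name here are hypothetical.

```python
import pandas as pd

# Hypothetical history of one series: 5 daily observations.
past_timestamps = pd.date_range("2024-01-01", periods=5, freq="D")
prediction_length, freq = 3, "D"

# Same idea as in the hunk above: start one offset after the last observed timestamp,
# then generate `prediction_length` future timestamps at the series frequency.
offset = pd.tseries.frequencies.to_offset(freq)
start_ts = past_timestamps.max() + 1 * offset
future_index = pd.date_range(start=start_ts, periods=prediction_length, freq=freq, name="timestamp")
print(future_index)  # 2024-01-06, 2024-01-07, 2024-01-08
```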
autogluon/timeseries/utils/warning_filters.py
@@ -8,12 +8,14 @@ import sys
  import warnings
  from collections import Counter

+ import pandas as pd
+
  __all__ = ["warning_filter", "disable_root_logger", "disable_tqdm"]


  @contextlib.contextmanager
  def warning_filter(all_warnings: bool = False):
-     categories = [RuntimeWarning, UserWarning, FutureWarning]
+     categories = [RuntimeWarning, UserWarning, FutureWarning, pd.errors.PerformanceWarning]
      if all_warnings:
          categories.append(Warning)
      with warnings.catch_warnings():
autogluon/timeseries/version.py
@@ -1,4 +1,4 @@
  """This is the autogluon version file."""

- __version__ = "1.3.2b20250712"
+ __version__ = "1.4.1b20251116"
  __lite__ = False
autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth
@@ -0,0 +1 @@
+ import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = __import__('importlib.util');__import__('importlib.machinery');m = sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
{autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/METADATA
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: autogluon.timeseries
- Version: 1.3.2b20250712
+ Version: 1.4.1b20251116
  Summary: Fast and Accurate ML in 3 Lines of Code
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community
@@ -9,7 +9,6 @@ Project-URL: Documentation, https://auto.gluon.ai
  Project-URL: Bug Reports, https://github.com/autogluon/autogluon/issues
  Project-URL: Source, https://github.com/autogluon/autogluon/
  Project-URL: Contribute!, https://github.com/autogluon/autogluon/blob/master/CONTRIBUTING.md
- Platform: UNKNOWN
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Education
  Classifier: Intended Audience :: Developers
@@ -34,15 +33,15 @@ Classifier: Topic :: Scientific/Engineering :: Information Analysis
  Classifier: Topic :: Scientific/Engineering :: Image Recognition
  Requires-Python: >=3.9, <3.13
  Description-Content-Type: text/markdown
- License-File: ../LICENSE
- License-File: ../NOTICE
- Requires-Dist: joblib<2,>=1.1
+ License-File: LICENSE
+ License-File: NOTICE
+ Requires-Dist: joblib<1.7,>=1.2
  Requires-Dist: numpy<2.4.0,>=1.25.0
  Requires-Dist: scipy<1.17,>=1.5.4
  Requires-Dist: pandas<2.4.0,>=2.0.0
- Requires-Dist: torch<2.8,>=2.2
- Requires-Dist: lightning<2.8,>=2.2
- Requires-Dist: pytorch-lightning
+ Requires-Dist: torch<2.8,>=2.6
+ Requires-Dist: lightning<2.8,>=2.5.1
+ Requires-Dist: pytorch_lightning
  Requires-Dist: transformers[sentencepiece]<4.50,>=4.38.0
  Requires-Dist: accelerate<2.0,>=0.34.0
  Requires-Dist: gluonts<0.17,>=0.15.0
@@ -54,17 +53,35 @@ Requires-Dist: coreforecast<0.0.17,>=0.0.12
  Requires-Dist: fugue>=0.9.0
  Requires-Dist: tqdm<5,>=4.38
  Requires-Dist: orjson~=3.9
+ Requires-Dist: chronos-forecasting<3,>=2.0.1
  Requires-Dist: tensorboard<3,>=2.9
- Requires-Dist: autogluon.core[raytune]==1.3.2b20250712
- Requires-Dist: autogluon.common==1.3.2b20250712
- Requires-Dist: autogluon.features==1.3.2b20250712
- Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.3.2b20250712
- Provides-Extra: all
+ Requires-Dist: autogluon.core[raytune]==1.4.1b20251116
+ Requires-Dist: autogluon.common==1.4.1b20251116
+ Requires-Dist: autogluon.features==1.4.1b20251116
+ Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20251116
  Provides-Extra: tests
  Requires-Dist: pytest; extra == "tests"
  Requires-Dist: ruff>=0.0.285; extra == "tests"
  Requires-Dist: flaky<4,>=3.7; extra == "tests"
  Requires-Dist: pytest-timeout<3,>=2.1; extra == "tests"
+ Provides-Extra: toto
+ Requires-Dist: einops<1,>=0.7; extra == "toto"
+ Requires-Dist: rotary-embedding-torch<1,>=0.8; extra == "toto"
+ Provides-Extra: all
+ Requires-Dist: rotary-embedding-torch<1,>=0.8; extra == "all"
+ Requires-Dist: einops<1,>=0.7; extra == "all"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: project-url
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary



@@ -108,7 +125,7 @@ Build accurate end-to-end ML models in just 3 lines of code!

  ```python
  from autogluon.tabular import TabularPredictor
- predictor = TabularPredictor(label="class").fit("train.csv")
+ predictor = TabularPredictor(label="class").fit("train.csv", presets="best")
  predictions = predictor.predict("test.csv")
  ```

@@ -165,5 +182,3 @@ We are actively accepting code contributions to the AutoGluon project. If you ar
  ## :classical_building: License

  This library is licensed under the Apache 2.0 License.
-
-
autogluon_timeseries-1.4.1b20251116.dist-info/RECORD
@@ -0,0 +1,96 @@
+ autogluon.timeseries-1.4.1b20251116-py3.9-nspkg.pth,sha256=kAlKxjI5mE3Pwwqphu2maN5OBQk8W8ew70e_qbI1c6A,482
+ autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
+ autogluon/timeseries/learner.py,sha256=XTQgfZs5ZQf_7mWUz-CNnavewrfNy3ENwtGMRJWwwPQ,13889
+ autogluon/timeseries/predictor.py,sha256=khISLnhVxTWMhE0WVCcTgm79K4Q9IuC-jHe01A9w1go,87468
+ autogluon/timeseries/regressor.py,sha256=X9ItbQ0e3GyLpKqusjMls5uavqw8w53AH0tXfSFmVno,12049
+ autogluon/timeseries/splitter.py,sha256=wK335v7cUAVPbo_9Bok1C6TFg0rB9SH3D031m0vn9-A,2342
+ autogluon/timeseries/version.py,sha256=i1AvtCpe0Hg2dM7cUojAmubBtfodplDajXyAsahYUGg,91
+ autogluon/timeseries/configs/__init__.py,sha256=wiLBwxZkDTQBJkSJ9-xz3p_yJxX0dbHe108dS1P5O6A,183
+ autogluon/timeseries/configs/hyperparameter_presets.py,sha256=GbI2sd3uakWtaeaMyF7B5z_lmyfb6ToK6PZEUZTyG9w,2031
+ autogluon/timeseries/configs/predictor_presets.py,sha256=B5HFHIelh91hhG0YYE5SJ7_14P7sylFAABgHX8n_53M,2712
+ autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
+ autogluon/timeseries/dataset/ts_dataframe.py,sha256=49Itgcrjej-x22HYMCXPGD2gjCTRkyHpY2H83aD9U9k,52384
+ autogluon/timeseries/metrics/__init__.py,sha256=YJPXxsJ0tRDXq7p-sTZSLb0DuXMJH6sT1PgbZ3tMt30,3594
+ autogluon/timeseries/metrics/abstract.py,sha256=6jbluvHXfLc_cuK1Fx0ZYle2sR4WGG6YxFQhkor46Q8,11545
+ autogluon/timeseries/metrics/point.py,sha256=sS__n_Em7m4CUaBu3PNWQ_dHw1YCOHbEyC15fhytFL8,18308
+ autogluon/timeseries/metrics/quantile.py,sha256=3XLKn01R2roLPZqcyAcxAIy_O89hdr0b4IKHyzRrXYA,4621
+ autogluon/timeseries/metrics/utils.py,sha256=_Nz6GLbs91WhqN1PoA53wD4xEEuPIQ0juV5l9rDmkFo,970
+ autogluon/timeseries/models/__init__.py,sha256=9NY9mqYaZe_7XB70M6psHARH-Lpkfroj4toUUPO9BmI,1339
+ autogluon/timeseries/models/registry.py,sha256=8n7W04ql0ckNQUzKcAW7bxreLI8wTAUTymACgLklH9M,2158
+ autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
+ autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=97HOi7fRPxtx8Y9hq-xdJI-kLMp6Z-8LUSvcfBjXFsM,31978
+ autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
+ autogluon/timeseries/models/abstract/tunable.py,sha256=jA6p-FPZkMva67B-1foqvHK-1rr0IdEfp9RvGW1WS9I,7155
+ autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=E5fZsdFPgVdyCVyj5bGmn_lQFlCMn2NvuRLBMcCFvhM,205
+ autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=3p3ukQxWN4WQHKt3ocmIb_5VlZfHwWJikQYUhSbDbtE,36457
+ autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=keEW7M4SIsu3hC4EFuxcrj5s7QjF9k_7NBARuMXmYgA,23329
+ autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=XtxvaRsnmVF8strfvzEfWO5a_Q8p_wMyxHyglpO1R1c,2886
+ autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
+ autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
+ autogluon/timeseries/models/chronos/model.py,sha256=N6tjC8gSOLcL5eX29JYcOgfxlRATGI2qtTZucCD83t8,33437
+ autogluon/timeseries/models/chronos/utils.py,sha256=6y2wphSVYR1ylscSGdb3NvrTU4ZDgbx56Gluxht_j-k,14465
+ autogluon/timeseries/models/ensemble/__init__.py,sha256=9fthsA6ozZoTC7A33O0hGhiHAMzcAgG206-b4PIF9Yc,1070
+ autogluon/timeseries/models/ensemble/abstract.py,sha256=ePsz2lzmludxq4x_R1jjYgPvxMc0yqVRqHbU1Fq_pvo,4264
+ autogluon/timeseries/models/ensemble/array_based/__init__.py,sha256=xCzFHS9YTPsC0LPfhh8mOWzUTYxXGz1RJ15ox0Wgr98,159
+ autogluon/timeseries/models/ensemble/array_based/abstract.py,sha256=RC0PL4LvU7REF_FdQwqGT9TmeETNjFlHOJSBTeJrER8,10330
+ autogluon/timeseries/models/ensemble/array_based/models.py,sha256=yvqWgXZU2iKxSe4J-kbEYHA3Lah8bYUG2-hdMNMlLP4,1640
+ autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py,sha256=Fw5m77f8Z5Y6UrgYFsK7bi1fIgLWdqzvoWZqkfAVmmY,327
+ autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py,sha256=cYsmZcjUg84EROimaBUI3X-EPIT4xGyEEqHfHFbiGYQ,2615
+ autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py,sha256=oJezyB0Kv2GPChF-Ym9IsyRX4N3OYcUx32hejvMVMTI,5061
+ autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py,sha256=2FnOiBDVgaldOnQcPD77mNzXJq7EDb1FGMpwjA3KAlE,4763
+ autogluon/timeseries/models/ensemble/weighted/__init__.py,sha256=_LipTsDnYvTFmjZWsb1Vrm-eALsVVfUlF2gOpcaqE2Q,206
+ autogluon/timeseries/models/ensemble/weighted/abstract.py,sha256=7vQVBK4TMBpESJ2EwnVklcljxmA2qWPQ9xpSREbtUwg,1543
+ autogluon/timeseries/models/ensemble/weighted/basic.py,sha256=Kr8y0dlHRZg_q9AqBc3HIp1a5k_sXjrnQPlVi-63DCE,3066
+ autogluon/timeseries/models/ensemble/weighted/greedy.py,sha256=zXJFenn1XxNNvCp4TlmIq1Dx3pUDWjKG1K3HsejmDeY,7323
+ autogluon/timeseries/models/gluonts/__init__.py,sha256=YfyNYOkhhNsloA4MAavfmqKO29_q6o4lwPoV7L4_h7M,355
+ autogluon/timeseries/models/gluonts/abstract.py,sha256=WKuUBy3ZF9VU87gaD9Us3c_xK2G1-XLeh1etVipf8hg,27769
+ autogluon/timeseries/models/gluonts/dataset.py,sha256=wfEp5SPuB8bam7iTpX3Tf0FGdXp5vnZtpgC9G4VJ4tw,5111
+ autogluon/timeseries/models/gluonts/models.py,sha256=1Z3x3-jVoae5X4cSnDIgJMvTJ9_O94aDSW8HEnBaL5k,25907
+ autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
+ autogluon/timeseries/models/local/abstract_local_model.py,sha256=ASIZWBYs_cP0BwdrzHwblaNianPYcK5OqpqpiNxbxA0,11481
+ autogluon/timeseries/models/local/naive.py,sha256=xur3WWhLaS9Iix_p_yfaStbr58nL5K4rV0dReTm3BQQ,7496
+ autogluon/timeseries/models/local/npts.py,sha256=VRZk5tEJOIentt0tLM6lxyoU8US736nHOvhSAgagYMc,4203
+ autogluon/timeseries/models/local/statsforecast.py,sha256=sZ6aEFzAyPNZX3rMULGWFht0Toapjb3EwHe5Rb76ZxA,33318
+ autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
+ autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=PBnNhDXPJJatRIm9FXg9DXU_0ZkGSs2yvEqfaTwBVxM,12356
+ autogluon/timeseries/models/toto/__init__.py,sha256=rQaVjZJV5ZsJGC0jhQ6CA4nYeXdV1KtlyDz2i2usQnY,54
+ autogluon/timeseries/models/toto/dataloader.py,sha256=A5WHhnAe0J7fPo2KKG43hYLSrtUBGNweuqxMmClu3_A,3598
+ autogluon/timeseries/models/toto/hf_pretrained_model.py,sha256=Q8bVUaSlQVE4xFn_v7H0h_NFTxzHiM1V17KFytc50jk,4783
+ autogluon/timeseries/models/toto/model.py,sha256=3-5nR9qNqBFQLP6rNqBNlF4PBfnJHTcyjvz2GwdWwTg,8948
+ autogluon/timeseries/models/toto/_internal/__init__.py,sha256=tKkiux9bD2Xu0AuVyTEx_sNOZutcluC7-d7tn7wsmec,193
+ autogluon/timeseries/models/toto/_internal/dataset.py,sha256=xuAEOhoQNJGMoCxkLVLrgpdoOJuukAYbrSrnrkwFob0,6103
+ autogluon/timeseries/models/toto/_internal/forecaster.py,sha256=UXiohiySn_Gs8kLheeVcVCO8qoEtYlEfMH1tukAOHsk,18520
+ autogluon/timeseries/models/toto/_internal/backbone/__init__.py,sha256=hq5W62boH6HiEP8z3sHkI6_KM-Dd6TkDfWDm6DYE3J8,63
+ autogluon/timeseries/models/toto/_internal/backbone/attention.py,sha256=HLUFoyqR8EqxUMT1BK-AjI4ClS8au35LcUo7Jx7Xhm0,9394
+ autogluon/timeseries/models/toto/_internal/backbone/backbone.py,sha256=HUjpY2ZWed74UYKjp31erXF2ZHf3mmQMw_5_cCFeJGg,10104
+ autogluon/timeseries/models/toto/_internal/backbone/distribution.py,sha256=8NXiaEVLuvjTW7L1t1RzooZFNERWv50zyLddbAwuYpo,2502
+ autogluon/timeseries/models/toto/_internal/backbone/kvcache.py,sha256=QSVCrnbS2oD7wkJodZbP9XMVmrfCH6M3Zp44siF28Fg,5399
+ autogluon/timeseries/models/toto/_internal/backbone/rope.py,sha256=Ghngo08DjHbwbyp6b-GXCyLeYR10dH-Y_RMOTYwIxPY,3527
+ autogluon/timeseries/models/toto/_internal/backbone/scaler.py,sha256=opqyhHIZ6mPdPlrr3gA0qt9FFogIAYNDSq-P7CyQiqE,13728
+ autogluon/timeseries/models/toto/_internal/backbone/transformer.py,sha256=5c-ngj4XHKlaedz1NkgdfQgqD2kUGkMn4mtGH_lTXsE,12410
+ autogluon/timeseries/trainer/__init__.py,sha256=_tw3iioJfvtIV7wnjtEMv0yS8oabmCFxDnGRodYE7RI,72
+ autogluon/timeseries/trainer/ensemble_composer.py,sha256=Vc8LfhGVUED70Y4DcIs3Jhpiur2EFXqVubgInixcb2I,9751
+ autogluon/timeseries/trainer/model_set_builder.py,sha256=s6tozfND3lLfst6Vxa_oP_wgCmDapyCJYFmCjkEn-es,10788
+ autogluon/timeseries/trainer/prediction_cache.py,sha256=Vi6EbMiMheq_smA93U_MoMxYUV85RdPm0dvJFdsM8K4,5551
+ autogluon/timeseries/trainer/trainer.py,sha256=yAHbpTjGKzVBepzepKuXEF5SvCQXDbsnyURV6mKLqaU,52002
+ autogluon/timeseries/trainer/utils.py,sha256=_hSAWOYRZsp1qX2J6pJSxLrAAWwhVROc4_cvtfiTRzU,625
+ autogluon/timeseries/transforms/__init__.py,sha256=fKlT4pkJ_8Gl7IUTc3uSDzt2Xow5iH5w6fPB3ePNrTg,127
+ autogluon/timeseries/transforms/covariate_scaler.py,sha256=8E5DDRLUQ3SCNDR2Yw8FZDx7DnWVdokKhNNxbp_S-9I,7017
+ autogluon/timeseries/transforms/target_scaler.py,sha256=tucfrWuXwTGv0WcJMo0bSk6--CkqGMDxiFPiUFl0RB8,6142
+ autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ autogluon/timeseries/utils/features.py,sha256=GpemZRV7QiFRjZwP6NqpCVBg6m3KGBgp-eWUFzcpx54,22714
+ autogluon/timeseries/utils/forecast.py,sha256=y3VV1rVCxOuh_p-2U9ftT_I5oU4gQQovxlw14jRGwyM,2259
+ autogluon/timeseries/utils/warning_filters.py,sha256=SroNhLU3kwbD8anM58vdxWq36Z8j_uiY42mEt0ya-JI,2589
+ autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
+ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
+ autogluon/timeseries/utils/datetime/lags.py,sha256=rjJtdBU0M41R1jwfmvCbo045s-6XBjhGVnGBQJ9-U1E,5997
+ autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
+ autogluon/timeseries/utils/datetime/time_features.py,sha256=kEOFls4Nzh8nO0Pcz1DwLsC_NA3hMI4JUlZI3kuvuts,2666
+ autogluon_timeseries-1.4.1b20251116.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ autogluon_timeseries-1.4.1b20251116.dist-info/licenses/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+ autogluon_timeseries-1.4.1b20251116.dist-info/METADATA,sha256=2t9sW-KP5XI6x7WUxIkqXQqBCKRruP017VYThdeOPoA,12980
+ autogluon_timeseries-1.4.1b20251116.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ autogluon_timeseries-1.4.1b20251116.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon_timeseries-1.4.1b20251116.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon_timeseries-1.4.1b20251116.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ autogluon_timeseries-1.4.1b20251116.dist-info/RECORD,,
{autogluon.timeseries-1.3.2b20250712.dist-info → autogluon_timeseries-1.4.1b20251116.dist-info}/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.45.1)
+ Generator: setuptools (79.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any
