autogluon.timeseries 1.2.1b20250224__py3-none-any.whl → 1.4.1b20251215__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of autogluon.timeseries has been flagged as potentially problematic; see the package's registry page for more details.

Files changed (108):
  1. autogluon/timeseries/configs/__init__.py +3 -2
  2. autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
  3. autogluon/timeseries/configs/predictor_presets.py +106 -0
  4. autogluon/timeseries/dataset/ts_dataframe.py +256 -141
  5. autogluon/timeseries/learner.py +86 -52
  6. autogluon/timeseries/metrics/__init__.py +42 -8
  7. autogluon/timeseries/metrics/abstract.py +89 -19
  8. autogluon/timeseries/metrics/point.py +142 -53
  9. autogluon/timeseries/metrics/quantile.py +46 -21
  10. autogluon/timeseries/metrics/utils.py +4 -4
  11. autogluon/timeseries/models/__init__.py +8 -2
  12. autogluon/timeseries/models/abstract/__init__.py +2 -2
  13. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +361 -592
  14. autogluon/timeseries/models/abstract/model_trial.py +2 -1
  15. autogluon/timeseries/models/abstract/tunable.py +189 -0
  16. autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
  17. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +282 -194
  18. autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
  19. autogluon/timeseries/models/autogluon_tabular/transforms.py +25 -18
  20. autogluon/timeseries/models/chronos/__init__.py +2 -1
  21. autogluon/timeseries/models/chronos/chronos2.py +361 -0
  22. autogluon/timeseries/models/chronos/model.py +219 -138
  23. autogluon/timeseries/models/chronos/{pipeline/utils.py → utils.py} +81 -50
  24. autogluon/timeseries/models/ensemble/__init__.py +37 -2
  25. autogluon/timeseries/models/ensemble/abstract.py +107 -0
  26. autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
  27. autogluon/timeseries/models/ensemble/array_based/abstract.py +240 -0
  28. autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
  29. autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
  30. autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
  31. autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
  32. autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
  33. autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
  34. autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
  35. autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
  36. autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
  37. autogluon/timeseries/models/ensemble/weighted/abstract.py +45 -0
  38. autogluon/timeseries/models/ensemble/weighted/basic.py +91 -0
  39. autogluon/timeseries/models/ensemble/weighted/greedy.py +62 -0
  40. autogluon/timeseries/models/gluonts/__init__.py +1 -1
  41. autogluon/timeseries/models/gluonts/{abstract_gluonts.py → abstract.py} +148 -208
  42. autogluon/timeseries/models/gluonts/dataset.py +109 -0
  43. autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +38 -22
  44. autogluon/timeseries/models/local/__init__.py +0 -7
  45. autogluon/timeseries/models/local/abstract_local_model.py +71 -74
  46. autogluon/timeseries/models/local/naive.py +13 -9
  47. autogluon/timeseries/models/local/npts.py +9 -2
  48. autogluon/timeseries/models/local/statsforecast.py +52 -36
  49. autogluon/timeseries/models/multi_window/multi_window_model.py +65 -45
  50. autogluon/timeseries/models/registry.py +64 -0
  51. autogluon/timeseries/models/toto/__init__.py +3 -0
  52. autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
  53. autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
  54. autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
  55. autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
  56. autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
  57. autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
  58. autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
  59. autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
  60. autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
  61. autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
  62. autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
  63. autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
  64. autogluon/timeseries/models/toto/dataloader.py +108 -0
  65. autogluon/timeseries/models/toto/hf_pretrained_model.py +200 -0
  66. autogluon/timeseries/models/toto/model.py +249 -0
  67. autogluon/timeseries/predictor.py +685 -297
  68. autogluon/timeseries/regressor.py +94 -44
  69. autogluon/timeseries/splitter.py +8 -32
  70. autogluon/timeseries/trainer/__init__.py +3 -0
  71. autogluon/timeseries/trainer/ensemble_composer.py +444 -0
  72. autogluon/timeseries/trainer/model_set_builder.py +256 -0
  73. autogluon/timeseries/trainer/prediction_cache.py +149 -0
  74. autogluon/timeseries/{trainer.py → trainer/trainer.py} +387 -390
  75. autogluon/timeseries/trainer/utils.py +17 -0
  76. autogluon/timeseries/transforms/__init__.py +2 -13
  77. autogluon/timeseries/transforms/covariate_scaler.py +34 -40
  78. autogluon/timeseries/transforms/target_scaler.py +37 -20
  79. autogluon/timeseries/utils/constants.py +10 -0
  80. autogluon/timeseries/utils/datetime/lags.py +3 -5
  81. autogluon/timeseries/utils/datetime/seasonality.py +1 -3
  82. autogluon/timeseries/utils/datetime/time_features.py +2 -2
  83. autogluon/timeseries/utils/features.py +70 -47
  84. autogluon/timeseries/utils/forecast.py +19 -14
  85. autogluon/timeseries/utils/timer.py +173 -0
  86. autogluon/timeseries/utils/warning_filters.py +4 -2
  87. autogluon/timeseries/version.py +1 -1
  88. autogluon.timeseries-1.4.1b20251215-py3.11-nspkg.pth +1 -0
  89. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/METADATA +49 -36
  90. autogluon_timeseries-1.4.1b20251215.dist-info/RECORD +103 -0
  91. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/WHEEL +1 -1
  92. autogluon/timeseries/configs/presets_configs.py +0 -79
  93. autogluon/timeseries/evaluator.py +0 -6
  94. autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -11
  95. autogluon/timeseries/models/chronos/pipeline/base.py +0 -160
  96. autogluon/timeseries/models/chronos/pipeline/chronos.py +0 -585
  97. autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -518
  98. autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -78
  99. autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
  100. autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
  101. autogluon/timeseries/models/presets.py +0 -360
  102. autogluon.timeseries-1.2.1b20250224-py3.9-nspkg.pth +0 -1
  103. autogluon.timeseries-1.2.1b20250224.dist-info/RECORD +0 -68
  104. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/LICENSE +0 -0
  105. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info/licenses}/NOTICE +0 -0
  106. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/namespace_packages.txt +0 -0
  107. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/top_level.txt +0 -0
  108. {autogluon.timeseries-1.2.1b20250224.dist-info → autogluon_timeseries-1.4.1b20251215.dist-info}/zip-safe +0 -0
@@ -0,0 +1,17 @@
1
+ import logging
2
+
3
+ logger = logging.getLogger("autogluon.timeseries.trainer")
4
+
5
+
6
+ def log_scores_and_times(
7
+ val_score: float | None,
8
+ fit_time: float | None,
9
+ predict_time: float | None,
10
+ eval_metric_name: str,
11
+ ):
12
+ if val_score is not None:
13
+ logger.info(f"\t{val_score:<7.4f}".ljust(15) + f"= Validation score ({eval_metric_name})")
14
+ if fit_time is not None:
15
+ logger.info(f"\t{fit_time:<7.2f} s".ljust(15) + "= Training runtime")
16
+ if predict_time is not None:
17
+ logger.info(f"\t{predict_time:<7.2f} s".ljust(15) + "= Validation (prediction) runtime")
@@ -1,13 +1,2 @@
1
- from .covariate_scaler import (
2
- CovariateScaler,
3
- GlobalCovariateScaler,
4
- get_covariate_scaler_from_name,
5
- )
6
- from .target_scaler import (
7
- LocalStandardScaler,
8
- LocalMinMaxScaler,
9
- LocalMeanAbsScaler,
10
- LocalRobustScaler,
11
- LocalTargetScaler,
12
- get_target_scaler_from_name
13
- )
1
+ from .covariate_scaler import CovariateScaler, get_covariate_scaler
2
+ from .target_scaler import TargetScaler, get_target_scaler
@@ -1,47 +1,32 @@
1
1
  import logging
2
- from typing import Dict, List, Literal, Optional
2
+ from typing import Literal, Protocol, overload, runtime_checkable
3
3
 
4
4
  import numpy as np
5
5
  import pandas as pd
6
6
  from sklearn.compose import ColumnTransformer
7
7
  from sklearn.preprocessing import QuantileTransformer, StandardScaler
8
8
 
9
- from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
9
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
10
10
  from autogluon.timeseries.utils.features import CovariateMetadata
11
11
  from autogluon.timeseries.utils.warning_filters import warning_filter
12
12
 
13
13
  logger = logging.getLogger(__name__)
14
14
 
15
15
 
16
- class CovariateScaler:
16
+ @runtime_checkable
17
+ class CovariateScaler(Protocol):
17
18
  """Apply scaling to covariates and static features.
18
19
 
19
20
  This can be helpful for deep learning models that assume that the inputs are normalized.
20
21
  """
21
22
 
22
- def __init__(
23
- self,
24
- metadata: CovariateMetadata,
25
- use_known_covariates: bool = True,
26
- use_past_covariates: bool = True,
27
- use_static_features: bool = True,
28
- **kwargs,
29
- ):
30
- self.metadata = metadata
31
- self.use_known_covariates = use_known_covariates
32
- self.use_past_covariates = use_past_covariates
33
- self.use_static_features = use_static_features
34
-
35
- def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
36
- raise NotImplementedError
23
+ def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame: ...
37
24
 
38
- def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
39
- raise NotImplementedError
25
+ def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame: ...
40
26
 
41
27
  def transform_known_covariates(
42
- self, known_covariates: Optional[TimeSeriesDataFrame] = None
43
- ) -> Optional[TimeSeriesDataFrame]:
44
- raise NotImplementedError
28
+ self, known_covariates: TimeSeriesDataFrame | None = None
29
+ ) -> TimeSeriesDataFrame | None: ...
45
30
 
46
31
 
47
32
  class GlobalCovariateScaler(CovariateScaler):
@@ -57,15 +42,18 @@ class GlobalCovariateScaler(CovariateScaler):
57
42
 
58
43
  def __init__(
59
44
  self,
60
- metadata: CovariateMetadata,
45
+ covariate_metadata: CovariateMetadata,
61
46
  use_known_covariates: bool = True,
62
47
  use_past_covariates: bool = True,
63
48
  use_static_features: bool = True,
64
49
  skew_threshold: float = 0.99,
65
50
  ):
66
- super().__init__(metadata, use_known_covariates, use_past_covariates, use_static_features)
51
+ self.covariate_metadata = covariate_metadata
52
+ self.use_known_covariates = use_known_covariates
53
+ self.use_past_covariates = use_past_covariates
54
+ self.use_static_features = use_static_features
67
55
  self.skew_threshold = skew_threshold
68
- self._column_transformers: Optional[Dict[Literal["known", "past", "static"], ColumnTransformer]] = None
56
+ self._column_transformers: dict[Literal["known", "past", "static"], ColumnTransformer] | None = None
69
57
 
70
58
  def is_fit(self) -> bool:
71
59
  return self._column_transformers is not None
@@ -73,18 +61,18 @@ class GlobalCovariateScaler(CovariateScaler):
73
61
  def fit(self, data: TimeSeriesDataFrame) -> "GlobalCovariateScaler":
74
62
  self._column_transformers = {}
75
63
 
76
- if self.use_known_covariates and len(self.metadata.known_covariates_real) > 0:
64
+ if self.use_known_covariates and len(self.covariate_metadata.known_covariates_real) > 0:
77
65
  self._column_transformers["known"] = self._get_transformer_for_columns(
78
- data, columns=self.metadata.known_covariates_real
66
+ data, columns=self.covariate_metadata.known_covariates_real
79
67
  )
80
- if self.use_past_covariates and len(self.metadata.past_covariates_real) > 0:
68
+ if self.use_past_covariates and len(self.covariate_metadata.past_covariates_real) > 0:
81
69
  self._column_transformers["past"] = self._get_transformer_for_columns(
82
- data, columns=self.metadata.past_covariates_real
70
+ data, columns=self.covariate_metadata.past_covariates_real
83
71
  )
84
- if self.use_static_features and len(self.metadata.static_features_real) > 0:
72
+ if self.use_static_features and len(self.covariate_metadata.static_features_real) > 0:
85
73
  assert data.static_features is not None
86
74
  self._column_transformers["static"] = self._get_transformer_for_columns(
87
- data.static_features, columns=self.metadata.static_features_real
75
+ data.static_features, columns=self.covariate_metadata.static_features_real
88
76
  )
89
77
 
90
78
  return self
@@ -100,15 +88,15 @@ class GlobalCovariateScaler(CovariateScaler):
100
88
  assert self._column_transformers is not None, "CovariateScaler must be fit before transform can be called"
101
89
 
102
90
  if "known" in self._column_transformers:
103
- columns = self.metadata.known_covariates_real
91
+ columns = self.covariate_metadata.known_covariates_real
104
92
  data[columns] = self._column_transformers["known"].transform(data[columns])
105
93
 
106
94
  if "past" in self._column_transformers:
107
- columns = self.metadata.past_covariates_real
95
+ columns = self.covariate_metadata.past_covariates_real
108
96
  data[columns] = self._column_transformers["past"].transform(data[columns])
109
97
 
110
98
  if "static" in self._column_transformers:
111
- columns = self.metadata.static_features_real
99
+ columns = self.covariate_metadata.static_features_real
112
100
  assert data.static_features is not None
113
101
 
114
102
  data.static_features[columns] = self._column_transformers["static"].transform(
@@ -117,19 +105,19 @@ class GlobalCovariateScaler(CovariateScaler):
117
105
  return data
118
106
 
119
107
  def transform_known_covariates(
120
- self, known_covariates: Optional[TimeSeriesDataFrame] = None
121
- ) -> Optional[TimeSeriesDataFrame]:
108
+ self, known_covariates: TimeSeriesDataFrame | None = None
109
+ ) -> TimeSeriesDataFrame | None:
122
110
  assert self._column_transformers is not None, "CovariateScaler must be fit before transform can be called"
123
111
 
124
112
  if "known" in self._column_transformers:
125
- columns = self.metadata.known_covariates_real
113
+ columns = self.covariate_metadata.known_covariates_real
126
114
  assert known_covariates is not None
127
115
 
128
116
  known_covariates = known_covariates.copy()
129
117
  known_covariates[columns] = self._column_transformers["known"].transform(known_covariates[columns])
130
118
  return known_covariates
131
119
 
132
- def _get_transformer_for_columns(self, df: pd.DataFrame, columns: List[str]) -> ColumnTransformer:
120
+ def _get_transformer_for_columns(self, df: pd.DataFrame, columns: list[str]) -> ColumnTransformer:
133
121
  """Passthrough bool features, use QuantileTransform for skewed features, and use StandardScaler for the rest.
134
122
 
135
123
  The preprocessing logic is similar to the TORCH_NN model from Tabular.
@@ -162,7 +150,13 @@ AVAILABLE_COVARIATE_SCALERS = {
162
150
  }
163
151
 
164
152
 
165
- def get_covariate_scaler_from_name(name: Literal["global"], **scaler_kwargs) -> CovariateScaler:
153
+ @overload
154
+ def get_covariate_scaler(name: None, **scaler_kwargs) -> None: ...
155
+ @overload
156
+ def get_covariate_scaler(name: Literal["global"], **scaler_kwargs) -> GlobalCovariateScaler: ...
157
+ def get_covariate_scaler(name: Literal["global"] | None = None, **scaler_kwargs) -> CovariateScaler | None:
158
+ if name is None:
159
+ return None
166
160
  if name not in AVAILABLE_COVARIATE_SCALERS:
167
161
  raise KeyError(
168
162
  f"Covariate scaler type {name} not supported. Available scalers: {list(AVAILABLE_COVARIATE_SCALERS)}"
@@ -1,12 +1,23 @@
1
- from typing import Literal, Optional, Tuple, Union
1
+ from typing import Literal, Protocol, overload
2
2
 
3
3
  import numpy as np
4
4
  import pandas as pd
5
+ from typing_extensions import Self
5
6
 
6
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
7
+ from autogluon.timeseries.dataset import TimeSeriesDataFrame
7
8
 
8
9
 
9
- class LocalTargetScaler:
10
+ class TargetScaler(Protocol):
11
+ def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame: ...
12
+
13
+ def fit(self, data: TimeSeriesDataFrame) -> Self: ...
14
+
15
+ def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame: ...
16
+
17
+ def inverse_transform(self, predictions: TimeSeriesDataFrame) -> TimeSeriesDataFrame: ...
18
+
19
+
20
+ class LocalTargetScaler(TargetScaler):
10
21
  """Applies an affine transformation (x - loc) / scale independently to each time series in the dataset."""
11
22
 
12
23
  def __init__(
@@ -16,10 +27,10 @@ class LocalTargetScaler:
16
27
  ):
17
28
  self.target = target
18
29
  self.min_scale = min_scale
19
- self.loc: Optional[pd.Series] = None
20
- self.scale: Optional[pd.Series] = None
30
+ self.loc: pd.Series | None = None
31
+ self.scale: pd.Series | None = None
21
32
 
22
- def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[Optional[pd.Series], Optional[pd.Series]]:
33
+ def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series | None, pd.Series | None]:
23
34
  raise NotImplementedError
24
35
 
25
36
  def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
@@ -34,7 +45,7 @@ class LocalTargetScaler:
34
45
  self.scale = self.scale.clip(lower=self.min_scale).replace([np.inf, -np.inf], np.nan).fillna(1.0)
35
46
  return self
36
47
 
37
- def _reindex_loc_scale(self, item_index: pd.Index) -> Tuple[Union[np.ndarray, float], Union[np.ndarray, float]]:
48
+ def _reindex_loc_scale(self, item_index: pd.Index) -> tuple[np.ndarray | float, np.ndarray | float]:
38
49
  """Reindex loc and scale parameters for the given item_ids and convert them to an array-like."""
39
50
  if self.loc is not None:
40
51
  loc = self.loc.reindex(item_index).to_numpy()
@@ -48,12 +59,12 @@ class LocalTargetScaler:
48
59
 
49
60
  def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
50
61
  """Apply scaling to the target column in the dataframe."""
51
- loc, scale = self._reindex_loc_scale(item_index=data.index.get_level_values(ITEMID))
62
+ loc, scale = self._reindex_loc_scale(item_index=data.index.get_level_values(TimeSeriesDataFrame.ITEMID))
52
63
  return data.assign(**{self.target: (data[self.target] - loc) / scale})
53
64
 
54
65
  def inverse_transform(self, predictions: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
55
66
  """Apply inverse scaling to all columns in the predictions dataframe."""
56
- loc, scale = self._reindex_loc_scale(item_index=predictions.index.get_level_values(ITEMID))
67
+ loc, scale = self._reindex_loc_scale(item_index=predictions.index.get_level_values(TimeSeriesDataFrame.ITEMID))
57
68
  return predictions.assign(**{col: predictions[col] * scale + loc for col in predictions.columns})
58
69
 
59
70
 
@@ -63,16 +74,16 @@ class LocalStandardScaler(LocalTargetScaler):
63
74
  The resulting affine transformation is (x - loc) / scale, where scale = std(x), loc = mean(x).
64
75
  """
65
76
 
66
- def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
67
- stats = target_series.groupby(level=ITEMID, sort=False).agg(["mean", "std"])
77
+ def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
78
+ stats = target_series.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg(["mean", "std"])
68
79
  return stats["mean"], stats["std"]
69
80
 
70
81
 
71
82
  class LocalMeanAbsScaler(LocalTargetScaler):
72
83
  """Applies mean absolute scaling to each time series in the dataset."""
73
84
 
74
- def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[Optional[pd.Series], pd.Series]:
75
- scale = target_series.abs().groupby(level=ITEMID, sort=False).agg("mean")
85
+ def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series | None, pd.Series]:
86
+ scale = target_series.abs().groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg("mean")
76
87
  return None, scale
77
88
 
78
89
 
@@ -82,8 +93,8 @@ class LocalMinMaxScaler(LocalTargetScaler):
82
93
  The resulting affine transformation is (x - loc) / scale, where scale = max(x) - min(x), loc = min(x) / scale.
83
94
  """
84
95
 
85
- def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
86
- stats = target_series.abs().groupby(level=ITEMID, sort=False).agg(["min", "max"])
96
+ def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
97
+ stats = target_series.abs().groupby(level=TimeSeriesDataFrame.ITEMID, sort=False).agg(["min", "max"])
87
98
  scale = (stats["max"] - stats["min"]).clip(lower=self.min_scale)
88
99
  loc = stats["min"]
89
100
  return loc, scale
@@ -106,8 +117,8 @@ class LocalRobustScaler(LocalTargetScaler):
106
117
  self.q_max = 0.75
107
118
  assert 0 < self.q_min < self.q_max < 1
108
119
 
109
- def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
110
- grouped = target_series.groupby(level=ITEMID, sort=False)
120
+ def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
121
+ grouped = target_series.groupby(level=TimeSeriesDataFrame.ITEMID, sort=False)
111
122
  loc = grouped.median()
112
123
  lower = grouped.quantile(self.q_min)
113
124
  upper = grouped.quantile(self.q_max)
@@ -123,10 +134,16 @@ AVAILABLE_TARGET_SCALERS = {
123
134
  }
124
135
 
125
136
 
126
- def get_target_scaler_from_name(
127
- name: Literal["standard", "mean_abs", "min_max", "robust"], **scaler_kwargs
128
- ) -> LocalTargetScaler:
137
+ @overload
138
+ def get_target_scaler(name: None, **scaler_kwargs) -> None: ...
139
+ @overload
140
+ def get_target_scaler(name: Literal["standard", "mean_abs", "min_max", "robust"], **scaler_kwargs) -> TargetScaler: ...
141
+ def get_target_scaler(
142
+ name: Literal["standard", "mean_abs", "min_max", "robust"] | None, **scaler_kwargs
143
+ ) -> TargetScaler | None:
129
144
  """Get LocalTargetScaler object from a string."""
145
+ if name is None:
146
+ return None
130
147
  if name not in AVAILABLE_TARGET_SCALERS:
131
148
  raise KeyError(f"Scaler type {name} not supported. Available scalers: {list(AVAILABLE_TARGET_SCALERS)}")
132
149
  return AVAILABLE_TARGET_SCALERS[name](**scaler_kwargs)
@@ -0,0 +1,10 @@
1
+ import joblib.externals.loky
2
+ from joblib import cpu_count
3
+
4
+ # By default, joblib w/ loky backend kills processes that take >300MB of RAM assuming that this is caused by a memory
5
+ # leak. This leads to problems for some memory-hungry models like AutoARIMA/Theta.
6
+ # This monkey patch removes this undesired behavior
7
+ joblib.externals.loky.process_executor._MAX_MEMORY_LEAK_SIZE = int(3e10)
8
+
9
+ # We use the same default n_jobs across AG-TS to ensure that Joblib reuses the process pool
10
+ AG_DEFAULT_N_JOBS = max(cpu_count(only_physical_cores=True), 1)
@@ -2,8 +2,6 @@
2
2
  Generate lag indices based on frequency string. Adapted from gluonts.time_feature.lag.
3
3
  """
4
4
 
5
- from typing import List, Optional
6
-
7
5
  import numpy as np
8
6
  import pandas as pd
9
7
 
@@ -70,9 +68,9 @@ def _make_lags_for_semi_month(multiple, num_cycles=3):
70
68
  def get_lags_for_frequency(
71
69
  freq: str,
72
70
  lag_ub: int = 1200,
73
- num_lags: Optional[int] = None,
71
+ num_lags: int | None = None,
74
72
  num_default_lags: int = 7,
75
- ) -> List[int]:
73
+ ) -> list[int]:
76
74
  """
77
75
  Generates a list of lags that that are appropriate for the given frequency
78
76
  string.
@@ -164,7 +162,7 @@ def get_lags_for_frequency(
164
162
  raise Exception(f"Cannot get lags for unsupported frequency {freq}")
165
163
 
166
164
  # flatten lags list and filter
167
- lags = [int(lag) for sub_list in lags for lag in sub_list if 7 < lag <= lag_ub]
165
+ lags = [int(lag) for sub_list in lags for lag in sub_list if num_default_lags < lag <= lag_ub]
168
166
  lags = list(range(1, num_default_lags + 1)) + sorted(list(set(lags)))
169
167
 
170
168
  return sorted(set(lags))[:num_lags]
@@ -1,5 +1,3 @@
1
- from typing import Union
2
-
3
1
  import pandas as pd
4
2
 
5
3
  from .base import norm_freq_str
@@ -22,7 +20,7 @@ DEFAULT_SEASONALITIES = {
22
20
  }
23
21
 
24
22
 
25
- def get_seasonality(freq: Union[str, None]) -> int:
23
+ def get_seasonality(freq: str | None) -> int:
26
24
  """Return the seasonality of a given frequency. Adapted from ``gluonts.time_feature.seasonality``."""
27
25
  if freq is None:
28
26
  return 1
@@ -2,7 +2,7 @@
2
2
  Generate time features based on frequency string. Adapted from gluonts.time_feature.time_feature.
3
3
  """
4
4
 
5
- from typing import Callable, List
5
+ from typing import Callable
6
6
 
7
7
  import numpy as np
8
8
  import pandas as pd
@@ -56,7 +56,7 @@ def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:
56
56
  return _normalize(index.second, num=60)
57
57
 
58
58
 
59
- def get_time_features_for_frequency(freq) -> List[Callable]:
59
+ def get_time_features_for_frequency(freq) -> list[Callable]:
60
60
  features_by_offset_name = {
61
61
  "YE": [],
62
62
  "QE": [quarter_of_year],