autogluon.timeseries 1.1.2b20241115__py3-none-any.whl → 1.1.2b20241117__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +65 -19
  2. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +9 -4
  3. autogluon/timeseries/models/autogluon_tabular/transforms.py +1 -1
  4. autogluon/timeseries/models/chronos/model.py +6 -1
  5. autogluon/timeseries/models/gluonts/abstract_gluonts.py +2 -71
  6. autogluon/timeseries/models/local/abstract_local_model.py +8 -2
  7. autogluon/timeseries/models/multi_window/multi_window_model.py +3 -11
  8. autogluon/timeseries/regressor.py +42 -8
  9. autogluon/timeseries/trainer/abstract_trainer.py +4 -1
  10. autogluon/timeseries/transforms/__init__.py +6 -1
  11. autogluon/timeseries/transforms/covariate_scaler.py +159 -0
  12. autogluon/timeseries/transforms/{scaler.py → target_scaler.py} +4 -4
  13. autogluon/timeseries/version.py +1 -1
  14. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/METADATA +4 -4
  15. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/RECORD +22 -21
  16. /autogluon.timeseries-1.1.2b20241115-py3.8-nspkg.pth → /autogluon.timeseries-1.1.2b20241117-py3.8-nspkg.pth +0 -0
  17. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/LICENSE +0 -0
  18. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/NOTICE +0 -0
  19. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/WHEEL +0 -0
  20. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/namespace_packages.txt +0 -0
  21. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/top_level.txt +0 -0
  22. {autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/zip-safe +0 -0
autogluon/timeseries/models/abstract/abstract_timeseries_model.py

@@ -3,7 +3,7 @@ import os
 import re
 import time
 from contextlib import nullcontext
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union

 import pandas as pd

@@ -16,7 +16,12 @@ from autogluon.core.models import AbstractModel
 from autogluon.timeseries.dataset import TimeSeriesDataFrame
 from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
 from autogluon.timeseries.regressor import CovariateRegressor
-from autogluon.timeseries.transforms import LocalTargetScaler, get_target_scaler_from_name
+from autogluon.timeseries.transforms import (
+    CovariateScaler,
+    LocalTargetScaler,
+    get_covariate_scaler_from_name,
+    get_target_scaler_from_name,
+)
 from autogluon.timeseries.utils.features import CovariateMetadata
 from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
 from autogluon.timeseries.utils.warning_filters import disable_stdout, warning_filter
@@ -60,6 +65,8 @@ class AbstractTimeSeriesModel(AbstractModel):
     """

     _oof_filename = "oof.pkl"
+    # TODO: For which models should we override this parameter?
+    _covariate_regressor_fit_time_fraction: float = 0.5

     # TODO: refactor "pruned" methods after AbstractModel is refactored
     predict_proba = None
@@ -128,6 +135,8 @@ class AbstractTimeSeriesModel(AbstractModel):

         self._oof_predictions: Optional[List[TimeSeriesDataFrame]] = None
         self.target_scaler: Optional[LocalTargetScaler] = None
+        self.covariate_scaler: Optional[CovariateScaler] = None
+        self.covariate_regressor: Optional[CovariateRegressor] = None

     def __repr__(self) -> str:
         return self.name
@@ -169,6 +178,7 @@ class AbstractTimeSeriesModel(AbstractModel):
         self._init_params_aux()
         self._init_params()
         self.target_scaler = self._create_target_scaler()
+        self.covariate_scaler = self._create_covariate_scaler()
         self.covariate_regressor = self._create_covariate_regressor()

     def _compute_fit_metadata(self, val_data: TimeSeriesDataFrame = None, **kwargs):
@@ -255,22 +265,36 @@ class AbstractTimeSeriesModel(AbstractModel):
         """
         start_time = time.monotonic()
         self.initialize(**kwargs)
+
         if self.target_scaler is not None:
             train_data = self.target_scaler.fit_transform(train_data)

+        if self.covariate_scaler is not None:
+            train_data = self.covariate_scaler.fit_transform(train_data)
+
         if self.covariate_regressor is not None:
-            train_data = self.covariate_regressor.fit_transform(
+            covariate_regressor_time_limit = (
+                self._covariate_regressor_fit_time_fraction * time_limit if time_limit is not None else None
+            )
+            self.covariate_regressor.fit(
                 train_data,
-                time_limit=0.5 * time_limit if time_limit is not None else None,
+                time_limit=covariate_regressor_time_limit,
+                verbosity=kwargs.get("verbosity", 2) - 1,
             )

-        train_data = self.preprocess(train_data, is_train=True)
+        if self._get_tags()["can_use_train_data"]:
+            if self.covariate_regressor is not None:
+                train_data = self.covariate_regressor.transform(train_data)
+            train_data, _ = self.preprocess(train_data, is_train=True)
+
         if self._get_tags()["can_use_val_data"] and val_data is not None:
             if self.target_scaler is not None:
                 val_data = self.target_scaler.transform(val_data)
+            if self.covariate_scaler is not None:
+                val_data = self.covariate_scaler.transform(val_data)
             if self.covariate_regressor is not None:
                 val_data = self.covariate_regressor.transform(val_data)
-            val_data = self.preprocess(val_data, is_train=False)
+            val_data, _ = self.preprocess(val_data, is_train=False)

         if time_limit is not None:
             time_limit = time_limit - (time.monotonic() - start_time)
@@ -290,21 +314,37 @@ class AbstractTimeSeriesModel(AbstractModel):
         else:
             return None

+    def _create_covariate_scaler(self) -> Optional[CovariateScaler]:
+        """Create a CovariateScaler object based on the value of the `covariate_scaler` hyperparameter."""
+        covariate_scaler_type = self._get_model_params().get("covariate_scaler")
+        if covariate_scaler_type is not None:
+            return get_covariate_scaler_from_name(
+                covariate_scaler_type,
+                metadata=self.metadata,
+                use_static_features=self.supports_static_features,
+                use_known_covariates=self.supports_known_covariates,
+                use_past_covariates=self.supports_past_covariates,
+            )
+        else:
+            return None
+
     def _create_covariate_regressor(self) -> Optional[CovariateRegressor]:
         """Create a CovariateRegressor object based on the value of the `covariate_regressor` hyperparameter."""
         covariate_regressor = self._get_model_params().get("covariate_regressor")
         if covariate_regressor is not None:
             if len(self.metadata.known_covariates + self.metadata.static_features) == 0:
-                logger.debug(
-                    "Skipping CovariateRegressor since the dataset contains no covariates or static features."
+                logger.info(
+                    "\tSkipping covariate_regressor since the dataset contains no covariates or static features."
                 )
                 return None
             else:
                 if isinstance(covariate_regressor, str):
                     return CovariateRegressor(covariate_regressor, target=self.target, metadata=self.metadata)
+                elif isinstance(covariate_regressor, dict):
+                    return CovariateRegressor(**covariate_regressor, target=self.target, metadata=self.metadata)
                 elif isinstance(covariate_regressor, CovariateRegressor):
                     logger.warning(
-                        "Using a custom CovariateRegressor object is experimental functionality that may break in the future!"
+                        "\tUsing a custom covariate_regressor is experimental functionality that may break in the future!"
                     )
                     covariate_regressor.target = self.target
                     covariate_regressor.metadata = self.metadata
@@ -371,11 +411,13 @@ class AbstractTimeSeriesModel(AbstractModel):
         """
         if self.target_scaler is not None:
             data = self.target_scaler.fit_transform(data)
+        if self.covariate_scaler is not None:
+            data = self.covariate_scaler.fit_transform(data)
+            known_covariates = self.covariate_scaler.transform_known_covariates(known_covariates)
         if self.covariate_regressor is not None:
             data = self.covariate_regressor.fit_transform(data)

-        data = self.preprocess(data, is_train=False)
-        known_covariates = self.preprocess_known_covariates(known_covariates)
+        data, known_covariates = self.preprocess(data, known_covariates, is_train=False)

         # FIXME: Set self.covariate_regressor=None so to avoid copying it across processes during _predict
         # FIXME: The clean solution is to convert all methods executed in parallel to @classmethod
@@ -597,13 +639,15 @@ class AbstractTimeSeriesModel(AbstractModel):

         return hpo_models, analysis

-    def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> TimeSeriesDataFrame:
-        return data
-
-    def preprocess_known_covariates(
-        self, known_covariates: Optional[TimeSeriesDataFrame]
-    ) -> Optional[TimeSeriesDataFrame]:
-        return known_covariates
+    def preprocess(
+        self,
+        data: TimeSeriesDataFrame,
+        known_covariates: Optional[TimeSeriesDataFrame] = None,
+        is_train: bool = False,
+        **kwargs,
+    ) -> Tuple[TimeSeriesDataFrame, Optional[TimeSeriesDataFrame]]:
+        """Method that implements model-specific preprocessing logic."""
+        return data, known_covariates

     def get_memory_size(self, **kwargs) -> Optional[int]:
         return None
@@ -630,10 +674,12 @@ class AbstractTimeSeriesModel(AbstractModel):
         - allow_nan: Can the model handle data with missing values represented by np.nan?
         - can_refit_full: Does it make sense to retrain the model without validation data?
             See `autogluon.core.models.abstract._tags._DEFAULT_TAGS` for more details.
-        - can_use_val_data: Can model use val_data if it's provided to model.fit()?
+        - can_use_train_data: Can the model use train_data if it's provided to model.fit()?
+        - can_use_val_data: Can the model use val_data if it's provided to model.fit()?
         """
         return {
            "allow_nan": False,
            "can_refit_full": False,
+           "can_use_train_data": True,
            "can_use_val_data": False,
        }
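
For context, the `covariate_scaler` and `covariate_regressor` values read by `_create_covariate_scaler` / `_create_covariate_regressor` above are per-model hyperparameters. A minimal sketch (not part of the diff) of how they might be passed through the standard `TimeSeriesPredictor` entry point; `train_data` and the `"DeepAR"` model choice are assumptions for illustration:

```python
# Sketch: enabling the new covariate scaler/regressor for one model.
# Only the "covariate_scaler" / "covariate_regressor" keys come from this diff.
from autogluon.timeseries import TimeSeriesPredictor

predictor = TimeSeriesPredictor(target="target", prediction_length=24)
predictor.fit(
    train_data,  # a TimeSeriesDataFrame, assumed to exist
    hyperparameters={
        "DeepAR": {
            "covariate_scaler": "global",  # resolved via get_covariate_scaler_from_name
            "covariate_regressor": "CAT",  # string form handled by _create_covariate_regressor
        },
    },
)
```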

autogluon/timeseries/models/autogluon_tabular/mlforecast.py

@@ -107,19 +107,24 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         model._mlf.models_["mean"].predictor = TabularPredictor.load(model.tabular_predictor_path)
         return model

-    def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any:
+    def preprocess(
+        self,
+        data: TimeSeriesDataFrame,
+        known_covariates: Optional[TimeSeriesDataFrame] = None,
+        is_train: bool = False,
+        **kwargs,
+    ) -> Tuple[TimeSeriesDataFrame, Optional[TimeSeriesDataFrame]]:
         if is_train:
             # All-NaN series are removed; partially-NaN series in train_data are handled inside _generate_train_val_dfs
             all_nan_items = data.item_ids[data[self.target].isna().groupby(ITEMID, sort=False).all()]
             if len(all_nan_items):
                 data = data.query("item_id not in @all_nan_items")
-            return data
         else:
             data = data.fill_missing_values()
             # Fill time series consisting of all NaNs with the median of target in train_data
             if data.isna().any(axis=None):
                 data[self.target] = data[self.target].fillna(value=self._train_target_median)
-            return data
+        return data, known_covariates

     def _get_extra_tabular_init_kwargs(self) -> dict:
         raise NotImplementedError
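
The `is_train` branch above drops series whose target is entirely NaN. A self-contained pandas sketch of that filter with toy data (the item/target names are illustrative):

```python
import numpy as np
import pandas as pd

# Two items: "A" is all-NaN, "B" has at least one observed value.
target = pd.Series(
    [np.nan, np.nan, 1.0, np.nan],
    index=pd.Index(["A", "A", "B", "B"], name="item_id"),
)
# Mirrors data[self.target].isna().groupby(ITEMID, sort=False).all()
all_nan = target.isna().groupby("item_id", sort=False).all()
print(all_nan[all_nan].index.tolist())  # ['A'] -> removed from train_data
```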
@@ -288,7 +293,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         fit_start_time = time.time()
         self._train_target_median = train_data[self.target].median()
         for col in self.metadata.known_covariates_real:
-            if not train_data[col].isin([0, 1]).all():
+            if not set(train_data[col].unique()) == set([0, 1]):
                 self._non_boolean_real_covariates.append(col)
         # TabularEstimator is passed to MLForecast later to include tuning_data
         model_params = self._get_model_params()
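
The switch from `isin([0, 1]).all()` to an exact set comparison changes how constant columns are classified; a quick illustration:

```python
import pandas as pd

col = pd.Series([0, 0, 0])           # constant column, not a real boolean flag
print(col.isin([0, 1]).all())        # True  -> old check: treated as boolean
print(set(col.unique()) == {0, 1})   # False -> new check: kept as non-boolean
```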

autogluon/timeseries/models/autogluon_tabular/transforms.py

@@ -13,7 +13,7 @@ from autogluon.timeseries.dataset.ts_dataframe import (
     TIMESTAMP,
     TimeSeriesDataFrame,
 )
-from autogluon.timeseries.transforms.scaler import LocalTargetScaler, get_target_scaler_from_name
+from autogluon.timeseries.transforms.target_scaler import LocalTargetScaler, get_target_scaler_from_name

 from .utils import MLF_ITEMID, MLF_TIMESTAMP

autogluon/timeseries/models/chronos/model.py

@@ -629,7 +629,12 @@ class ChronosModel(AbstractTimeSeriesModel):
         return TimeSeriesDataFrame(df)

     def _more_tags(self) -> Dict:
-        return {"allow_nan": True, "can_use_val_data": self._get_model_params()["fine_tune"]}
+        do_fine_tune = self._get_model_params()["fine_tune"]
+        return {
+            "allow_nan": True,
+            "can_use_train_data": do_fine_tune,
+            "can_use_val_data": do_fine_tune,
+        }

     def score_and_cache_oof(
         self,

autogluon/timeseries/models/gluonts/abstract_gluonts.py

@@ -3,7 +3,7 @@ import os
 import shutil
 from datetime import timedelta
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Type, Union
+from typing import Any, Callable, Dict, Iterator, List, Optional, Type, Union

 import gluonts
 import gluonts.core.settings
@@ -15,8 +15,6 @@ from gluonts.dataset.field_names import FieldName
 from gluonts.model.estimator import Estimator as GluonTSEstimator
 from gluonts.model.forecast import Forecast, QuantileForecast, SampleForecast
 from gluonts.model.predictor import Predictor as GluonTSPredictor
-from sklearn.compose import ColumnTransformer
-from sklearn.preprocessing import QuantileTransformer, StandardScaler

 from autogluon.common.loaders import load_pkl
 from autogluon.core.hpo.constants import RAY_BACKEND
@@ -186,7 +184,6 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             **kwargs,
         )
         self.gts_predictor: Optional[GluonTSPredictor] = None
-        self._real_column_transformers: Dict[Literal["known", "past", "static"], ColumnTransformer] = {}
         self._ohe_generator_known: Optional[OneHotEncoder] = None
         self._ohe_generator_past: Optional[OneHotEncoder] = None
         self.callbacks = []
@@ -286,73 +283,6 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):

         self.negative_data = (dataset[self.target] < 0).any()

-    def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> TimeSeriesDataFrame:
-        # Copy data to avoid SettingWithCopyWarning from pandas
-        data = data.copy()
-        if self.supports_known_covariates and len(self.metadata.known_covariates_real) > 0:
-            columns = self.metadata.known_covariates_real
-            if is_train:
-                self._real_column_transformers["known"] = self._get_transformer_for_columns(data, columns=columns)
-            assert "known" in self._real_column_transformers, "Preprocessing pipeline must be fit first"
-            data[columns] = self._real_column_transformers["known"].transform(data[columns])
-
-        if self.supports_past_covariates and len(self.metadata.past_covariates_real) > 0:
-            columns = self.metadata.past_covariates_real
-            if is_train:
-                self._real_column_transformers["past"] = self._get_transformer_for_columns(data, columns=columns)
-            assert "past" in self._real_column_transformers, "Preprocessing pipeline must be fit first"
-            data[columns] = self._real_column_transformers["past"].transform(data[columns])
-
-        if self.supports_static_features and len(self.metadata.static_features_real) > 0:
-            columns = self.metadata.static_features_real
-            if is_train:
-                self._real_column_transformers["static"] = self._get_transformer_for_columns(
-                    data.static_features, columns=columns
-                )
-            assert "static" in self._real_column_transformers, "Preprocessing pipeline must be fit first"
-            data.static_features[columns] = self._real_column_transformers["static"].transform(
-                data.static_features[columns]
-            )
-        return data
-
-    def _get_transformer_for_columns(self, df: pd.DataFrame, columns: List[str]) -> Dict[str, str]:
-        """Passthrough bool features, use QuantileTransform for skewed features, and use StandardScaler for the rest.
-
-        The preprocessing logic is similar to the TORCH_NN model from Tabular.
-        """
-        skew_threshold = self._get_model_params().get("proc.skew_threshold", 0.99)
-        bool_features = []
-        skewed_features = []
-        continuous_features = []
-        for col in columns:
-            if df[col].isin([0, 1]).all():
-                bool_features.append(col)
-            elif np.abs(df[col].skew()) > skew_threshold:
-                skewed_features.append(col)
-            else:
-                continuous_features.append(col)
-        transformers = []
-        logger.debug(
-            f"\tbool_features: {bool_features}, continuous_features: {continuous_features}, skewed_features: {skewed_features}"
-        )
-        if continuous_features:
-            transformers.append(("scaler", StandardScaler(), continuous_features))
-        if skewed_features:
-            transformers.append(("skew", QuantileTransformer(output_distribution="normal"), skewed_features))
-        with warning_filter():
-            column_transformer = ColumnTransformer(transformers=transformers, remainder="passthrough").fit(df[columns])
-        return column_transformer
-
-    def preprocess_known_covariates(
-        self, known_covariates: Optional[TimeSeriesDataFrame]
-    ) -> Optional[TimeSeriesDataFrame]:
-        columns = self.metadata.known_covariates_real
-        if self.supports_known_covariates and len(columns) > 0:
-            assert "known" in self._real_column_transformers, "Preprocessing pipeline must be fit first"
-            known_covariates = known_covariates.copy()
-            known_covariates[columns] = self._real_column_transformers["known"].transform(known_covariates[columns])
-        return known_covariates
-
     def _get_default_params(self):
         """Gets default parameters for GluonTS estimator initialization that are available after
         AbstractTimeSeriesModel initialization (i.e., before deferred initialization). Models may
@@ -368,6 +298,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
             "freq": self._dummy_gluonts_freq,
             "prediction_length": self.prediction_length,
             "quantiles": self.quantile_levels,
+            "covariate_scaler": "global",
         }

     def _get_model_params(self) -> dict:

autogluon/timeseries/models/local/abstract_local_model.py

@@ -95,10 +95,16 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
             + self.allowed_local_model_args
         )

-    def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any:
+    def preprocess(
+        self,
+        data: TimeSeriesDataFrame,
+        known_covariates: Optional[TimeSeriesDataFrame] = None,
+        is_train: bool = False,
+        **kwargs,
+    ) -> Tuple[TimeSeriesDataFrame, Optional[TimeSeriesDataFrame]]:
         if not self._get_tags()["allow_nan"]:
             data = data.fill_missing_values()
-        return data
+        return data, known_covariates

     def _fit(self, train_data: TimeSeriesDataFrame, time_limit: Optional[int] = None, **kwargs):
         self._check_fit_params()

autogluon/timeseries/models/multi_window/multi_window_model.py

@@ -12,9 +12,7 @@ import autogluon.core as ag
 from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel
-from autogluon.timeseries.regressor import CovariateRegressor
 from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
-from autogluon.timeseries.transforms import LocalTargetScaler

 logger = logging.getLogger(__name__)

@@ -86,14 +84,6 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def get_minimum_resources(self, is_gpu_available: bool = False) -> bool:
         return self._get_model_base().get_minimum_resources(is_gpu_available)

-    def _create_target_scaler(self) -> Optional[LocalTargetScaler]:
-        # Do not use scaler in the MultiWindowModel to avoid duplication; it will be created in the inner model
-        return None
-
-    def _create_covariates_regressor(self) -> Optional[CovariateRegressor]:
-        # Do not use regressor in the MultiWindowModel to avoid duplication; it will be created in the inner model
-        return None
-
     def _fit(
         self,
         train_data: TimeSeriesDataFrame,
@@ -217,7 +207,9 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         return self.model_base._get_search_space()

     def _initialize(self, **kwargs) -> None:
-        super()._initialize(**kwargs)
+        self._init_params_aux()
+        self._init_params()
+        # Do not initialize the target_scaler and covariate_regressor in the multi window model!
         self.model_base.initialize(**kwargs)

     def _get_hpo_train_fn_kwargs(self, **train_fn_kwargs) -> dict:

autogluon/timeseries/regressor.py

@@ -1,3 +1,5 @@
+import logging
+import time
 from typing import Any, Dict, Optional

 import numpy as np
@@ -8,6 +10,8 @@ from autogluon.tabular.trainer.model_presets.presets import MODEL_TYPES as TABUL
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
 from autogluon.timeseries.utils.features import CovariateMetadata

+logger = logging.getLogger(__name__)
+

 class CovariateRegressor:
     """Predicts target values from the covariates for the same observation.
@@ -36,11 +40,16 @@ class CovariateRegressor:
     validation_frac : float, optional
         Fraction of observations that are reserved as the validation set during training (starting from the end of each
         time series).
+    fit_time_fraction: float
+        The fraction of the time_limit that will be reserved for model training. The remainder (1 - fit_time_fraction)
+        will be reserved for prediction.
+
+        If the estimated prediction time exceeds `(1 - fit_time_fraction) * time_limit`, the regressor will be disabled.
     """

     def __init__(
         self,
-        model_name: str = "GBM",
+        model_name: str = "CAT",
         model_hyperparameters: Optional[Dict[str, Any]] = None,
         eval_metric: str = "mean_absolute_error",
         refit_during_predict: bool = False,
@@ -48,6 +57,7 @@ class CovariateRegressor:
         metadata: Optional[CovariateMetadata] = None,
         target: str = "target",
         validation_fraction: Optional[float] = 0.1,
+        fit_time_fraction: float = 0.5,
     ):
         if model_name not in TABULAR_MODEL_TYPES:
             raise ValueError(
@@ -61,7 +71,10 @@ class CovariateRegressor:
         self.tabular_eval_metric = eval_metric
         self.max_num_samples = max_num_samples
         self.validation_fraction = validation_fraction
+        self.fit_time_fraction = fit_time_fraction
+
         self.model: Optional[AbstractModel] = None
+        self.disabled_due_to_time_limit = False
         self.metadata = metadata or CovariateMetadata()

     def is_fit(self) -> bool:
@@ -69,12 +82,13 @@ class CovariateRegressor:

     def fit(self, data: TimeSeriesDataFrame, time_limit: Optional[float] = None, **kwargs) -> "CovariateRegressor":
         """Fit the tabular regressor on the target column using covariates as features."""
+        start_time = time.monotonic()
         tabular_df = self._get_tabular_df(data, static_features=data.static_features, include_target=True)
         tabular_df = tabular_df.query(f"{self.target}.notnull()")

         median_ts_length = data.num_timesteps_per_item().median()
         if self.validation_fraction is not None:
-            grouped_df = tabular_df.groupby(ITEMID)
+            grouped_df = tabular_df.groupby(ITEMID, observed=False, sort=False)
             val_size = max(int(self.validation_fraction * median_ts_length), 1)
             train_df = self._subsample_df(grouped_df.head(-val_size))
             val_df = self._subsample_df(grouped_df.tail(val_size))
@@ -91,16 +105,34 @@ class CovariateRegressor:

         self.model = self.model_type(
             problem_type="regression",
-            hyperparameters=self.model_hyperparameters,
+            hyperparameters={
+                **self.model_hyperparameters,
+                "ag_args_fit": {"predict_1_batch_size": 10000},  # needed to compute predict_1_time
+            },
             eval_metric=self.tabular_eval_metric,
         )
-        self.model.fit(X=X, y=y, X_val=X_val, y_val=y_val, time_limit=time_limit, **kwargs)
+        if time_limit is not None:
+            time_limit_fit = self.fit_time_fraction * (time_limit - (time.monotonic() - start_time))
+        else:
+            time_limit_fit = None
+        self.model.fit(X=X, y=y, X_val=X_val, y_val=y_val, time_limit=time_limit_fit, **kwargs)
+
+        if time_limit is not None:
+            time_left = time_limit - (time.monotonic() - start_time)
+            estimated_predict_time = self.model.predict_1_time * len(data)
+            if estimated_predict_time > time_left:
+                logger.warning(
+                    f"\tDisabling the covariate_regressor since {estimated_predict_time=:.1f} exceeds {time_left=:.1f}."
+                )
+                self.disabled_due_to_time_limit = True
         return self

     def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
         """Subtract the tabular regressor predictions from the target column."""
-        y_pred = self._predict(data, static_features=data.static_features)
-        return data.assign(**{self.target: data[self.target] - y_pred})
+        if not self.disabled_due_to_time_limit:
+            y_pred = self._predict(data, static_features=data.static_features)
+            data = data.assign(**{self.target: data[self.target] - y_pred})
+        return data

     def fit_transform(
         self, data: TimeSeriesDataFrame, time_limit: Optional[float] = None, **kwargs
@@ -116,8 +148,10 @@ class CovariateRegressor:
         static_features: Optional[pd.DataFrame],
     ) -> TimeSeriesDataFrame:
         """Add the tabular regressor predictions to the target column."""
-        y_pred = self._predict(known_covariates, static_features=static_features)
-        return predictions.assign(**{col: predictions[col] + y_pred for col in predictions.columns})
+        if not self.disabled_due_to_time_limit:
+            y_pred = self._predict(known_covariates, static_features=static_features)
+            predictions = predictions.assign(**{col: predictions[col] + y_pred for col in predictions.columns})
+        return predictions

     def _predict(self, data: TimeSeriesDataFrame, static_features: Optional[pd.DataFrame]) -> np.ndarray:
         """Construct the tabular features matrix and make predictions"""

autogluon/timeseries/trainer/abstract_trainer.py

@@ -249,6 +249,7 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):

     max_rel_importance_score: float = 1e5
     eps_abs_importance_score: float = 1e-5
+    max_ensemble_time_limit: float = 600.0

     def __init__(
         self,
@@ -608,7 +609,9 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
             else:
                 time_left = time_limit - (time.time() - time_start)
                 if num_base_models > 1 and self.enable_ensemble:
-                    time_reserved_for_ensemble = min(600.0, time_left / (num_base_models - i + 1))
+                    time_reserved_for_ensemble = min(
+                        self.max_ensemble_time_limit, time_left / (num_base_models - i + 1)
+                    )
                     logger.debug(f"Reserving {time_reserved_for_ensemble:.1f}s for ensemble")
                 else:
                     time_reserved_for_ensemble = 0.0
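
`max_ensemble_time_limit` only names the previously hard-coded 600.0s cap. With hypothetical values, the reservation works out as:

```python
max_ensemble_time_limit = 600.0
time_left, num_base_models, i = 1000.0, 5, 2

reserved = min(max_ensemble_time_limit, time_left / (num_base_models - i + 1))
print(reserved)  # 250.0 -> the smaller of the cap and the per-model share
```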

autogluon/timeseries/transforms/__init__.py

@@ -1,4 +1,9 @@
-from .scaler import (
+from .covariate_scaler import (
+    CovariateScaler,
+    GlobalCovariateScaler,
+    get_covariate_scaler_from_name,
+)
+from .target_scaler import (
     LocalStandardScaler,
     LocalMinMaxScaler,
     LocalMeanAbsScaler,

autogluon/timeseries/transforms/covariate_scaler.py (new file)

@@ -0,0 +1,159 @@
+import logging
+from typing import Dict, List, Literal, Optional
+
+import numpy as np
+import pandas as pd
+from sklearn.compose import ColumnTransformer
+from sklearn.preprocessing import QuantileTransformer, StandardScaler
+
+from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
+from autogluon.timeseries.utils.features import CovariateMetadata
+from autogluon.timeseries.utils.warning_filters import warning_filter
+
+logger = logging.getLogger(__name__)
+
+
+class CovariateScaler:
+    """Apply scaling to covariates and static features.
+
+    This can be helpful for deep learning models that assume that the inputs are normalized.
+    """
+
+    def __init__(
+        self,
+        metadata: CovariateMetadata,
+        use_known_covariates: bool = True,
+        use_past_covariates: bool = True,
+        use_static_features: bool = True,
+        **kwargs,
+    ):
+        self.metadata = metadata
+        self.use_known_covariates = use_known_covariates
+        self.use_past_covariates = use_past_covariates
+        self.use_static_features = use_static_features
+
+    def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        raise NotImplementedError
+
+    def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        raise NotImplementedError
+
+    def transform_known_covariates(
+        self, known_covariates: Optional[TimeSeriesDataFrame] = None
+    ) -> Optional[TimeSeriesDataFrame]:
+        raise NotImplementedError
+
+
+class GlobalCovariateScaler(CovariateScaler):
+    """Applies preprocessing logic similar to tabular's NN_TORCH model to the covariates.
+
+    Performs following preprocessing for real-valued columns:
+    - sklearn.preprocessing.QuantileTransform for skewed features
+    - passthrough (ignore) boolean features
+    - sklearn.preprocessing.StandardScaler for the rest of the features
+
+    Preprocessing is done globally across all items.
+    """
+
+    def __init__(
+        self,
+        metadata: CovariateMetadata,
+        use_known_covariates: bool = True,
+        use_past_covariates: bool = True,
+        use_static_features: bool = True,
+        skew_threshold: float = 0.99,
+    ):
+        super().__init__(metadata, use_known_covariates, use_past_covariates, use_static_features)
+        self.skew_threshold = skew_threshold
+        self._column_transformers: Optional[Dict[Literal["known", "past", "static"], ColumnTransformer]] = None
+
+    def is_fit(self) -> bool:
+        return self._column_transformers is not None
+
+    def fit(self, data: TimeSeriesDataFrame) -> "GlobalCovariateScaler":
+        self._column_transformers = {}
+
+        if self.use_known_covariates and len(self.metadata.known_covariates_real) > 0:
+            self._column_transformers["known"] = self._get_transformer_for_columns(
+                data, columns=self.metadata.known_covariates_real
+            )
+        if self.use_past_covariates and len(self.metadata.past_covariates_real) > 0:
+            self._column_transformers["past"] = self._get_transformer_for_columns(
+                data, columns=self.metadata.past_covariates_real
+            )
+        if self.use_static_features and len(self.metadata.static_features_real) > 0:
+            self._column_transformers["static"] = self._get_transformer_for_columns(
+                data.static_features, columns=self.metadata.static_features_real
+            )
+
+    def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        if not self.is_fit():
+            self.fit(data=data)
+        return self.transform(data=data)
+
+    def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        # Copy data to avoid inplace modification
+        data = data.copy()
+        if "known" in self._column_transformers:
+            columns = self.metadata.known_covariates_real
+            data[columns] = self._column_transformers["known"].transform(data[columns])
+
+        if "past" in self._column_transformers:
+            columns = self.metadata.past_covariates_real
+            data[columns] = self._column_transformers["past"].transform(data[columns])
+
+        if "static" in self._column_transformers:
+            columns = self.metadata.static_features_real
+            data.static_features[columns] = self._column_transformers["static"].transform(
+                data.static_features[columns]
+            )
+        return data
+
+    def transform_known_covariates(
+        self, known_covariates: Optional[TimeSeriesDataFrame] = None
+    ) -> Optional[TimeSeriesDataFrame]:
+        if "known" in self._column_transformers:
+            columns = self.metadata.known_covariates_real
+            known_covariates = known_covariates.copy()
+            known_covariates[columns] = self._column_transformers["known"].transform(known_covariates[columns])
+        return known_covariates
+
+    def _get_transformer_for_columns(self, df: pd.DataFrame, columns: List[str]) -> Dict[str, str]:
+        """Passthrough bool features, use QuantileTransform for skewed features, and use StandardScaler for the rest.
+
+        The preprocessing logic is similar to the TORCH_NN model from Tabular.
+        """
+        bool_features = []
+        skewed_features = []
+        continuous_features = []
+        for col in columns:
+            if set(df[col].unique()) == set([0, 1]):
+                bool_features.append(col)
+            elif np.abs(df[col].skew()) > self.skew_threshold:
+                skewed_features.append(col)
+            else:
+                continuous_features.append(col)
+        transformers = []
+        logger.debug(
+            f"\tbool_features: {bool_features}, continuous_features: {continuous_features}, skewed_features: {skewed_features}"
+        )
+        if continuous_features:
+            transformers.append(("scaler", StandardScaler(), continuous_features))
+        if skewed_features:
+            transformers.append(("skew", QuantileTransformer(output_distribution="normal"), skewed_features))
+        with warning_filter():
+            column_transformer = ColumnTransformer(transformers=transformers, remainder="passthrough").fit(df[columns])
+        return column_transformer
+
+
+AVAILABLE_COVARIATE_SCALERS = {
+    "global": GlobalCovariateScaler,
+}
+
+
+def get_covariate_scaler_from_name(name: Literal["global"], **scaler_kwargs) -> CovariateScaler:
+    if name not in AVAILABLE_COVARIATE_SCALERS:
+        raise KeyError(
+            f"Covariate scaler type {name} not supported. Available scalers: {list(AVAILABLE_COVARIATE_SCALERS)}"
+        )
+    return AVAILABLE_COVARIATE_SCALERS[name](**scaler_kwargs)
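
A minimal, self-contained sketch of the new scaler used in isolation; the toy dataset and the `CovariateMetadata` construction are assumptions, only the scaler API comes from this diff:

```python
import numpy as np
import pandas as pd

from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
from autogluon.timeseries.transforms import get_covariate_scaler_from_name
from autogluon.timeseries.utils.features import CovariateMetadata

# Toy dataset with a single real-valued known covariate "price".
df = pd.DataFrame({
    "item_id": ["A"] * 10,
    "timestamp": pd.date_range("2024-01-01", periods=10, freq="D"),
    "target": np.arange(10.0),
    "price": np.random.default_rng(0).normal(100.0, 5.0, size=10),
})
data = TimeSeriesDataFrame.from_data_frame(df)

metadata = CovariateMetadata(known_covariates_real=["price"])
scaler = get_covariate_scaler_from_name("global", metadata=metadata)
data_scaled = scaler.fit_transform(data)  # "price" is scaled globally across items
```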

autogluon/timeseries/transforms/{scaler.py → target_scaler.py}

@@ -115,7 +115,7 @@ class LocalRobustScaler(LocalTargetScaler):
         return loc, scale


-AVAILABLE_SCALERS = {
+AVAILABLE_TARGET_SCALERS = {
     "standard": LocalStandardScaler,
     "mean_abs": LocalMeanAbsScaler,
     "min_max": LocalMinMaxScaler,
@@ -127,6 +127,6 @@ def get_target_scaler_from_name(
     name: Literal["standard", "mean_abs", "min_max", "robust"], **scaler_kwargs
 ) -> LocalTargetScaler:
     """Get LocalTargetScaler object from a string."""
-    if name not in AVAILABLE_SCALERS:
-        raise KeyError(f"Scaler type {name} not supported. Available scalers: {list(AVAILABLE_SCALERS)}")
-    return AVAILABLE_SCALERS[name](**scaler_kwargs)
+    if name not in AVAILABLE_TARGET_SCALERS:
+        raise KeyError(f"Scaler type {name} not supported. Available scalers: {list(AVAILABLE_TARGET_SCALERS)}")
+    return AVAILABLE_TARGET_SCALERS[name](**scaler_kwargs)

autogluon/timeseries/version.py

@@ -1,3 +1,3 @@
 """This is the autogluon version file."""
-__version__ = '1.1.2b20241115'
+__version__ = '1.1.2b20241117'
 __lite__ = False

{autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.1.2b20241115
+Version: 1.1.2b20241117
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -53,9 +53,9 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.1.2b20241115
-Requires-Dist: autogluon.common==1.1.2b20241115
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20241115
+Requires-Dist: autogluon.core[raytune]==1.1.2b20241117
+Requires-Dist: autogluon.common==1.1.2b20241117
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20241117
 Provides-Extra: all
 Requires-Dist: optimum[onnxruntime]<1.20,>=1.17; extra == "all"
 Provides-Extra: chronos-onnx

{autogluon.timeseries-1.1.2b20241115.dist-info → autogluon.timeseries-1.1.2b20241117.dist-info}/RECORD

@@ -1,11 +1,11 @@
-autogluon.timeseries-1.1.2b20241115-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon.timeseries-1.1.2b20241117-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
 autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
 autogluon/timeseries/learner.py,sha256=3dUxI-U6TGfNtRQUzWTvBIo1GKeXYOhxIX_q7Fed9eA,14013
 autogluon/timeseries/predictor.py,sha256=R9m-TYmlA4WoJbdYEL_AnEM26EhRIclynOfSmpO7mBk,84926
-autogluon/timeseries/regressor.py,sha256=wcYbvE7kFopdscubfhIfeLI3ovxKe_fUVtt0b1zWdV0,6823
+autogluon/timeseries/regressor.py,sha256=tqQ2zWImxpuEyaAM0DeCjOZ-xcWUYZbCXsqd471xXxQ,8351
 autogluon/timeseries/splitter.py,sha256=eghGwAAN2_cxGk5aJBILgjGWtLzjxJcytMy49gg_q18,3061
-autogluon/timeseries/version.py,sha256=k5yEeIHMAa7wwNFT6Xs3BeQ8qrgzCQ6ICoNHeZ7GR9g,90
+autogluon/timeseries/version.py,sha256=1hvS4kU1IUb-NlFd5oNdksEFtbUBGKd2TXKJ5K6_SwU,90
 autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
 autogluon/timeseries/configs/presets_configs.py,sha256=94-yL9teDHKs2irWjP3kpewI7FE1ChYCgEgz9XHJ6gc,1965
 autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
@@ -18,14 +18,14 @@ autogluon/timeseries/metrics/utils.py,sha256=eJ63TCR-UwbeJ1c2Qm7B2q-8B3sFthPgioo
 autogluon/timeseries/models/__init__.py,sha256=MYD9JJ-wUDE5B6jW6E6LU2eXQ6vflfQBvqQJkdzJa3A,1189
 autogluon/timeseries/models/presets.py,sha256=ujNt_hft_5eNkh-Wj_Na9GBdBmI-JdnBnOEHq8X0qXc,11778
 autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=kVbJHyDWXmBJDL_4mUhEvpTG_d85vEjW5Og57d5CNN0,28092
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=B1R0PBymUAwhIGkPIt29X-J9o9Ipdu-bQR0gK-nmcRU,30320
 autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
 autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
-autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=C1WVcuNlTcqo_qGm3v0uPpraO06mdVnBNeflPbCPjNQ,32861
-autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=FozTzwcp1QjevEhrMLXsJHy8fymOcq1146oX4Al60wg,2517
+autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=aU82AXXbpum-rtpOGGhK0r1CSYZclgDX5qG8nNl_2Mo,33018
+autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=XVoy8KpvoeX38lHHAXq4Be9LCxKjxZ36SOFeSAICRFM,2524
 autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
 autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
-autogluon/timeseries/models/chronos/model.py,sha256=Z3CtfRux7W2x7mghbq8YLGFLu9kTAsozid43AN1lH_s,30151
+autogluon/timeseries/models/chronos/model.py,sha256=pkT-V6yYCxz6TTgn1mQ5QHNTdpyn_wyj7jM80vnrDFQ,30270
 autogluon/timeseries/models/chronos/pipeline/__init__.py,sha256=N-YZH9BGBoi99r5cznJe1zEEjwjIg7cOYIHZkKuJq44,247
 autogluon/timeseries/models/chronos/pipeline/base.py,sha256=aAXCKy7Jmip4BI2UdPMoPe2gdDMbJHKxEolcTx_5SYQ,5463
 autogluon/timeseries/models/chronos/pipeline/chronos.py,sha256=iHKyw3Juml247jl7bEbGlabtMyp3ibYEoA7rHiUC9f8,22048
@@ -35,21 +35,22 @@ autogluon/timeseries/models/ensemble/__init__.py,sha256=kFr11Gmt7lQJu9Rr8HuIPphQ
 autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py,sha256=tifETwmiEGt-YtQ9eNK7ojJ3fBvtFMUJvisbfkIJ7gw,3393
 autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=5HvZuW5osgsZg3V69k82nKEOy_YgeH1JTfQa7F3cU7s,7220
 autogluon/timeseries/models/gluonts/__init__.py,sha256=asC1PTj4j9xMbilvk1IT1julnpeoKbv5ZNuAR6-DFgA,361
-autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=4_YPUjT0oIzELHn34C0a7n21zkhjc1Yijgz9egxxPdw,34501
+autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=M4vGs5oNZqr_ebE4UwBonqvb1g-yzHylPe5s9GOeGDY,30560
 autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/timeseries/models/gluonts/torch/models.py,sha256=Pu7f43jr1C5S3k_bVqRB8ENuBHNEWT4ssUTdZoA1J58,25556
 autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
-autogluon/timeseries/models/local/abstract_local_model.py,sha256=OxEkqzfAd5diQDUYStw2nI-X2lo3H8GcMLDJ6-1XL_Y,12417
+autogluon/timeseries/models/local/abstract_local_model.py,sha256=WBBJp2h2UMUVuguTrUmJOI21neil-ZnGGZDy-zJPD2M,12592
 autogluon/timeseries/models/local/naive.py,sha256=iwRcFMFmJKPWPbD9TWaIUS51oav69F_VAp6-jb_5SUE,7249
 autogluon/timeseries/models/local/npts.py,sha256=Bp74doKnfpGE8ywP4FWOCI_RwRMsmgocYDfGtq764DA,4143
 autogluon/timeseries/models/local/statsforecast.py,sha256=cFJ_A7LR2jTmFNGgMxt3xvEivQVYuV6bDCMii8-TKH0,32424
 autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
-autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=aNS0W4bh9quaxgFRtyJesVziPKHvg4lfCDrAQutqEjk,12014
+autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=mTxqPBsN0Ri2c56MYZU6pMF9dDFyEl5eHmhqHPe3ouw,11596
 autogluon/timeseries/trainer/__init__.py,sha256=lxiOT-Gc6BEnr_yWQqra85kEngeM_wtH2SCaRbmC_qE,170
-autogluon/timeseries/trainer/abstract_trainer.py,sha256=hZI4QcsFvU1gxP2yv_DRCIMlc6q02ptR7UDA9EgJPoM,60409
+autogluon/timeseries/trainer/abstract_trainer.py,sha256=zCzzVO8yssqSyteMoUkDT5zWi44Oc91pRWiRdQR1We8,60521
 autogluon/timeseries/trainer/auto_trainer.py,sha256=psJFZBwWWPlLjNwAgvO4OUJXsRW1sTN2YS9a4pdoeoE,3344
-autogluon/timeseries/transforms/__init__.py,sha256=lzDavxdgGIz5m_DmSpNa9ewNU9Evndam3YXfOEk6kwY,174
-autogluon/timeseries/transforms/scaler.py,sha256=jgj9-637zgDREJidNpavKIQbF0y6RB_zwPGKWAGa6lw,5344
+autogluon/timeseries/transforms/__init__.py,sha256=Stym_998LZQgKPuFN4_w1AcJFh4_AeaQLXgXLzv53kY,299
+autogluon/timeseries/transforms/covariate_scaler.py,sha256=iscshgfNTCn379Q73BJXyDUFFm1WRclzKdG2MIPTOEc,6587
+autogluon/timeseries/transforms/target_scaler.py,sha256=8gxXy0zavR0ck48UVvnI2UXE8TV6ScFGoZdAvBrIF84,5372
 autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/timeseries/utils/features.py,sha256=VvBQzaymSSzxI9khtcXbpir-qo1NWHe51O7F6ynyh_s,21943
 autogluon/timeseries/utils/forecast.py,sha256=p0WKM9Q0nLAwwmCgYZI1zi9mCOWXWJfllEt2lPRQl4M,1882
@@ -59,11 +60,11 @@ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbj
 autogluon/timeseries/utils/datetime/lags.py,sha256=GoLtvcZ8oKb3QkoBJ9E59LSPLOP7Qjxrr2UmMSZgjyw,5909
 autogluon/timeseries/utils/datetime/seasonality.py,sha256=h_4w00iEytAz_N_EpCENQ8RCXy7KQITczrYjBgVqWkQ,764
 autogluon/timeseries/utils/datetime/time_features.py,sha256=PAXbYbQ0z_5GFbkxSNi41zLY_2-U3x0Ynm1m_WhdtGc,2572
-autogluon.timeseries-1.1.2b20241115.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.timeseries-1.1.2b20241115.dist-info/METADATA,sha256=GWCpPfrJry0oxm90irCuNAdgEvasKyVcnn4bnbWnshA,12388
-autogluon.timeseries-1.1.2b20241115.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.timeseries-1.1.2b20241115.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
-autogluon.timeseries-1.1.2b20241115.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.1.2b20241115.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.1.2b20241115.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.timeseries-1.1.2b20241115.dist-info/RECORD,,
+autogluon.timeseries-1.1.2b20241117.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.timeseries-1.1.2b20241117.dist-info/METADATA,sha256=TDdc_3hRS9w8jJ4FFfvn2H_qt_yKWsBzwBr3kHgA3iM,12388
+autogluon.timeseries-1.1.2b20241117.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.timeseries-1.1.2b20241117.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
+autogluon.timeseries-1.1.2b20241117.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.1.2b20241117.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.1.2b20241117.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.timeseries-1.1.2b20241117.dist-info/RECORD,,