autogluon.timeseries 1.1.2b20240924__py3-none-any.whl → 1.1.2b20240926__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. autogluon/timeseries/learner.py +10 -2
  2. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +29 -0
  3. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +40 -48
  4. autogluon/timeseries/models/autogluon_tabular/transforms.py +60 -0
  5. autogluon/timeseries/models/autogluon_tabular/utils.py +3 -51
  6. autogluon/timeseries/models/gluonts/torch/models.py +18 -5
  7. autogluon/timeseries/models/local/abstract_local_model.py +10 -1
  8. autogluon/timeseries/models/multi_window/multi_window_model.py +7 -0
  9. autogluon/timeseries/models/presets.py +1 -1
  10. autogluon/timeseries/predictor.py +22 -4
  11. autogluon/timeseries/trainer/abstract_trainer.py +28 -2
  12. autogluon/timeseries/transforms/__init__.py +8 -0
  13. autogluon/timeseries/transforms/scaler.py +132 -0
  14. autogluon/timeseries/version.py +1 -1
  15. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/METADATA +7 -6
  16. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/RECORD +23 -20
  17. /autogluon.timeseries-1.1.2b20240924-py3.8-nspkg.pth → /autogluon.timeseries-1.1.2b20240926-py3.8-nspkg.pth +0 -0
  18. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/LICENSE +0 -0
  19. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/NOTICE +0 -0
  20. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/WHEEL +0 -0
  21. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/namespace_packages.txt +0 -0
  22. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/top_level.txt +0 -0
  23. {autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/zip-safe +0 -0

autogluon/timeseries/learner.py
@@ -280,10 +280,18 @@ class TimeSeriesLearner(AbstractLearner):
 
         return importance_df
 
-    def leaderboard(self, data: Optional[TimeSeriesDataFrame] = None, use_cache: bool = True) -> pd.DataFrame:
+    def leaderboard(
+        self,
+        data: Optional[TimeSeriesDataFrame] = None,
+        extra_info: bool = False,
+        extra_metrics: Optional[List[Union[str, TimeSeriesScorer]]] = None,
+        use_cache: bool = True,
+    ) -> pd.DataFrame:
         if data is not None:
             data = self.feature_generator.transform(data)
-        return self.load_trainer().leaderboard(data, use_cache=use_cache)
+        return self.load_trainer().leaderboard(
+            data, extra_info=extra_info, extra_metrics=extra_metrics, use_cache=use_cache
+        )
 
     def get_info(self, include_model_info: bool = False, **kwargs) -> Dict[str, Any]:
         learner_info = super().get_info(include_model_info=include_model_info)

autogluon/timeseries/models/abstract/abstract_timeseries_model.py
@@ -13,6 +13,7 @@ from autogluon.core.hpo.executors import HpoExecutor, RayHpoExecutor
 from autogluon.core.models import AbstractModel
 from autogluon.timeseries.dataset import TimeSeriesDataFrame
 from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
+from autogluon.timeseries.transforms import LocalTargetScaler, get_target_scaler_from_name
 from autogluon.timeseries.utils.features import CovariateMetadata
 from autogluon.timeseries.utils.warning_filters import disable_stdout, warning_filter
 
@@ -122,6 +123,7 @@ class AbstractTimeSeriesModel(AbstractModel):
         self.must_drop_median = False
 
         self._oof_predictions: Optional[List[TimeSeriesDataFrame]] = None
+        self.target_scaler: Optional[LocalTargetScaler] = None
 
     def __repr__(self) -> str:
         return self.name
@@ -241,11 +243,32 @@ class AbstractTimeSeriesModel(AbstractModel):
         model: AbstractTimeSeriesModel
             The fitted model object
         """
+        self.initialize(**kwargs)
+        self.target_scaler = self._create_target_scaler()
+        if self.target_scaler is not None:
+            train_data = self.target_scaler.fit_transform(train_data)
+
         train_data = self.preprocess(train_data, is_train=True)
         if self._get_tags()["can_use_val_data"] and val_data is not None:
+            if self.target_scaler is not None:
+                val_data = self.target_scaler.transform(val_data)
             val_data = self.preprocess(val_data, is_train=False)
         return super().fit(train_data=train_data, val_data=val_data, **kwargs)
 
+    @property
+    def allowed_hyperparameters(self) -> List[str]:
+        """List of hyperparameters allowed by the model."""
+        return ["target_scaler"]
+
+    def _create_target_scaler(self) -> Optional[LocalTargetScaler]:
+        """Create a LocalTargetScaler object based on the value of the `target_scaler` hyperparameter."""
+        # TODO: Add support for custom target transforms (e.g., Box-Cox, log1p, ...)
+        target_scaler_type = self._get_model_params().get("target_scaler")
+        if target_scaler_type is not None:
+            return get_target_scaler_from_name(target_scaler_type, target=self.target)
+        else:
+            return None
+
     def _fit(
         self,
         train_data: TimeSeriesDataFrame,
@@ -299,6 +322,9 @@ class AbstractTimeSeriesModel(AbstractModel):
         data is given as a separate forecast item in the dictionary, keyed by the `item_id`s
         of input items.
         """
+        if self.target_scaler is not None:
+            data = self.target_scaler.fit_transform(data)
+
         data = self.preprocess(data, is_train=False)
         known_covariates = self.preprocess_known_covariates(known_covariates)
         predictions = self._predict(data=data, known_covariates=known_covariates, **kwargs)
@@ -309,6 +335,9 @@ class AbstractTimeSeriesModel(AbstractModel):
             predictions["mean"] = predictions["0.5"]
         if self.must_drop_median:
             predictions = predictions.drop("0.5", axis=1)
+
+        if self.target_scaler is not None:
+            predictions = self.target_scaler.inverse_transform(predictions)
         return predictions
 
     def _predict(
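
Note: with these hooks in place, any AutoGluon-TimeSeries model can request per-series scaling via a `target_scaler` hyperparameter; the scaler is fit on the training data (and re-fit on the context data at prediction time), and its inverse is applied to the final forecasts. A minimal usage sketch, assuming `train_data` is a TimeSeriesDataFrame (the variable names are illustrative, not from this diff):

    from autogluon.timeseries import TimeSeriesPredictor

    predictor = TimeSeriesPredictor(prediction_length=24).fit(
        train_data,  # assumed: a TimeSeriesDataFrame in long format
        hyperparameters={
            # one of "standard", "mean_abs", "min_max", "robust" (see scaler.py below),
            # or omit the key / pass None to disable scaling
            "DeepAR": {"target_scaler": "standard"},
        },
    )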

autogluon/timeseries/models/autogluon_tabular/mlforecast.py
@@ -21,11 +21,9 @@ from autogluon.timeseries.utils.datetime import (
 from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
 from autogluon.timeseries.utils.warning_filters import warning_filter
 
-logger = logging.getLogger(__name__)
+from .utils import MLF_ITEMID, MLF_TARGET, MLF_TIMESTAMP
 
-MLF_TARGET = "y"
-MLF_ITEMID = "unique_id"
-MLF_TIMESTAMP = "ds"
+logger = logging.getLogger(__name__)
 
 
 class TabularEstimator(BaseEstimator):
@@ -137,7 +135,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
     def _get_mlforecast_init_args(self, train_data: TimeSeriesDataFrame, model_params: dict) -> dict:
         from mlforecast.target_transforms import Differences
 
-        from .utils import MeanAbsScaler, StandardScaler
+        from .transforms import MLForecastScaler
 
         lags = model_params.get("lags")
         if lags is None:
@@ -167,19 +165,10 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
             target_transforms.append(Differences(differences))
             self._sum_of_differences = sum(differences)
 
-        scaler_name = model_params.get("scaler")
-        if scaler_name is None:
-            pass
-        elif scaler_name == "standard":
-            self._scaler = StandardScaler()
-        elif scaler_name == "mean_abs":
-            self._scaler = MeanAbsScaler()
-        else:
-            logger.warning(
-                f"Unrecognized `scaler` {scaler_name} (supported options: ['standard', 'mean_abs', None]). Scaling disabled."
-            )
-
-        if self._scaler is not None:
+        # Support "scaler" for backward compatibility
+        scaler_type = model_params.get("target_scaler", model_params.get("scaler"))
+        if scaler_type is not None:
+            self._scaler = MLForecastScaler(scaler_type=scaler_type)
             target_transforms.append(self._scaler)
 
         return {
@@ -323,7 +312,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
             },
             predictor_fit_kwargs={
                 "tuning_data": val_df.drop(columns=[MLF_ITEMID]),
-                "time_limit": None if time_limit is None else time_limit - (time.time() - fit_start_time),
+                "time_limit": (None if time_limit is None else time_limit - (time.time() - fit_start_time)),
                 "hyperparameters": model_params["tabular_hyperparameters"],
                 **model_params["tabular_fit_kwargs"],
             },
@@ -340,14 +329,18 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
 
         Saves per-item residuals to `self.residuals_std_per_item` and average std to `self._avg_residuals_std`.
         """
-        residuals = val_df[MLF_TARGET] - self._mlf.models_["mean"].predict(val_df)
-        self._residuals_std_per_item = residuals.pow(2.0).groupby(val_df[MLF_ITEMID], sort=False).mean().pow(0.5)
+        residuals_df = val_df[[MLF_ITEMID, MLF_TARGET]]
+        residuals_df = residuals_df.assign(y_pred=self._mlf.models_["mean"].predict(val_df))
+        if self._scaler is not None:
+            # Scaler expects to find column MLF_TIMESTAMP even though it's not used - fill with dummy
+            residuals_df = residuals_df.assign(**{MLF_TIMESTAMP: 1})
+            residuals_df = self._scaler.inverse_transform(residuals_df)
+        residuals = residuals_df[MLF_TARGET] - residuals_df["y_pred"]
+        self._residuals_std_per_item = (
+            residuals.pow(2.0).groupby(val_df[MLF_ITEMID].values, sort=False).mean().pow(0.5)
+        )
         self._avg_residuals_std = np.sqrt(residuals.pow(2.0).mean())
 
-    def _get_scale_per_item(self, item_ids: pd.Index) -> pd.Series:
-        """Extract the '_scale' values from the scaler object, if available."""
-        raise NotImplementedError
-
     def _remove_short_ts_and_generate_fallback_forecast(
         self,
         data: TimeSeriesDataFrame,
@@ -399,7 +392,6 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         """
        from scipy.stats import norm
 
-        scale_per_item = self._get_scale_per_item(repeated_item_ids.unique())
         num_items = int(len(predictions) / self.prediction_length)
         sqrt_h = np.sqrt(np.arange(1, self.prediction_length + 1))
         # Series where normal_scale_per_timestep.loc[item_id].loc[N] = sqrt(1 + N) for N in range(prediction_length)
@@ -409,7 +401,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         # Use avg_residuals_std in case unseen item received for prediction
         if residuals_std_per_timestep.isna().any():
             residuals_std_per_timestep = residuals_std_per_timestep.fillna(value=self._avg_residuals_std)
-        std_per_timestep = residuals_std_per_timestep * scale_per_item * normal_scale_per_timestep
+        std_per_timestep = residuals_std_per_timestep * normal_scale_per_timestep
         for q in self.quantile_levels:
             predictions[str(q)] = predictions["mean"] + norm.ppf(q) * std_per_timestep.to_numpy()
         return predictions
@@ -417,6 +409,10 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
     def _more_tags(self) -> dict:
         return {"allow_nan": True, "can_refit_full": True}
 
+    def _create_target_scaler(self):
+        # Do not create a scaler in the model, scaler will be passed to MLForecast
+        return None
+
 
 class DirectTabularModel(AbstractMLForecastModel):
     """Predict all future time series values simultaneously using TabularPredictor from AutoGluon-Tabular.
@@ -449,8 +445,8 @@ class DirectTabularModel(AbstractMLForecastModel):
         Differences to take of the target before computing the features. These are restored at the forecasting step.
         If None, will be set to ``[seasonal_period]``, where seasonal_period is determined based on the data frequency.
         Defaults to no differencing.
-    scaler : {"standard", "mean_abs", None}, default = "mean_abs"
-        Scaling applied to each time series.
+    target_scaler : {"standard", "mean_abs", "min_max", "robust", None}, default = "mean_abs"
+        Scaling applied to each time series. Scaling is applied after differencing.
     tabular_hyperparameters : Dict[Dict[str, Any]], optional
         Hyperparameters dictionary passed to ``TabularPredictor.fit``. Contains the names of models that should be fit.
         Defaults to ``{"GBM": {}}``.
@@ -472,8 +468,9 @@ class DirectTabularModel(AbstractMLForecastModel):
 
     def _get_model_params(self) -> dict:
         model_params = super()._get_model_params()
-        model_params.setdefault("scaler", "mean_abs")
-        model_params.setdefault("differences", [])
+        model_params.setdefault("target_scaler", "mean_abs")
+        if "differences" not in model_params or model_params["differences"] is None:
+            model_params["differences"] = []
         return model_params
 
     def _mask_df(self, df: pd.DataFrame) -> pd.DataFrame:
@@ -501,6 +498,8 @@ class DirectTabularModel(AbstractMLForecastModel):
         known_covariates: Optional[TimeSeriesDataFrame] = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
+        from .transforms import apply_inverse_transform
+
         original_item_id_order = data.item_ids
         data, known_covariates, forecast_for_short_series = self._remove_short_ts_and_generate_fallback_forecast(
             data=data, known_covariates=known_covariates
@@ -536,9 +535,12 @@ class DirectTabularModel(AbstractMLForecastModel):
         mlforecast_df_past = self._to_mlforecast_df(data, None)
         if self._max_ts_length is not None:
             mlforecast_df_past = self._shorten_all_series(mlforecast_df_past, self._max_ts_length)
-        self._mlf.preprocess(mlforecast_df_past, static_features=[])
+        self._mlf.preprocess(mlforecast_df_past, static_features=[], dropna=False)
         for tfm in self._mlf.ts.target_transforms[::-1]:
-            predictions = tfm.inverse_transform(predictions)
+            predictions = apply_inverse_transform(predictions, transform=tfm)
+
+        if not self.is_quantile_model:
+            predictions = self._add_gaussian_quantiles(predictions, repeated_item_ids=predictions[MLF_ITEMID])
         predictions = TimeSeriesDataFrame(predictions.rename(columns={MLF_ITEMID: ITEMID, MLF_TIMESTAMP: TIMESTAMP}))
 
         if forecast_for_short_series is not None:
@@ -553,15 +555,10 @@ class DirectTabularModel(AbstractMLForecastModel):
             predictions["mean"] = predictions["0.5"]
         else:
             predictions = pd.DataFrame(predictions, columns=["mean"])
-            predictions = self._add_gaussian_quantiles(predictions, repeated_item_ids=repeated_item_ids)
 
         column_order = ["mean"] + [col for col in predictions.columns if col != "mean"]
         return predictions[column_order]
 
-    def _get_scale_per_item(self, item_ids: pd.Index) -> pd.Series:
-        # Rescaling is applied in the inverse_transform step, no need to scale predictions
-        return pd.Series(1.0, index=item_ids)
-
     def _get_extra_tabular_init_kwargs(self) -> dict:
         if self.is_quantile_model:
             return {
@@ -603,8 +600,8 @@ class RecursiveTabularModel(AbstractMLForecastModel):
    differences : List[int], default = None
         Differences to take of the target before computing the features. These are restored at the forecasting step.
         If None, will be set to ``[seasonal_period]``, where seasonal_period is determined based on the data frequency.
-    scaler : {"standard", "mean_abs", None}, default = "standard"
-        Scaling applied to each time series.
+    target_scaler : {"standard", "mean_abs", "min_max", "robust", None}, default = "standard"
+        Scaling applied to each time series. Scaling is applied after differencing.
    tabular_hyperparameters : Dict[Dict[str, Any]], optional
         Hyperparameters dictionary passed to ``TabularPredictor.fit``. Contains the names of models that should be fit.
         Defaults to ``{"GBM": {}}``.
@@ -622,8 +619,9 @@ class RecursiveTabularModel(AbstractMLForecastModel):
 
     def _get_model_params(self) -> dict:
         model_params = super()._get_model_params()
-        model_params.setdefault("scaler", "standard")
-        model_params.setdefault("differences", [get_seasonality(self.freq)])
+        model_params.setdefault("target_scaler", "standard")
+        if "differences" not in model_params or model_params["differences"] is None:
+            model_params["differences"] = [get_seasonality(self.freq)]
         return model_params
 
     def _predict(
@@ -670,9 +668,3 @@ class RecursiveTabularModel(AbstractMLForecastModel):
             "problem_type": ag.constants.REGRESSION,
             "eval_metric": self.eval_metric.equivalent_tabular_regression_metric or "mean_absolute_error",
         }
-
-    def _get_scale_per_item(self, item_ids: pd.Index) -> pd.Series:
-        if self._scaler is not None:
-            return self._scaler.stats_["_scale"].copy().reindex(item_ids)
-        else:
-            return pd.Series(1.0, index=item_ids)
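
Note: for the MLForecast-based models the series-level scaler is injected into the MLForecast pipeline rather than the model itself (hence `_create_target_scaler` returning None above), and the legacy `scaler` name is still read for backward compatibility. A configuration sketch under the same assumptions as the earlier example:

    predictor.fit(
        train_data,
        hyperparameters={
            # "target_scaler" supersedes "scaler"; "min_max" and "robust" are new options
            "RecursiveTabular": {"target_scaler": "robust"},
            "DirectTabular": {"target_scaler": None},  # opt out of the "mean_abs" default
        },
    )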

autogluon/timeseries/models/autogluon_tabular/transforms.py
@@ -0,0 +1,60 @@
+from typing import Literal, Optional, Union
+
+import numpy as np
+import pandas as pd
+from mlforecast.target_transforms import (
+    BaseTargetTransform,
+    GroupedArray,
+    _BaseGroupedArrayTargetTransform,
+)
+
+from autogluon.timeseries.dataset.ts_dataframe import (
+    ITEMID,
+    TIMESTAMP,
+    TimeSeriesDataFrame,
+)
+from autogluon.timeseries.transforms.scaler import LocalTargetScaler, get_target_scaler_from_name
+
+from .utils import MLF_ITEMID, MLF_TIMESTAMP
+
+
+class MLForecastScaler(BaseTargetTransform):
+    def __init__(self, scaler_type: Literal["standard", "robust", "min_max", "mean_abs"]):
+        # For backward compatibility
+        self.scaler_type = scaler_type
+        self.ag_scaler: Optional[LocalTargetScaler] = None
+
+    def _df_to_tsdf(self, df: pd.DataFrame) -> TimeSeriesDataFrame:
+        return df.rename(columns={self.id_col: ITEMID, self.time_col: TIMESTAMP}).set_index([ITEMID, TIMESTAMP])
+
+    def _tsdf_to_df(self, ts_df: TimeSeriesDataFrame) -> pd.DataFrame:
+        return pd.DataFrame(ts_df).reset_index().rename(columns={ITEMID: self.id_col, TIMESTAMP: self.time_col})
+
+    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
+        self.ag_scaler = get_target_scaler_from_name(name=self.scaler_type, target=self.target_col)
+        transformed = self.ag_scaler.fit_transform(self._df_to_tsdf(df)).reset_index()
+        return self._tsdf_to_df(transformed)
+
+    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
+        transformed = self.ag_scaler.inverse_transform(self._df_to_tsdf(df))
+        return self._tsdf_to_df(transformed)
+
+
+def apply_inverse_transform(
+    df: pd.DataFrame,
+    transform: Union[_BaseGroupedArrayTargetTransform, BaseTargetTransform],
+) -> pd.DataFrame:
+    """Apply inverse transformation to a dataframe, converting to GroupedArray if necessary"""
+    if isinstance(transform, BaseTargetTransform):
+        return transform.inverse_transform(df=df)
+    elif isinstance(transform, _BaseGroupedArrayTargetTransform):
+        indptr = np.concatenate([[0], df[MLF_ITEMID].value_counts().cumsum()])
+        assignment = {}
+        for col in df.columns.drop([MLF_ITEMID, MLF_TIMESTAMP]):
+            ga = GroupedArray(data=df[col].to_numpy(), indptr=indptr)
+            assignment[col] = transform.inverse_transform(ga).data
+        return df.assign(**assignment)
+    else:
+        raise ValueError(
+            f"transform must be of type `_BaseGroupedArrayTargetTransform` or `BaseTargetTransform` (got {type(transform)})"
+        )
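
Note: `apply_inverse_transform` is needed because mlforecast 0.13 mixes two transform interfaces: dataframe-based `BaseTargetTransform`s and grouped-array transforms such as `Differences`. A toy illustration of the `GroupedArray` bookkeeping used above (the frame and values are made up):

    import numpy as np
    import pandas as pd
    from mlforecast.target_transforms import GroupedArray

    # Long-format predictions are viewed as contiguous per-item segments;
    # indptr marks the block boundaries: rows 0:3 belong to "A", rows 3:5 to "B".
    df = pd.DataFrame({"unique_id": ["A"] * 3 + ["B"] * 2, "mean": [1.0, 2.0, 3.0, 10.0, 20.0]})
    indptr = np.concatenate([[0], df["unique_id"].value_counts().cumsum()])
    ga = GroupedArray(data=df["mean"].to_numpy(), indptr=indptr)
    # A fitted grouped-array transform can then invert all items in bulk via
    # transform.inverse_transform(ga).data, exactly as apply_inverse_transform does.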

autogluon/timeseries/models/autogluon_tabular/utils.py
@@ -1,51 +1,3 @@
-import numpy as np
-import pandas as pd
-from mlforecast.target_transforms import BaseTargetTransform
-
-
-class StandardScaler(BaseTargetTransform):
-    """Standardizes the series by subtracting mean and diving by standard deviation."""
-
-    min_scale: float = 1e-2
-
-    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        self.stats_ = (
-            df.replace([np.inf, -np.inf], np.nan)
-            .groupby(self.id_col)[self.target_col]
-            .agg(["mean", "std"])
-            .rename(columns={"mean": "_mean", "std": "_scale"})
-        )
-        self.stats_["_scale"] = self.stats_["_scale"].clip(lower=self.min_scale)
-        df = df.merge(self.stats_, on=self.id_col)
-        df[self.target_col] = (df[self.target_col] - df["_mean"]) / df["_scale"]
-        df = df.drop(columns=["_mean", "_scale"])
-        return df
-
-    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        df = df.merge(self.stats_, on=self.id_col)
-        for col in df.columns.drop([self.id_col, self.time_col, "_mean", "_scale"]):
-            df[col] = df[col] * df["_scale"] + df["_mean"]
-        df = df.drop(columns=["_mean", "_scale"])
-        return df
-
-
-class MeanAbsScaler(BaseTargetTransform):
-    """Scales time series by diving by their mean absolute value."""
-
-    min_scale: float = 1e-2
-
-    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        target = df[self.target_col].replace([np.inf, -np.inf], np.nan).abs()
-        self.stats_ = target.groupby(df[self.id_col], sort=False).agg(["mean"]).rename(columns={"mean": "_scale"})
-        self.stats_["_scale"] = self.stats_["_scale"].clip(lower=self.min_scale)
-        df = df.merge(self.stats_, on=self.id_col)
-        df[self.target_col] = df[self.target_col] / df["_scale"]
-        df = df.drop(columns=["_scale"])
-        return df
-
-    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        df = df.merge(self.stats_, on=self.id_col)
-        for col in df.columns.drop([self.id_col, self.time_col, "_scale"]):
-            df[col] = df[col] * df["_scale"]
-        df = df.drop(columns=["_scale"])
-        return df
+MLF_TARGET = "y"
+MLF_ITEMID = "unique_id"
+MLF_TIMESTAMP = "ds"

autogluon/timeseries/models/gluonts/torch/models.py
@@ -66,7 +66,8 @@ class DeepARModel(AbstractGluonTSModel):
    distr_output : gluonts.torch.distributions.DistributionOutput, default = StudentTOutput()
        Distribution to use to evaluate observations and sample predictions
    scaling: bool, default = True
-        Whether to automatically scale the target values
+        If True, mean absolute scaling will be applied to each *context window* during training & prediction.
+        Note that this is different from the `target_scaler` that is applied to the *entire time series*.
    max_epochs : int, default = 100
        Number of epochs the model will be trained for
    batch_size : int, default = 64
@@ -85,6 +86,8 @@ class DeepARModel(AbstractGluonTSModel):
        If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
    """
 
+    # TODO: Replace "scaling: bool" with "window_scaler": {"mean_abs", None} for consistency?
+
    supports_known_covariates = True
    supports_static_features = True
 
@@ -122,7 +125,8 @@ class SimpleFeedForwardModel(AbstractGluonTSModel):
    batch_normalization : bool, default = False
        Whether to use batch normalization
    mean_scaling : bool, default = True
-        Scale the network input by the data mean and the network output by its inverse
+        If True, mean absolute scaling will be applied to each *context window* during training & prediction.
+        Note that this is different from the `target_scaler` that is applied to the *entire time series*.
    max_epochs : int, default = 100
        Number of epochs the model will be trained for
    batch_size : int, default = 64
@@ -254,7 +258,10 @@ class DLinearModel(AbstractGluonTSModel):
    distr_output : gluonts.torch.distributions.DistributionOutput, default = StudentTOutput()
        Distribution to fit.
    scaling : {"mean", "std", None}, default = "mean"
-        Scaling applied to the inputs. One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+        Scaling applied to each *context window* during training & prediction.
+        One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+
+        Note that this is different from the `target_scaler` that is applied to the *entire time series*.
    max_epochs : int, default = 100
        Number of epochs the model will be trained for
    batch_size : int, default = 64
@@ -314,7 +321,10 @@ class PatchTSTModel(AbstractGluonTSModel):
    distr_output : gluonts.torch.distributions.DistributionOutput, default = StudentTOutput()
        Distribution to fit.
    scaling : {"mean", "std", None}, default = "mean"
-        Scaling applied to the inputs. One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+        Scaling applied to each *context window* during training & prediction.
+        One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+
+        Note that this is different from the `target_scaler` that is applied to the *entire time series*.
    max_epochs : int, default = 100
        Number of epochs the model will be trained for
    batch_size : int, default = 64
@@ -477,7 +487,10 @@ class TiDEModel(AbstractGluonTSModel):
    layer_norm : bool, default = False
        Should layer normalization be enabled?
    scaling : {"mean", "std", None}, default = "mean"
-        Scaling applied to the inputs. One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+        Scaling applied to each *context window* during training & prediction.
+        One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+
+        Note that this is different from the `target_scaler` that is applied to the *entire time series*.
    max_epochs : int, default = 100
        Number of epochs the model will be trained for
    batch_size : int, default = 64
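
Note: these docstring updates separate the model-internal *window* scaling (computed on each context window inside the network) from the new series-level `target_scaler`. The two are independent and can be combined, e.g. (a sketch, same assumptions as the earlier examples):

    predictor.fit(
        train_data,
        hyperparameters={
            # window-level scaling inside the network + series-level scaling outside it
            "PatchTST": {"scaling": "std", "target_scaler": "robust"},
            "DeepAR": {"scaling": False, "target_scaler": "standard"},
        },
    )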

autogluon/timeseries/models/local/abstract_local_model.py
@@ -87,6 +87,14 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         self.time_limit: Optional[float] = None
         self._dummy_forecast: Optional[pd.DataFrame] = None
 
+    @property
+    def allowed_hyperparameters(self) -> List[str]:
+        return (
+            super().allowed_hyperparameters
+            + ["use_fallback_model", "max_ts_length", "n_jobs"]
+            + self.allowed_local_model_args
+        )
+
     def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any:
         if not self._get_tags()["allow_nan"]:
             data = data.fill_missing_values()
@@ -103,8 +111,9 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
 
         unused_local_model_args = []
         local_model_args = {}
+        # TODO: Move filtering logic to AbstractTimeSeriesModel
         for key, value in raw_local_model_args.items():
-            if key in self.allowed_local_model_args:
+            if key in self.allowed_hyperparameters:
                 local_model_args[key] = value
             else:
                 unused_local_model_args.append(key)
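
Note: because `allowed_hyperparameters` now includes the base-class entries, `target_scaler` passes the filter for local models too, alongside their model-specific arguments, e.g. (a sketch; `seasonal_period` is assumed to be one of ETS's `allowed_local_model_args`):

    predictor.fit(
        train_data,
        hyperparameters={"ETS": {"target_scaler": "mean_abs", "seasonal_period": 24}},
    )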

autogluon/timeseries/models/multi_window/multi_window_model.py
@@ -13,6 +13,7 @@ from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel
 from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
+from autogluon.timeseries.transforms import LocalTargetScaler
 
 logger = logging.getLogger(__name__)
 
@@ -32,6 +33,8 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
        kwargs used to initialize model_base if model_base is a class.
    """
 
+    # TODO: Remove the MultiWindowBacktestingModel class, move the logic to AbstractTimeSeriesTrainer
+
    def __init__(
        self,
        model_base: Union[AbstractTimeSeriesModel, Type[AbstractTimeSeriesModel]],
@@ -82,6 +85,10 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
    def get_minimum_resources(self, is_gpu_available: bool = False) -> bool:
        return self._get_model_base().get_minimum_resources(is_gpu_available)
 
+    def _create_target_scaler(self) -> Optional[LocalTargetScaler]:
+        # Do not use scaler in the MultiWindowModel to avoid duplication; it will be created in the inner model
+        return None
+
    def _fit(
        self,
        train_data: TimeSeriesDataFrame,

autogluon/timeseries/models/presets.py
@@ -209,7 +209,7 @@ def get_preset_models(
     for model in model_priority_list:
         if isinstance(model, str):
             if model not in MODEL_TYPES:
-                raise ValueError(f"Model {model} is not supported yet.")
+                raise ValueError(f"Model {model} is not supported. Available models: {sorted(MODEL_TYPES)}")
             if model in excluded_models:
                 logger.info(
                     f"\tFound '{model}' model in `hyperparameters`, but '{model}' "

autogluon/timeseries/predictor.py
@@ -110,8 +110,6 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
        known as dynamic features, exogenous variables, additional regressors or related time series. Examples of such
        covariates include holidays, promotions or weather forecasts.
 
-        Currently, only numeric (float of integer dtype) are supported.
-
        If ``known_covariates_names`` are provided, then:
 
        - :meth:`~autogluon.timeseries.TimeSeriesPredictor.fit`, :meth:`~autogluon.timeseries.TimeSeriesPredictor.evaluate`, and :meth:`~autogluon.timeseries.TimeSeriesPredictor.leaderboard` will expect a data frame with columns listed in ``known_covariates_names`` (in addition to the ``target`` column).
@@ -437,7 +435,6 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
 
            If ``known_covariates_names`` were specified when creating the predictor, ``train_data`` must include the
            columns listed in ``known_covariates_names`` with the covariates values aligned with the target time series.
-            The known covariates must have a numeric (float or integer) dtype.
 
            Columns of ``train_data`` except ``target`` and those listed in ``known_covariates_names`` will be
            interpreted as ``past_covariates`` - covariates that are known only in the past.
@@ -1199,6 +1196,8 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
     def leaderboard(
         self,
         data: Optional[Union[TimeSeriesDataFrame, pd.DataFrame, Path, str]] = None,
+        extra_info: bool = False,
+        extra_metrics: Optional[List[Union[str, TimeSeriesScorer]]] = None,
         display: bool = False,
         use_cache: bool = True,
         **kwargs,
@@ -1236,6 +1235,20 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
            If provided data is a path or a pandas.DataFrame, AutoGluon will attempt to automatically convert it to a
            ``TimeSeriesDataFrame``.
 
+        extra_info : bool, default = False
+            If True, the leaderboard will contain an additional column `hyperparameters` with the hyperparameters used
+            by each model during training. An empty dictionary `{}` means that the model was trained with default
+            hyperparameters.
+        extra_metrics : List[Union[str, TimeSeriesScorer]], optional
+            A list of metrics to calculate scores for and include in the output DataFrame.
+
+            Only valid when `data` is specified. The scores refer to the scores on `data` (same data as used to
+            calculate the `score_test` column).
+
+            This list can contain any values which would also be valid for `eval_metric` when creating a :class:`~autogluon.timeseries.TimeSeriesPredictor`.
+
+            For each provided `metric`, a column with name `str(metric)` will be added to the leaderboard, containing
+            the value of the metric computed on `data`.
        display : bool, default = False
            If True, the leaderboard DataFrame will be printed.
        use_cache : bool, default = True
@@ -1255,11 +1268,16 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
         if len(kwargs) > 0:
             for key in kwargs:
                 raise TypeError(f"TimeSeriesPredictor.leaderboard() got an unexpected keyword argument '{key}'")
+        if data is None and extra_metrics is not None:
+            raise ValueError("`extra_metrics` is only valid when `data` is specified.")
 
         if data is not None:
             data = self._check_and_prepare_data_frame(data)
             self._check_data_for_evaluation(data)
-        leaderboard = self._learner.leaderboard(data, use_cache=use_cache)
+
+        leaderboard = self._learner.leaderboard(
+            data, extra_info=extra_info, extra_metrics=extra_metrics, use_cache=use_cache
+        )
         if display:
             with pd.option_context("display.max_rows", None, "display.max_columns", None, "display.width", 1000):
                 print(leaderboard)
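
Note: a usage sketch for the new leaderboard arguments, assuming a fitted `predictor` and a held-out `test_data` TimeSeriesDataFrame:

    lb = predictor.leaderboard(
        test_data,
        extra_info=True,                # adds a "hyperparameters" column
        extra_metrics=["MASE", "WQL"],  # one extra column per metric, scored on test_data
    )
    print(lb[["model", "score_test", "MASE", "WQL", "hyperparameters"]])

    # extra_metrics requires data, so this raises a ValueError:
    # predictor.leaderboard(extra_metrics=["MASE"])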

autogluon/timeseries/trainer/abstract_trainer.py
@@ -21,6 +21,7 @@ from autogluon.timeseries import TimeSeriesDataFrame
 from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.ensemble import AbstractTimeSeriesEnsembleModel, TimeSeriesGreedyEnsemble
+from autogluon.timeseries.models.multi_window import MultiWindowBacktestingModel
 from autogluon.timeseries.models.presets import contains_searchspace
 from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
 from autogluon.timeseries.utils.features import (
@@ -755,7 +756,13 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
         self.save_model(model=ensemble)
         return ensemble.name
 
-    def leaderboard(self, data: Optional[TimeSeriesDataFrame] = None, use_cache: bool = True) -> pd.DataFrame:
+    def leaderboard(
+        self,
+        data: Optional[TimeSeriesDataFrame] = None,
+        extra_info: bool = False,
+        extra_metrics: Optional[List[Union[str, TimeSeriesScorer]]] = None,
+        use_cache: bool = True,
+    ) -> pd.DataFrame:
         logger.debug("Generating leaderboard for all models trained")
 
         model_names = self.get_model_names()
@@ -771,6 +778,14 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
                 "fit_time_marginal": self.get_model_attribute(model_name, "fit_time"),
                 "pred_time_val": self.get_model_attribute(model_name, "predict_time"),
             }
+            if extra_info:
+                model = self.load_model(model_name=model_name)
+                if isinstance(model, MultiWindowBacktestingModel):
+                    model = model.most_recent_model
+                model_info[model_name]["hyperparameters"] = model.params
+
+        if extra_metrics is None:
+            extra_metrics = []
 
         if data is not None:
             past_data, known_covariates = data.get_model_inputs_for_scoring(
@@ -799,6 +814,14 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
                 model_info[model_name]["score_test"] = self._score_with_predictions(data, model_preds)
                 model_info[model_name]["pred_time_test"] = pred_time_dict[model_name]
 
+                for metric in extra_metrics:
+                    if model_preds is None:
+                        model_info[model_name][str(metric)] = float("nan")
+                    else:
+                        model_info[model_name][str(metric)] = self._score_with_predictions(
+                            data, model_preds, metric=metric
+                        )
+
         explicit_column_order = [
             "model",
             "score_test",
@@ -808,15 +831,18 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
             "fit_time_marginal",
             "fit_order",
         ]
+        if extra_info:
+            explicit_column_order += ["hyperparameters"]
 
-        df = pd.DataFrame(model_info.values(), columns=explicit_column_order)
         if data is None:
             explicit_column_order.remove("score_test")
             explicit_column_order.remove("pred_time_test")
             sort_column = "score_val"
         else:
             sort_column = "score_test"
+            explicit_column_order += [str(metric) for metric in extra_metrics]
 
+        df = pd.DataFrame(model_info.values(), columns=explicit_column_order)
         df.sort_values(by=[sort_column, "model"], ascending=[False, False], inplace=True)
         df.reset_index(drop=True, inplace=True)
 

autogluon/timeseries/transforms/__init__.py
@@ -0,0 +1,8 @@
+from .scaler import (
+    LocalStandardScaler,
+    LocalMinMaxScaler,
+    LocalMeanAbsScaler,
+    LocalRobustScaler,
+    LocalTargetScaler,
+    get_target_scaler_from_name
+)

autogluon/timeseries/transforms/scaler.py
@@ -0,0 +1,132 @@
+from typing import Literal, Optional, Tuple, Union
+
+import numpy as np
+import pandas as pd
+
+from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+
+
+class LocalTargetScaler:
+    """Applies an affine transformation (x - loc) / scale independently to each time series in the dataset."""
+
+    def __init__(
+        self,
+        target: str = "target",
+        min_scale: float = 1e-2,
+    ):
+        self.target = target
+        self.min_scale = min_scale
+        self.loc: Optional[pd.Series] = None
+        self.scale: Optional[pd.Series] = None
+
+    def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[Optional[pd.Series], Optional[pd.Series]]:
+        raise NotImplementedError
+
+    def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        return self.fit(data=data).transform(data=data)
+
+    def fit(self, data: TimeSeriesDataFrame) -> "LocalTargetScaler":
+        target_series = data[self.target].replace([np.inf, -np.inf], np.nan)
+        self.loc, self.scale = self._compute_loc_scale(target_series)
+        if self.loc is not None:
+            self.loc = self.loc.replace([np.inf, -np.inf], np.nan).fillna(0.0)
+        if self.scale is not None:
+            self.scale = self.scale.clip(lower=self.min_scale).replace([np.inf, -np.inf], np.nan).fillna(1.0)
+        return self
+
+    def _reindex_loc_scale(self, item_index: pd.Index) -> Tuple[Union[np.ndarray, float], Union[np.ndarray, float]]:
+        """Reindex loc and scale parameters for the given item_ids and convert them to an array-like."""
+        if self.loc is not None:
+            loc = self.loc.reindex(item_index).to_numpy()
+        else:
+            loc = 0.0
+        if self.scale is not None:
+            scale = self.scale.reindex(item_index).to_numpy()
+        else:
+            scale = 1.0
+        return loc, scale
+
+    def transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        """Apply scaling to the target column in the dataframe."""
+        loc, scale = self._reindex_loc_scale(item_index=data.index.get_level_values(ITEMID))
+        return data.assign(**{self.target: (data[self.target] - loc) / scale})
+
+    def inverse_transform(self, predictions: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        """Apply inverse scaling to all columns in the predictions dataframe."""
+        loc, scale = self._reindex_loc_scale(item_index=predictions.index.get_level_values(ITEMID))
+        return predictions.assign(**{col: predictions[col] * scale + loc for col in predictions.columns})
+
+
+class LocalStandardScaler(LocalTargetScaler):
+    """Applies standard scaling to each time series in the dataset.
+
+    The resulting affine transformation is (x - loc) / scale, where scale = std(x), loc = mean(x).
+    """
+
+    def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
+        stats = target_series.groupby(level=ITEMID, sort=False).agg(["mean", "std"])
+        return stats["mean"], stats["std"]
+
+
+class LocalMeanAbsScaler(LocalTargetScaler):
+    """Applies mean absolute scaling to each time series in the dataset."""
+
+    def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
+        scale = target_series.abs().groupby(level=ITEMID, sort=False).agg("mean")
+        return None, scale
+
+
+class LocalMinMaxScaler(LocalTargetScaler):
+    """Applies min/max scaling to each time series in the dataset.
+
+    The resulting affine transformation is (x - loc) / scale, where scale = max(x) - min(x), loc = min(x) / scale.
+    """
+
+    def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
+        stats = target_series.abs().groupby(level=ITEMID, sort=False).agg(["min", "max"])
+        scale = (stats["max"] - stats["min"]).clip(lower=self.min_scale)
+        loc = stats["min"] / scale
+        return loc, scale
+
+
+class LocalRobustScaler(LocalTargetScaler):
+    """Applies a robust scaler based on the interquartile range. Less sensitive to outliers compared to other scaler.
+
+    The resulting affine transformation is (x - loc) / scale, where scale = quantile(x, 0.75) - quantile(x, 0.25), loc = median(x).
+    """
+
+    def __init__(
+        self,
+        target: str = "target",
+        min_scale: float = 1e-2,
+        **kwargs,
+    ):
+        super().__init__(target=target, min_scale=min_scale)
+        self.q_min = 0.25
+        self.q_max = 0.75
+        assert 0 < self.q_min < self.q_max < 1
+
+    def _compute_loc_scale(self, target_series: pd.Series) -> Tuple[pd.Series, pd.Series]:
+        grouped = target_series.groupby(level=ITEMID, sort=False)
+        loc = grouped.median()
+        lower = grouped.quantile(self.q_min)
+        upper = grouped.quantile(self.q_max)
+        scale = upper - lower
+        return loc, scale
+
+
+AVAILABLE_SCALERS = {
+    "standard": LocalStandardScaler,
+    "mean_abs": LocalMeanAbsScaler,
+    "min_max": LocalMinMaxScaler,
+    "robust": LocalRobustScaler,
+}
+
+
+def get_target_scaler_from_name(
+    name: Literal["standard", "mean_abs", "min_max", "robust"], **scaler_kwargs
+) -> LocalTargetScaler:
+    """Get LocalTargetScaler object from a string."""
+    if name not in AVAILABLE_SCALERS:
+        raise KeyError(f"Scaler type {name} not supported. Available scalers: {list(AVAILABLE_SCALERS)}")
+    return AVAILABLE_SCALERS[name](**scaler_kwargs)
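
Note: a round-trip sketch for the new scalers on a toy dataset (values are made up; the assert verifies that `inverse_transform` undoes `fit_transform`):

    import numpy as np
    import pandas as pd

    from autogluon.timeseries import TimeSeriesDataFrame
    from autogluon.timeseries.transforms import get_target_scaler_from_name

    df = TimeSeriesDataFrame.from_data_frame(
        pd.DataFrame(
            {
                "item_id": ["A"] * 4 + ["B"] * 4,
                "timestamp": list(pd.date_range("2024-01-01", periods=4)) * 2,
                "target": [1.0, 2.0, 3.0, 4.0, 10.0, 20.0, 30.0, 40.0],
            }
        )
    )
    scaler = get_target_scaler_from_name("standard", target="target")
    scaled = scaler.fit_transform(df)  # per-item (x - mean) / std
    restored = scaler.inverse_transform(scaled)  # rescales *all* columns, incl. quantile columns
    np.testing.assert_allclose(restored["target"], df["target"])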

autogluon/timeseries/version.py
@@ -1,3 +1,3 @@
 """This is the autogluon version file."""
-__version__ = '1.1.2b20240924'
+__version__ = '1.1.2b20240926'
 __lite__ = False

{autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.1.2b20240924
+Version: 1.1.2b20240926
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -46,15 +46,16 @@ Requires-Dist: accelerate<0.22.0,>=0.21.0
 Requires-Dist: gluonts==0.15.1
 Requires-Dist: networkx<4,>=3.0
 Requires-Dist: statsforecast<1.8,>=1.7.0
-Requires-Dist: mlforecast<0.10.1,>=0.10.0
-Requires-Dist: utilsforecast<=0.1.9
+Requires-Dist: mlforecast==0.13.4
+Requires-Dist: utilsforecast<0.2.5,>=0.2.3
+Requires-Dist: coreforecast==0.0.12
 Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.1.2b20240924
-Requires-Dist: autogluon.common==1.1.2b20240924
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20240924
+Requires-Dist: autogluon.core[raytune]==1.1.2b20240926
+Requires-Dist: autogluon.common==1.1.2b20240926
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20240926
 Provides-Extra: all
 Requires-Dist: optimum[onnxruntime]<1.19,>=1.17; extra == "all"
 Provides-Extra: chronos-onnx

{autogluon.timeseries-1.1.2b20240924.dist-info → autogluon.timeseries-1.1.2b20240926.dist-info}/RECORD
@@ -1,10 +1,10 @@
-autogluon.timeseries-1.1.2b20240924-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon.timeseries-1.1.2b20240926-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
 autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
-autogluon/timeseries/learner.py,sha256=IYXpJSDyTzjZXjKL_SrTujt5Uke83mSJFA0sMj25_sM,13828
-autogluon/timeseries/predictor.py,sha256=IaooHyVzNTjYn6uVEVKvqecIdMTH9oEa4f3YMJbHdXM,83216
+autogluon/timeseries/learner.py,sha256=NXhftyqMD8Bl1QHIBN82UKP0UlCV_ACughZqkmMf4oY,14043
+autogluon/timeseries/predictor.py,sha256=NFe-y1H8987Rlnjcr4GqAxL7ivFaMuu94XWWiZrp9Uc,84351
 autogluon/timeseries/splitter.py,sha256=eghGwAAN2_cxGk5aJBILgjGWtLzjxJcytMy49gg_q18,3061
-autogluon/timeseries/version.py,sha256=7OEI1zXsnqzxER4gcsIWfxvNI5u5RNdbT6aW1MT0r4Y,90
+autogluon/timeseries/version.py,sha256=Li6mwhhJy16eLGxm4jg4RzHnSfTH_RuoVZ576yp1Cx8,90
 autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
 autogluon/timeseries/configs/presets_configs.py,sha256=94-yL9teDHKs2irWjP3kpewI7FE1ChYCgEgz9XHJ6gc,1965
 autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
@@ -15,13 +15,14 @@ autogluon/timeseries/metrics/point.py,sha256=xy8sKrBbuxZ7yTW21TDPayKnEj2FBj1AEse
 autogluon/timeseries/metrics/quantile.py,sha256=owMbOAJYwVyzdRkrJpuCGUXk937GU843QndCZyp5n9Y,3967
 autogluon/timeseries/metrics/utils.py,sha256=eJ63TCR-UwbeJ1c2Qm7B2q-8B3sFthPgiooEccrf2Kc,912
 autogluon/timeseries/models/__init__.py,sha256=WKV7DIpJkrwEj0cUfscESp67Ydap9hAqaNTYvgi2EIA,1303
-autogluon/timeseries/models/presets.py,sha256=lC-FGlJdpa6yg465Ks9FlTE0I4xfWt-LKNYilLrIep4,11637
+autogluon/timeseries/models/presets.py,sha256=7ORBU-7fCwwYlpXaWCXEfNx0pss3mvB6KGSsQ1kyw2k,11673
 autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=MvLF529b3fo0icgle-qmS0oce-ftiiQ1jPBLnY-39fk,23435
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=siy-OW4zflN61-pnuhvYawDvchm3zXb1ta8HUDLxhWY,24793
 autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
 autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
-autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=Px5_VuQx8f74CpQYuClijtyK-yJLkNW7fSjCZrmJZ0s,32815
-autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=4-gTrBtizxeMVQlsuscugPqw9unaXWXhS1TVVssfzYY,2125
+autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=C1WVcuNlTcqo_qGm3v0uPpraO06mdVnBNeflPbCPjNQ,32861
+autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=FozTzwcp1QjevEhrMLXsJHy8fymOcq1146oX4Al60wg,2517
+autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
 autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
 autogluon/timeseries/models/chronos/model.py,sha256=vnKzRExX-2CAv9yD0HeeLRF4oY9HnZwrRIAEfLbAMtg,14703
 autogluon/timeseries/models/chronos/pipeline.py,sha256=vR1LbMkWFKDX6WZBcAtsGQYnRq1vkV935omTj3Zyr9U,20788
@@ -32,17 +33,19 @@ autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=5HvZuW5osgsZg3V69
 autogluon/timeseries/models/gluonts/__init__.py,sha256=asC1PTj4j9xMbilvk1IT1julnpeoKbv5ZNuAR6-DFgA,361
 autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=X4KChuSVSoxLOcrto1SgwAgiHeCuE5jFOaX8GxdBTeg,34017
 autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/timeseries/models/gluonts/torch/models.py,sha256=5fp2yEyTvMVDm-jQEUYB4ugLkVDcd364NBUjmePltO8,24058
+autogluon/timeseries/models/gluonts/torch/models.py,sha256=85MWDXPwDncGwLijkm-K1tS-05LvGq4Xl-WbbIcYCO8,24906
 autogluon/timeseries/models/local/__init__.py,sha256=JyckWWgMG1BTIWJqFTW6e1O-eb0LPPOwtXwmb1ErohQ,756
-autogluon/timeseries/models/local/abstract_local_model.py,sha256=Ge4X4FgyYHsFU5h9q7mSVlEXKKJtyGqckjPJ4PXx3A0,11917
+autogluon/timeseries/models/local/abstract_local_model.py,sha256=af3GFfUIGnVNzzZJ-WI61lw83lDFfgB0AfGxmkb-t_4,12226
 autogluon/timeseries/models/local/naive.py,sha256=iwRcFMFmJKPWPbD9TWaIUS51oav69F_VAp6-jb_5SUE,7249
 autogluon/timeseries/models/local/npts.py,sha256=Bp74doKnfpGE8ywP4FWOCI_RwRMsmgocYDfGtq764DA,4143
 autogluon/timeseries/models/local/statsforecast.py,sha256=79swW7g7bn1CmuGY79i7r0uj0QZr6WLIfH_x3p1FTDA,32742
 autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
-autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=HiujLv8MJ31fWxRM5iXG2PzobFn4Mus0nJPu0MP2Rw4,11374
+autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=EAXzoQo96zTPNz9BTYDmV1878OVKb9F6h39y386N3zU,11740
 autogluon/timeseries/trainer/__init__.py,sha256=lxiOT-Gc6BEnr_yWQqra85kEngeM_wtH2SCaRbmC_qE,170
-autogluon/timeseries/trainer/abstract_trainer.py,sha256=ElEcSVsaS1b-UT6cLBiwM_5LqqfDEs1q5Sy-GlabMuI,59310
+autogluon/timeseries/trainer/abstract_trainer.py,sha256=hZI4QcsFvU1gxP2yv_DRCIMlc6q02ptR7UDA9EgJPoM,60409
 autogluon/timeseries/trainer/auto_trainer.py,sha256=psJFZBwWWPlLjNwAgvO4OUJXsRW1sTN2YS9a4pdoeoE,3344
+autogluon/timeseries/transforms/__init__.py,sha256=lzDavxdgGIz5m_DmSpNa9ewNU9Evndam3YXfOEk6kwY,174
+autogluon/timeseries/transforms/scaler.py,sha256=30JrAnZwj58ntes-YP1H_XmeVLGtFepjWnRzPQQ-t4k,5352
 autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/timeseries/utils/features.py,sha256=hEir-2lU8fvHjt5r_LG9tLZEk5wNdRdeLRE7qF5z3_Y,19585
 autogluon/timeseries/utils/forecast.py,sha256=p0WKM9Q0nLAwwmCgYZI1zi9mCOWXWJfllEt2lPRQl4M,1882
@@ -52,11 +55,11 @@ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbj
 autogluon/timeseries/utils/datetime/lags.py,sha256=GoLtvcZ8oKb3QkoBJ9E59LSPLOP7Qjxrr2UmMSZgjyw,5909
 autogluon/timeseries/utils/datetime/seasonality.py,sha256=h_4w00iEytAz_N_EpCENQ8RCXy7KQITczrYjBgVqWkQ,764
 autogluon/timeseries/utils/datetime/time_features.py,sha256=PAXbYbQ0z_5GFbkxSNi41zLY_2-U3x0Ynm1m_WhdtGc,2572
-autogluon.timeseries-1.1.2b20240924.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.timeseries-1.1.2b20240924.dist-info/METADATA,sha256=eQzL8Qm5iUT_6CwnSMe7D1XZFhEJlV8lWWi-DUb-lGM,12370
-autogluon.timeseries-1.1.2b20240924.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.timeseries-1.1.2b20240924.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-autogluon.timeseries-1.1.2b20240924.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.1.2b20240924.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.1.2b20240924.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.timeseries-1.1.2b20240924.dist-info/RECORD,,
+autogluon.timeseries-1.1.2b20240926.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.timeseries-1.1.2b20240926.dist-info/METADATA,sha256=6EFhuCNkwsbg9HTtvJLuhfLIYbdUI5BW5HuPAxTLm40,12405
+autogluon.timeseries-1.1.2b20240926.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.timeseries-1.1.2b20240926.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+autogluon.timeseries-1.1.2b20240926.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.1.2b20240926.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.1.2b20240926.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.timeseries-1.1.2b20240926.dist-info/RECORD,,