autogluon.timeseries 1.1.2b20241118__tar.gz → 1.1.2b20241120__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/PKG-INFO +1 -1
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/configs/presets_configs.py +18 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/learner.py +3 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/metrics/point.py +3 -3
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/metrics/quantile.py +2 -2
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/metrics/utils.py +2 -2
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +6 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +17 -10
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/model.py +24 -52
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/pipeline/base.py +1 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/pipeline/chronos.py +1 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/pipeline/utils.py +2 -1
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/local/abstract_local_model.py +1 -14
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +11 -3
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/presets.py +25 -6
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/predictor.py +1 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/trainer/abstract_trainer.py +16 -4
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/PKG-INFO +1 -1
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/requires.txt +3 -3
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/setup.cfg +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/setup.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/configs/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/dataset/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/evaluator.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/metrics/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/metrics/abstract.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/autogluon_tabular/transforms.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/pipeline/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/gluonts/torch/models.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/local/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/local/naive.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/local/npts.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/regressor.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/splitter.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/trainer/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/transforms/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/transforms/covariate_scaler.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/transforms/target_scaler.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/features.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/forecast.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/SOURCES.txt +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
- {autogluon.timeseries-1.1.2b20241118 → autogluon.timeseries-1.1.2b20241120}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
src/autogluon/timeseries/configs/presets_configs.py

@@ -10,6 +10,24 @@ TIMESERIES_PRESETS_CONFIGS = dict(
     high_quality={"hyperparameters": "default"},
     medium_quality={"hyperparameters": "light"},
     fast_training={"hyperparameters": "very_light"},
+    # Chronos-Bolt models
+    bolt_tiny={
+        "hyperparameters": {"Chronos": {"model_path": "bolt-tiny"}},
+        "skip_model_selection": True,
+    },
+    bolt_mini={
+        "hyperparameters": {"Chronos": {"model_path": "bolt-mini"}},
+        "skip_model_selection": True,
+    },
+    bolt_small={
+        "hyperparameters": {"Chronos": {"model_path": "bolt-small"}},
+        "skip_model_selection": True,
+    },
+    bolt_base={
+        "hyperparameters": {"Chronos": {"model_path": "bolt-base"}},
+        "skip_model_selection": True,
+    },
+    # Original Chronos models
     chronos_tiny={
         "hyperparameters": {"Chronos": {"model_path": "tiny"}},
         "skip_model_selection": True,
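The new bolt_* presets are selected by name at fit time. A minimal usage sketch; the CSV path, column layout, and prediction length are placeholders, not part of this diff:

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Placeholder dataset in long format with item_id/timestamp columns.
train_data = TimeSeriesDataFrame.from_path("train.csv")

# "bolt_small" expands to the config added above: only the Chronos-Bolt (small)
# model is trained, and model selection is skipped.
predictor = TimeSeriesPredictor(prediction_length=48).fit(train_data, presets="bolt_small")
forecasts = predictor.predict(train_data)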
src/autogluon/timeseries/learner.py

@@ -32,6 +32,7 @@ class TimeSeriesLearner(AbstractLearner):
         eval_metric_seasonal_period: Optional[int] = None,
         prediction_length: int = 1,
         cache_predictions: bool = True,
+        ensemble_model_type: Optional[Type] = None,
         **kwargs,
     ):
         super().__init__(path_context=path_context)

@@ -44,6 +45,7 @@ class TimeSeriesLearner(AbstractLearner):
         self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
         self.cache_predictions = cache_predictions
         self.freq: Optional[str] = None
+        self.ensemble_model_type = ensemble_model_type

         self.feature_generator = TimeSeriesFeatureGenerator(
             target=self.target, known_covariates_names=self.known_covariates_names

@@ -106,6 +108,7 @@ class TimeSeriesLearner(AbstractLearner):
                 val_splitter=val_splitter,
                 refit_every_n_windows=refit_every_n_windows,
                 cache_predictions=self.cache_predictions,
+                ensemble_model_type=self.ensemble_model_type,
             )
         )
         self.trainer = self.trainer_type(**trainer_init_kwargs)
src/autogluon/timeseries/metrics/point.py

@@ -8,7 +8,7 @@ import pandas as pd
 from autogluon.timeseries import TimeSeriesDataFrame

 from .abstract import TimeSeriesScorer
-from .utils import
+from .utils import in_sample_abs_seasonal_error, in_sample_squared_seasonal_error

 logger = logging.getLogger(__name__)


@@ -232,7 +232,7 @@ class MASE(TimeSeriesScorer):
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
     ) -> None:
-        self._past_abs_seasonal_error =
+        self._past_abs_seasonal_error = in_sample_abs_seasonal_error(
             y_past=data_past[target], seasonal_period=seasonal_period
         )


@@ -292,7 +292,7 @@ class RMSSE(TimeSeriesScorer):
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
     ) -> None:
-        self._past_squared_seasonal_error =
+        self._past_squared_seasonal_error = in_sample_squared_seasonal_error(
             y_past=data_past[target], seasonal_period=seasonal_period
         )
src/autogluon/timeseries/metrics/quantile.py

@@ -6,7 +6,7 @@ import pandas as pd
 from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame

 from .abstract import TimeSeriesScorer
-from .utils import
+from .utils import in_sample_abs_seasonal_error


 class WQL(TimeSeriesScorer):

@@ -85,7 +85,7 @@ class SQL(TimeSeriesScorer):
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
     ) -> None:
-        self._past_abs_seasonal_error =
+        self._past_abs_seasonal_error = in_sample_abs_seasonal_error(
             y_past=data_past[target], seasonal_period=seasonal_period
         )
src/autogluon/timeseries/metrics/utils.py

@@ -7,12 +7,12 @@ def _get_seasonal_diffs(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Se
     return y_past.groupby(level=ITEMID, sort=False).diff(seasonal_period).abs()


-def
+def in_sample_abs_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
     """Compute seasonal naive forecast error (predict value from seasonal_period steps ago) for each time series."""
     seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
     return seasonal_diffs.groupby(level=ITEMID, sort=False).mean().fillna(1.0)


-def
+def in_sample_squared_seasonal_error(*, y_past: pd.Series, seasonal_period: int = 1) -> pd.Series:
     seasonal_diffs = _get_seasonal_diffs(y_past=y_past, seasonal_period=seasonal_period)
     return seasonal_diffs.pow(2.0).groupby(level=ITEMID, sort=False).mean().fillna(1.0)
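For intuition, the two renamed utilities score a seasonal-naive forecast in-sample: per item, the mean absolute (or mean squared) difference between each value and the value seasonal_period steps earlier. A self-contained sketch of the same computation on toy data, without importing the package:

import pandas as pd

# Two items with the (item_id, timestamp) MultiIndex layout used by TimeSeriesDataFrame.
index = pd.MultiIndex.from_product(
    [["A", "B"], pd.date_range("2024-01-01", periods=4, freq="D")],
    names=["item_id", "timestamp"],
)
y_past = pd.Series([1.0, 2.0, 4.0, 7.0, 10.0, 10.0, 13.0, 17.0], index=index)

seasonal_period = 1
# Mirrors _get_seasonal_diffs followed by in_sample_abs_seasonal_error above.
seasonal_diffs = y_past.groupby(level="item_id", sort=False).diff(seasonal_period).abs()
abs_seasonal_error = seasonal_diffs.groupby(level="item_id", sort=False).mean().fillna(1.0)
print(abs_seasonal_error)  # A: (1+2+3)/3 = 2.0, B: (0+3+4)/3 = 2.33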
src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py

@@ -67,6 +67,7 @@ class AbstractTimeSeriesModel(AbstractModel):
     _oof_filename = "oof.pkl"
     # TODO: For which models should we override this parameter?
     _covariate_regressor_fit_time_fraction: float = 0.5
+    default_max_time_limit_ratio: float = 0.9

     # TODO: refactor "pruned" methods after AbstractModel is refactored
     predict_proba = None

@@ -174,6 +175,11 @@ class AbstractTimeSeriesModel(AbstractModel):
             self._oof_predictions = self.load_oof_predictions(self.path)
         return self._oof_predictions

+    def _get_default_auxiliary_params(self) -> dict:
+        default_auxiliary_params = super()._get_default_auxiliary_params()
+        default_auxiliary_params["max_time_limit_ratio"] = self.default_max_time_limit_ratio
+        return default_auxiliary_params
+
     def _initialize(self, **kwargs) -> None:
         self._init_params_aux()
         self._init_params()
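default_max_time_limit_ratio is surfaced through the max_time_limit_ratio auxiliary parameter, which scales the time budget handed to each model so that prediction and overhead fit in the remainder. A hedged sketch of the scaling rule only; the real plumbing lives in the shared AbstractModel code:

from typing import Optional

def scaled_time_limit(time_limit: Optional[float], max_time_limit_ratio: float) -> Optional[float]:
    # A model only gets max_time_limit_ratio of its nominal budget for fitting.
    if time_limit is None:
        return None
    return time_limit * max_time_limit_ratio

# With the defaults in this diff: 0.9 for most models, 0.8 for Chronos,
# 1.0 for local models and the multi-window wrapper.
print(scaled_time_limit(600.0, 0.9))  # 540.0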
src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py

@@ -11,6 +11,7 @@ from sklearn.base import BaseEstimator
 import autogluon.core as ag
 from autogluon.tabular import TabularPredictor
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
+from autogluon.timeseries.metrics.utils import in_sample_squared_seasonal_error
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.local import SeasonalNaiveModel
 from autogluon.timeseries.utils.datetime import (

@@ -82,7 +83,6 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         self._mlf: Optional[MLForecast] = None
         self._scaler: Optional[BaseTargetTransform] = None
         self._residuals_std_per_item: Optional[pd.Series] = None
-        self._avg_residuals_std: Optional[float] = None
         self._train_target_median: Optional[float] = None
         self._non_boolean_real_covariates: List[str] = []

@@ -332,7 +332,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
     def _save_residuals_std(self, val_df: pd.DataFrame) -> None:
         """Compute standard deviation of residuals for each item using the validation set.

-        Saves per-item residuals to `self.residuals_std_per_item
+        Saves per-item residuals to `self.residuals_std_per_item`.
         """
         residuals_df = val_df[[MLF_ITEMID, MLF_TARGET]]
         residuals_df = residuals_df.assign(y_pred=self._mlf.models_["mean"].predict(val_df))

@@ -344,7 +344,6 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         self._residuals_std_per_item = (
             residuals.pow(2.0).groupby(val_df[MLF_ITEMID].values, sort=False).mean().pow(0.5)
         )
-        self._avg_residuals_std = np.sqrt(residuals.pow(2.0).mean())

     def _remove_short_ts_and_generate_fallback_forecast(
         self,

@@ -391,7 +390,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
             forecast_for_short_series = None
         return data_long, known_covariates_long, forecast_for_short_series

-    def _add_gaussian_quantiles(self, predictions: pd.DataFrame, repeated_item_ids: pd.Series):
+    def _add_gaussian_quantiles(self, predictions: pd.DataFrame, repeated_item_ids: pd.Series, past_target: pd.Series):
         """
         Add quantile levels assuming that residuals follow normal distribution
         """

@@ -403,9 +402,14 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         normal_scale_per_timestep = pd.Series(np.tile(sqrt_h, num_items), index=repeated_item_ids)

         residuals_std_per_timestep = self._residuals_std_per_item.reindex(repeated_item_ids)
-        # Use
-
-
+        # Use in-sample seasonal error in for items not seen during fit
+        items_not_seen_during_fit = residuals_std_per_timestep.index[residuals_std_per_timestep.isna()].unique()
+        if len(items_not_seen_during_fit) > 0:
+            scale_for_new_items: pd.Series = np.sqrt(
+                in_sample_squared_seasonal_error(y_past=past_target.loc[items_not_seen_during_fit])
+            )
+            residuals_std_per_timestep = residuals_std_per_timestep.fillna(scale_for_new_items)
+
         std_per_timestep = residuals_std_per_timestep * normal_scale_per_timestep
         for q in self.quantile_levels:
             predictions[str(q)] = predictions["mean"] + norm.ppf(q) * std_per_timestep.to_numpy()

@@ -493,7 +497,6 @@ class DirectTabularModel(AbstractMLForecastModel):
         if self.is_quantile_model:
             # Quantile model does not require residuals to produce prediction intervals
             self._residuals_std_per_item = pd.Series(1.0, index=val_df[MLF_ITEMID].unique())
-            self._avg_residuals_std = 1.0
         else:
             super()._save_residuals_std(val_df=val_df)

@@ -545,7 +548,9 @@ class DirectTabularModel(AbstractMLForecastModel):
             predictions = apply_inverse_transform(predictions, transform=tfm)

         if not self.is_quantile_model:
-            predictions = self._add_gaussian_quantiles(
+            predictions = self._add_gaussian_quantiles(
+                predictions, repeated_item_ids=predictions[MLF_ITEMID], past_target=data[self.target]
+            )
         predictions = TimeSeriesDataFrame(predictions.rename(columns={MLF_ITEMID: ITEMID, MLF_TIMESTAMP: TIMESTAMP}))

         if forecast_for_short_series is not None:

@@ -661,7 +666,9 @@ class RecursiveTabularModel(AbstractMLForecastModel):
         )
         predictions = raw_predictions.rename(columns={MLF_ITEMID: ITEMID, MLF_TIMESTAMP: TIMESTAMP})
         predictions = TimeSeriesDataFrame(
-            self._add_gaussian_quantiles(
+            self._add_gaussian_quantiles(
+                predictions, repeated_item_ids=predictions[ITEMID], past_target=data[self.target]
+            )
         )

         if forecast_for_short_series is not None:
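_add_gaussian_quantiles converts a point forecast into quantile forecasts by modeling the h-step-ahead error as Gaussian with standard deviation growing as sqrt(h). An illustrative standalone version of that arithmetic for a single item:

import numpy as np
from scipy.stats import norm

prediction_length = 3
mean_forecast = np.array([10.0, 11.0, 12.0])  # point forecast
residuals_std = 2.0  # one-step residual std estimated on the validation set

# Error std scales with sqrt(h), matching the sqrt_h term above.
sqrt_h = np.sqrt(np.arange(1, prediction_length + 1))
std_per_timestep = residuals_std * sqrt_h

for q in [0.1, 0.5, 0.9]:
    print(q, np.round(mean_forecast + norm.ppf(q) * std_per_timestep, 2))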
src/autogluon/timeseries/models/chronos/model.py

@@ -1,7 +1,6 @@
 import logging
 import os
 import shutil
-import time
 from pathlib import Path
 from typing import Any, Dict, Literal, Optional, Union

@@ -68,6 +67,7 @@ MODEL_ALIASES = {
     "small": "autogluon/chronos-t5-small",
     "base": "autogluon/chronos-t5-base",
     "large": "autogluon/chronos-t5-large",
+    "bolt-tiny": "autogluon/chronos-bolt-tiny",
     "bolt-mini": "autogluon/chronos-bolt-mini",
     "bolt-small": "autogluon/chronos-bolt-small",
     "bolt-base": "autogluon/chronos-bolt-base",

@@ -113,10 +113,12 @@ class ChronosModel(AbstractTimeSeriesModel):
     batch_size : int, default = 16
         Size of batches used during inference
     num_samples : int, default = 20
-        Number of samples used during inference
+        Number of samples used during inference, only used for the original Chronos models
     device : str, default = None
         Device to use for inference (and fine-tuning, if enabled). If None, model will use the GPU if available.
-        For larger model sizes
+        For larger Chronos model sizes ``small``, ``base``, and ``large``; inference will fail if no GPU is available.
+        For Chronos-Bolt models, inference can be done on the CPU. Although fine-tuning the smaller Chronos models
+        (``tiny`` and ``mini``) and all Chronos-Bolt is allowed on the CPU, we recommend using a GPU for faster fine-tuning.
     context_length : int or None, default = None
         The context length to use in the model. Shorter context lengths will decrease model accuracy, but result
         in faster inference. If None, the model will infer context length from the data set length at inference

@@ -140,9 +142,9 @@ class ChronosModel(AbstractTimeSeriesModel):
         If True, the pretrained model will be fine-tuned
     fine_tune_lr: float, default = 0.0001
         The learning rate used for fine-tuning
-    fine_tune_steps : int, default =
+    fine_tune_steps : int, default = 1000
         The number of gradient update steps to fine-tune for
-    fine_tune_batch_size : int, default =
+    fine_tune_batch_size : int, default = 32
         The batch size to use for fine-tuning
     fine_tune_shuffle_buffer_size : int, default = 10000
         The size of the shuffle buffer to shuffle the data during fine-tuning. If None, shuffling will

@@ -162,6 +164,7 @@ class ChronosModel(AbstractTimeSeriesModel):
     # default number of samples for prediction
     default_num_samples: int = 20
     default_model_path = "autogluon/chronos-t5-small"
+    default_max_time_limit_ratio = 0.8
     maximum_context_length = 2048
     fine_tuned_ckpt_name: str = "fine-tuned-ckpt"

@@ -216,7 +219,6 @@ class ChronosModel(AbstractTimeSeriesModel):
         )

         self.model_pipeline: Optional[Any] = None  # of type BaseChronosPipeline
-        self.time_limit: Optional[float] = None

     def save(self, path: str = None, verbose: bool = True) -> str:
         pipeline = self.model_pipeline

@@ -234,7 +236,7 @@ class ChronosModel(AbstractTimeSeriesModel):

         fine_tune_ckpt_path = Path(model.path) / cls.fine_tuned_ckpt_name
         if fine_tune_ckpt_path.exists():
-            logger.debug(f"
+            logger.debug(f"\tFine-tuned checkpoint exists, setting model_path to {fine_tune_ckpt_path}")
             model.model_path = fine_tune_ckpt_path

         return model

@@ -320,8 +322,8 @@ class ChronosModel(AbstractTimeSeriesModel):
         init_args.setdefault("fine_tune", False)
         init_args.setdefault("keep_transformers_logs", False)
         init_args.setdefault("fine_tune_lr", 1e-4)
-        init_args.setdefault("fine_tune_steps",
-        init_args.setdefault("fine_tune_batch_size",
+        init_args.setdefault("fine_tune_steps", 1000)
+        init_args.setdefault("fine_tune_batch_size", 32)
         init_args.setdefault("eval_during_fine_tune", False)
         init_args.setdefault("fine_tune_eval_max_items", 256)
         init_args.setdefault("fine_tune_shuffle_buffer_size", 10_000)

@@ -399,7 +401,6 @@ class ChronosModel(AbstractTimeSeriesModel):

         eval_during_fine_tune = val_data is not None and fine_tune_args["eval_during_fine_tune"]

-        start_time = time.monotonic()
         if do_fine_tune:
             context_length = self._get_context_length(train_data)
             # load model pipeline to device memory

@@ -428,7 +429,7 @@ class ChronosModel(AbstractTimeSeriesModel):

             if self.prediction_length != fine_tune_prediction_length:
                 logger.debug(
-                    f"
+                    f"\tChronosBolt models can only be fine-tuned with a maximum prediction_length of {model_prediction_length}. "
                     f"Fine-tuning prediction_length has been changed to {fine_tune_prediction_length}."
                 )

@@ -436,10 +437,15 @@ class ChronosModel(AbstractTimeSeriesModel):
             fine_tune_trainer_kwargs["disable_tqdm"] = fine_tune_trainer_kwargs.get("disable_tqdm", (verbosity < 3))
             fine_tune_trainer_kwargs["use_cpu"] = str(self.model_pipeline.inner_model.device) == "cpu"

-
-
-
-
+            if fine_tune_trainer_kwargs["use_cpu"]:
+                logger.info(
+                    "\tFine-tuning on the CPU detected. We recommend using a GPU for faster fine-tuning of Chronos."
+                )
+
+                # TODO: adamw_torch_fused is not supported on CPU in torch <= 2.3. When torch 2.4 becomes the lower bound
+                # this if block can be removed because torch >= 2.4 supports AdamW optimizer with fused=True on CPU
+                if fine_tune_trainer_kwargs["optim"] == "adamw_torch_fused":
+                    fine_tune_trainer_kwargs["optim"] = "adamw_torch"

             output_dir = Path(fine_tune_trainer_kwargs["output_dir"])

@@ -509,37 +515,16 @@ class ChronosModel(AbstractTimeSeriesModel):
             )
             trainer.add_callback(LoggerCallback())

-            if val_data is not None:
-                # evaluate once before training
-                zero_shot_eval_loss = trainer.evaluate()["eval_loss"]
-
             trainer.train()

-
-
-
-                best_train_eval_loss = log_history_df["eval_loss"].min()
-            elif val_data is not None:
-                # evaluate at the end of fine-tuning
-                best_train_eval_loss = trainer.evaluate()["eval_loss"]
-
-            if val_data is None or best_train_eval_loss <= zero_shot_eval_loss:
-                fine_tuned_ckpt_path = Path(self.path) / self.fine_tuned_ckpt_name
-                logger.info(f"Saving fine-tuned model to {fine_tuned_ckpt_path}")
-                self.model_pipeline.inner_model.save_pretrained(Path(self.path) / self.fine_tuned_ckpt_name)
-            else:
-                # Reset the model to its pretrained state
-                logger.info("Validation loss worsened after fine-tuning. Reverting to the pretrained model.")
-                self.model_pipeline = None
-                self.load_model_pipeline(is_training=False)
+            fine_tuned_ckpt_path = Path(self.path) / self.fine_tuned_ckpt_name
+            logger.info(f"\tSaving fine-tuned model to {fine_tuned_ckpt_path}")
+            self.model_pipeline.inner_model.save_pretrained(Path(self.path) / self.fine_tuned_ckpt_name)

             if not fine_tune_args["keep_transformers_logs"]:
                 logger.debug(f"Removing transformers_logs directory {output_dir}")
                 shutil.rmtree(output_dir)

-        if time_limit is not None:
-            self.time_limit = time_limit - (time.monotonic() - start_time)  # inference time budget
-
     def _get_inference_data_loader(
         self,
         data: TimeSeriesDataFrame,

@@ -635,16 +620,3 @@ class ChronosModel(AbstractTimeSeriesModel):
             "can_use_train_data": do_fine_tune,
             "can_use_val_data": do_fine_tune,
         }
-
-    def score_and_cache_oof(
-        self,
-        val_data: TimeSeriesDataFrame,
-        store_val_score: bool = False,
-        store_predict_time: bool = False,
-        **predict_kwargs,
-    ) -> None:
-        # All computation happens during inference, so we provide the time_limit at prediction time
-        # TODO: Once custom predict_kwargs is allowed, make sure that `time_limit` is not among the keys
-        super().score_and_cache_oof(
-            val_data, store_val_score, store_predict_time, time_limit=self.time_limit, **predict_kwargs
-        )
src/autogluon/timeseries/models/chronos/pipeline/base.py

@@ -135,6 +135,7 @@ class BaseChronosPipeline(metaclass=PipelineRegistry):
         """
         from transformers import AutoConfig

+        kwargs.setdefault("resume_download", None)  # silence huggingface_hub warning
         if str(pretrained_model_name_or_path).startswith("s3://"):
             from .utils import cache_model_from_s3

src/autogluon/timeseries/models/chronos/pipeline/chronos.py

@@ -546,6 +546,7 @@ class ChronosPipeline(BaseChronosPipeline):
             "onnx",
             "openvino",
         ], "optimization_strategy not recognized. Please provide one of `onnx` or `openvino`"
+        kwargs.pop("resume_download", None)  # Optimized pipeline does not support 'resume_download' kwargs
         torch_dtype = kwargs.pop("torch_dtype", "auto")
         if torch_dtype != "auto":
             logger.warning(f"\t`torch_dtype` will be ignored for optimization_strategy {optimization_strategy}")
src/autogluon/timeseries/models/chronos/pipeline/utils.py

@@ -317,7 +317,8 @@ class TimeLimitCallback(TrainerCallback):
     def on_step_end(self, args, state, control, **kwargs):
         elapsed_time = time.monotonic() - self.start_time
         if elapsed_time > self.time_limit:
-
+            logger.info("\tStopping fine-tuning since time_limit is reached")
+            control.should_training_stop = True


 class LoggerCallback(TrainerCallback):
src/autogluon/timeseries/models/local/abstract_local_model.py

@@ -44,6 +44,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
     allowed_local_model_args: List[str] = []
     default_n_jobs: Union[int, float] = AG_DEFAULT_N_JOBS
     default_max_ts_length: Optional[int] = 2500
+    default_max_time_limit_ratio = 1.0
     init_time_in_seconds: int = 0

     def __init__(

@@ -84,7 +85,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):

         self._local_model_args: Dict[str, Any] = None
         self._seasonal_period: Optional[int] = None
-        self.time_limit: Optional[float] = None
         self._dummy_forecast: Optional[pd.DataFrame] = None

     @property

@@ -138,7 +138,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         self._seasonal_period = local_model_args["seasonal_period"]

         self._local_model_args = self._update_local_model_args(local_model_args=local_model_args)
-        self.time_limit = time_limit

         self._dummy_forecast = self._get_dummy_forecast(train_data)
         return self

@@ -187,18 +186,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         predictions_df.index = get_forecast_horizon_index_ts_dataframe(data, self.prediction_length, freq=self.freq)
         return TimeSeriesDataFrame(predictions_df)

-    def score_and_cache_oof(
-        self,
-        val_data: TimeSeriesDataFrame,
-        store_val_score: bool = False,
-        store_predict_time: bool = False,
-        **predict_kwargs,
-    ) -> None:
-        # All computation happens during inference, so we provide the time_limit at prediction time
-        super().score_and_cache_oof(
-            val_data, store_val_score, store_predict_time, time_limit=self.time_limit, **predict_kwargs
-        )
-
     def _predict_wrapper(self, time_series: pd.Series, end_time: Optional[float] = None) -> Tuple[pd.DataFrame, bool]:
         if end_time is not None and time.time() >= end_time:
             raise TimeLimitExceeded
src/autogluon/timeseries/models/multi_window/multi_window_model.py

@@ -33,6 +33,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     """

     # TODO: Remove the MultiWindowBacktestingModel class, move the logic to AbstractTimeSeriesTrainer
+    default_max_time_limit_ratio = 1.0

     def __init__(
         self,

@@ -124,8 +125,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
                 num_refits_remaining = math.ceil(
                     (val_splitter.num_val_windows - window_index) / refit_every_n_windows
                 )
-
-                time_left_for_window = 0.9 * time_left / num_refits_remaining
+                time_left_for_window = time_left / num_refits_remaining

             if refit_this_window:
                 model = self.get_child_model(window_index)

@@ -138,7 +138,15 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
                 )
                 model.fit_time = time.time() - model_fit_start_time
                 most_recent_refit_window = f"W{window_index}"
-
+
+            if time_limit is None:
+                time_left_for_prediction = None
+            else:
+                time_left_for_prediction = time_limit - (time.time() - global_fit_start_time)
+
+            model.score_and_cache_oof(
+                val_fold, store_val_score=True, store_predict_time=True, time_limit=time_left_for_prediction
+            )

             oof_predictions_per_window.append(model.get_oof_predictions()[0])
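The backtesting change splits the remaining budget evenly over the refits still to come; the 10% reserve formerly hard-coded here is now handled by max_time_limit_ratio. The arithmetic, for illustration (assuming each refit consumes its full slice):

import math

time_left = 120.0
num_val_windows, refit_every_n_windows = 3, 1

for window_index in range(num_val_windows):
    num_refits_remaining = math.ceil((num_val_windows - window_index) / refit_every_n_windows)
    time_left_for_window = time_left / num_refits_remaining  # previously 0.9 * time_left / ...
    print(window_index, time_left_for_window)  # 40.0 for each window
    time_left -= time_left_for_window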
src/autogluon/timeseries/models/presets.py

@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from autogluon.common import space
 from autogluon.core import constants
 from autogluon.timeseries.metrics import TimeSeriesScorer
+from autogluon.timeseries.utils.features import CovariateMetadata

 from . import (
     ADIDAModel,

@@ -133,6 +134,7 @@ def get_default_hps(key):
             "RecursiveTabular": {},
             "DirectTabular": {},
             "TemporalFusionTransformer": {},
+            "Chronos": {"model_path": "bolt-small"},
         },
         "light_inference": {
             "SeasonalNaive": {},

@@ -145,18 +147,33 @@ def get_default_hps(key):
             "SeasonalNaive": {},
             "Croston": {},
             "AutoETS": {},
-            "AutoARIMA": {},
             "NPTS": {},
             "DynamicOptimizedTheta": {},
-
-            "RecursiveTabular": {
-                "tabular_hyperparameters": {"NN_TORCH": {"proc.impute_strategy": "constant"}, "GBM": {}},
-            },
+            "RecursiveTabular": {},
             "DirectTabular": {},
             "TemporalFusionTransformer": {},
             "PatchTST": {},
             "DeepAR": {},
-            "Chronos":
+            "Chronos": [
+                {
+                    "ag_args": {"name_suffix": "ZeroShot"},
+                    "model_path": "bolt-base",
+                },
+                {
+                    "ag_args": {"name_suffix": "FineTuned"},
+                    "model_path": "bolt-small",
+                    "fine_tune": True,
+                    "target_scaler": "standard",
+                    "covariate_regressor": {"model_name": "CAT", "model_hyperparameters": {"iterations": 1_000}},
+                },
+            ],
+            "TiDE": {
+                "encoder_hidden_dim": 256,
+                "decoder_hidden_dim": 256,
+                "temporal_hidden_dim": 64,
+                "num_batches_per_epoch": 100,
+                "lr": 1e-4,
+            },
         },
     }
     return default_model_hps[key]

@@ -170,6 +187,7 @@ def get_preset_models(
     eval_metric_seasonal_period: Optional[int],
    hyperparameters: Union[str, Dict, None],
    hyperparameter_tune: bool,
+    metadata: CovariateMetadata,
    all_assigned_names: List[str],
    excluded_model_types: List[str],
    multi_window: bool = False,

@@ -247,6 +265,7 @@ def get_preset_models(
            prediction_length=prediction_length,
            eval_metric=eval_metric,
            eval_metric_seasonal_period=eval_metric_seasonal_period,
+           metadata=metadata,
            hyperparameters=model_hps,
            **kwargs,
        )
src/autogluon/timeseries/predictor.py

@@ -217,6 +217,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
                 prediction_length=self.prediction_length,
                 quantile_levels=self.quantile_levels,
                 cache_predictions=self.cache_predictions,
+                ensemble_model_type=kwargs.pop("ensemble_model_type", None),
             )
         )
         # Using `TimeSeriesLearner` as default argument breaks doc generation with Sphnix
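Together with the learner and trainer changes, this threads an experimental ensemble_model_type option through the predictor. A hedged sketch; MyEnsemble is a hypothetical subclass, and as the trainer warning notes, this interface may break in future versions:

from autogluon.timeseries import TimeSeriesPredictor
from autogluon.timeseries.models.ensemble.greedy_ensemble import TimeSeriesGreedyEnsemble

# Hypothetical custom ensemble that reuses the greedy weighting logic unchanged.
class MyEnsemble(TimeSeriesGreedyEnsemble):
    pass

# None (the default) falls back to TimeSeriesGreedyEnsemble.
predictor = TimeSeriesPredictor(prediction_length=24, ensemble_model_type=MyEnsemble)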
src/autogluon/timeseries/trainer/abstract_trainer.py

@@ -29,7 +29,7 @@ from autogluon.timeseries.utils.features import (
     CovariateMetadata,
     PermutationFeatureImportanceTransform,
 )
-from autogluon.timeseries.utils.warning_filters import disable_tqdm
+from autogluon.timeseries.utils.warning_filters import disable_tqdm, warning_filter

 logger = logging.getLogger("autogluon.timeseries.trainer")

@@ -264,6 +264,7 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
         val_splitter: Optional[AbstractWindowSplitter] = None,
         refit_every_n_windows: Optional[int] = 1,
         cache_predictions: bool = True,
+        ensemble_model_type: Optional[Type] = None,
         **kwargs,
     ):
         super().__init__(path=path, save_data=save_data, low_memory=True, **kwargs)

@@ -276,7 +277,13 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
         self.skip_model_selection = skip_model_selection
         # Ensemble cannot be fit if val_scores are not computed
         self.enable_ensemble = enable_ensemble and not skip_model_selection
-
+        if ensemble_model_type is None:
+            ensemble_model_type = TimeSeriesGreedyEnsemble
+        else:
+            logger.warning(
+                "Using a custom `ensemble_model_type` is experimental functionality that may break in future versions."
+            )
+        self.ensemble_model_type = ensemble_model_type

         self.verbosity = verbosity

@@ -519,8 +526,12 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
         fit_end_time = time.time()
         model.fit_time = model.fit_time or (fit_end_time - fit_start_time)

+        if time_limit is not None:
+            time_limit = time_limit - (fit_end_time - fit_start_time)
         if val_data is not None and not self.skip_model_selection:
-            model.score_and_cache_oof(
+            model.score_and_cache_oof(
+                val_data, store_val_score=True, store_predict_time=True, time_limit=time_limit
+            )

         self._log_scores_and_times(model.val_score, model.fit_time, model.predict_time)

@@ -736,7 +747,8 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
             quantile_levels=self.quantile_levels,
             metadata=self.metadata,
         )
-
+        with warning_filter():
+            ensemble.fit_ensemble(model_preds, data_per_window=data_per_window, time_limit=time_limit)
         ensemble.fit_time = time.time() - time_start

         predict_time = 0
src/autogluon.timeseries.egg-info/requires.txt

@@ -17,9 +17,9 @@ fugue>=0.9.0
 tqdm<5,>=4.38
 orjson~=3.9
 tensorboard<3,>=2.9
-autogluon.core[raytune]==1.1.2b20241118
-autogluon.common==1.1.2b20241118
-autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20241118
+autogluon.core[raytune]==1.1.2b20241120
+autogluon.common==1.1.2b20241120
+autogluon.tabular[catboost,lightgbm,xgboost]==1.1.2b20241120

 [all]
 optimum[onnxruntime]<1.20,>=1.17