autogluon.timeseries 1.1.0b20240417__tar.gz → 1.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of autogluon.timeseries might be problematic.
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/PKG-INFO +2 -2
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/setup.py +1 -1
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/__init__.py +2 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +34 -5
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/chronos/model.py +16 -15
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/chronos/pipeline.py +1 -1
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/gluonts/__init__.py +2 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/gluonts/torch/models.py +91 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/presets.py +3 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/predictor.py +31 -5
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/trainer/abstract_trainer.py +4 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/PKG-INFO +2 -2
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/requires.txt +8 -8
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/setup.cfg +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/configs/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/configs/presets_configs.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/dataset/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/evaluator.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/learner.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/metrics/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/metrics/abstract.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/metrics/point.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/metrics/quantile.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/metrics/utils.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/chronos/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/chronos/utils.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/local/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/local/abstract_local_model.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/local/naive.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/local/npts.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/splitter.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/trainer/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/features.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/forecast.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/SOURCES.txt +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
- {autogluon.timeseries-1.1.0b20240417 → autogluon.timeseries-1.1.1}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.1.0b20240417
+Version: 1.1.1
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -107,7 +107,7 @@ Description:
         This library is licensed under the Apache 2.0 License.
 
 Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
setup.py
@@ -30,7 +30,7 @@ install_requires = [
     "pytorch_lightning",  # version range defined in `core/_setup_utils.py`
     "transformers[sentencepiece]",  # version range defined in `core/_setup_utils.py`
     "accelerate",  # version range defined in `core/_setup_utils.py`
-    "gluonts
+    "gluonts==0.15.1",
     "networkx",  # version range defined in `core/_setup_utils.py`
     # TODO: update statsforecast to v1.5.0 - resolve antlr4-python3-runtime dependency clash with multimodal
     "statsforecast>=1.4.0,<1.5",
src/autogluon/timeseries/models/__init__.py
@@ -6,6 +6,7 @@ from .gluonts import (
     PatchTSTModel,
     SimpleFeedForwardModel,
     TemporalFusionTransformerModel,
+    TiDEModel,
     WaveNetModel,
 )
 from .local import (
@@ -55,6 +56,7 @@ __all__ = [
     "SimpleFeedForwardModel",
     "TemporalFusionTransformerModel",
     "ThetaModel",
+    "TiDEModel",
     "WaveNetModel",
     "ZeroModel",
 ]
src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py
@@ -86,6 +86,28 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         self._residuals_std_per_item: Optional[pd.Series] = None
         self._avg_residuals_std: Optional[float] = None
         self._train_target_median: Optional[float] = None
+        self._non_boolean_real_covariates: List[str] = []
+
+    @property
+    def tabular_predictor_path(self) -> str:
+        return os.path.join(self.path, "tabular_predictor")
+
+    def save(self, path: str = None, verbose: bool = True) -> str:
+        assert "mean" in self._mlf.models_, "TabularPredictor must be trained before saving"
+        tabular_predictor = self._mlf.models_["mean"].predictor
+        self._mlf.models_["mean"].predictor = None
+        save_path = super().save(path=path, verbose=verbose)
+        self._mlf.models_["mean"].predictor = tabular_predictor
+        return save_path
+
+    @classmethod
+    def load(
+        cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True
+    ) -> "AbstractTimeSeriesModel":
+        model = super().load(path=path, reset_paths=reset_paths, load_oof=load_oof, verbose=verbose)
+        assert "mean" in model._mlf.models_, "Loaded model doesn't have a trained TabularPredictor"
+        model._mlf.models_["mean"].predictor = TabularPredictor.load(model.tabular_predictor_path)
+        return model
 
     def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any:
         if is_train:
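The new `save`/`load` pair stores the fitted `TabularPredictor` under `tabular_predictor_path` through the predictor's own save machinery instead of pickling it inside the model object: the predictor is detached before `super().save()` runs and re-attached afterwards. A minimal standalone sketch of this detach-and-restore pattern (class and attribute names here are illustrative, not AutoGluon's API):

```python
import os
import pickle


class ModelWithSubArtifact:
    """Toy model whose `predictor` sub-artifact must be persisted separately."""

    def __init__(self, path: str):
        self.path = path
        self.predictor = None  # large object with its own save/load logic

    def save(self) -> str:
        # Detach the sub-artifact so pickle.dump does not serialize it,
        # then re-attach it so the in-memory object stays usable.
        predictor, self.predictor = self.predictor, None
        try:
            with open(os.path.join(self.path, "model.pkl"), "wb") as f:
                pickle.dump(self, f)
        finally:
            self.predictor = predictor
        return self.path
```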
@@ -252,10 +274,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         if static_features is not None:
             df = pd.merge(df, static_features, how="left", on=ITEMID, suffixes=(None, "_static_feat"))
 
-        for col in self.
+        for col in self._non_boolean_real_covariates:
             # Normalize non-boolean features using mean_abs scaling
-
-            df[f"__scaled_{col}"] = df[col] / df[col].abs().groupby(df[ITEMID]).mean().reindex(df[ITEMID]).values
+            df[f"__scaled_{col}"] = df[col] / df[col].abs().groupby(df[ITEMID]).mean().reindex(df[ITEMID]).values
 
         # Convert float64 to float32 to reduce memory usage
         float64_cols = list(df.select_dtypes(include="float64"))
@@ -277,6 +298,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         self._check_fit_params()
         fit_start_time = time.time()
         self._train_target_median = train_data[self.target].median()
+        for col in self.metadata.known_covariates_real:
+            if not train_data[col].isin([0, 1]).all():
+                self._non_boolean_real_covariates.append(col)
         # TabularEstimator is passed to MLForecast later to include tuning_data
         model_params = self._get_model_params()
 
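Read together, these two hunks first record at fit time which real-valued known covariates are not pure 0/1 indicators, then add a mean-absolute-scaled copy of each such column during preprocessing. A small self-contained pandas sketch of the same computation (column names invented for illustration):

```python
import pandas as pd

df = pd.DataFrame({
    "item_id": ["A", "A", "B", "B"],
    "price": [10.0, 30.0, 1.0, 3.0],  # non-boolean -> gets a scaled copy
    "promo": [0, 1, 1, 0],            # 0/1 indicator -> left as-is
})

for col in ["price", "promo"]:
    if not df[col].isin([0, 1]).all():
        # Divide each value by the mean absolute value of its own time series
        df[f"__scaled_{col}"] = (
            df[col] / df[col].abs().groupby(df["item_id"]).mean().reindex(df["item_id"]).values
        )

print(df)  # __scaled_price is [0.5, 1.5, 0.5, 1.5]; no __scaled_promo column is created
```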
@@ -292,7 +316,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
 
         estimator = TabularEstimator(
             predictor_init_kwargs={
-                "path":
+                "path": self.tabular_predictor_path,
                 "verbosity": verbosity - 2,
                 "label": MLF_TARGET,
                 **self._get_extra_tabular_init_kwargs(),
@@ -349,7 +373,12 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
                 "Fallback model SeasonalNaive is used for these time series."
             )
             data_short = data.query("item_id in @short_series")
-            seasonal_naive = SeasonalNaiveModel(
+            seasonal_naive = SeasonalNaiveModel(
+                freq=self.freq,
+                prediction_length=self.prediction_length,
+                target=self.target,
+                quantile_levels=self.quantile_levels,
+            )
             seasonal_naive.fit(train_data=data_short)
             forecast_for_short_series = seasonal_naive.predict(data_short)
 
src/autogluon/timeseries/models/chronos/model.py
@@ -16,27 +16,27 @@ logger = logging.getLogger(__name__)
 
 # allowed HuggingFace model paths with custom parameter definitions
 MODEL_CONFIGS = {
-    "amazon/chronos-t5-tiny": {
+    "chronos-t5-tiny": {
         "num_gpus": 0,  # minimum number of required GPUs
         "default_torch_dtype": "auto",
         "default_batch_size": 16,
     },
-    "amazon/chronos-t5-mini": {
+    "chronos-t5-mini": {
         "num_gpus": 0,
         "default_torch_dtype": "auto",
         "default_batch_size": 16,
     },
-    "amazon/chronos-t5-small": {
+    "chronos-t5-small": {
         "num_gpus": 1,
         "default_torch_dtype": "bfloat16",
         "default_batch_size": 16,
     },
-    "amazon/chronos-t5-base": {
+    "chronos-t5-base": {
         "num_gpus": 1,
         "default_torch_dtype": "bfloat16",
         "default_batch_size": 16,
     },
-    "amazon/chronos-t5-large": {
+    "chronos-t5-large": {
         "num_gpus": 1,
         "default_torch_dtype": "bfloat16",
         "default_batch_size": 8,
@@ -45,11 +45,11 @@ MODEL_CONFIGS = {
 
 
 MODEL_ALIASES = {
-    "tiny": "amazon/chronos-t5-tiny",
-    "mini": "amazon/chronos-t5-mini",
-    "small": "amazon/chronos-t5-small",
-    "base": "amazon/chronos-t5-base",
-    "large": "amazon/chronos-t5-large",
+    "tiny": "autogluon/chronos-t5-tiny",
+    "mini": "autogluon/chronos-t5-mini",
+    "small": "autogluon/chronos-t5-small",
+    "base": "autogluon/chronos-t5-base",
+    "large": "autogluon/chronos-t5-large",
 }
 
 
@@ -75,10 +75,10 @@ class ChronosModel(AbstractTimeSeriesModel):
 
     Other Parameters
     ----------------
-    model_path: str, default = "amazon/chronos-t5-small"
+    model_path: str, default = "autogluon/chronos-t5-small"
         Model path used for the model, i.e., a HuggingFace transformers ``name_or_path``. Can be a
         compatible model name on HuggingFace Hub or a local path to a model directory. Original
-        Chronos models (i.e., ``amazon/chronos-t5-{model_size}``) can be specified with aliases
+        Chronos models (i.e., ``autogluon/chronos-t5-{model_size}``) can be specified with aliases
         ``tiny``, ``mini`` , ``small``, ``base``, and ``large``.
     batch_size : int, default = 16
         Size of batches used during inference
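Since `MODEL_ALIASES` now points at the `autogluon/` mirrors on the HuggingFace Hub, user code that selects a Chronos size by alias transparently picks up the new weight location. A typical invocation following the documented `TimeSeriesPredictor` usage (prediction length and data preparation are illustrative):

```python
from autogluon.timeseries import TimeSeriesPredictor

# train_data: a TimeSeriesDataFrame prepared beforehand
predictor = TimeSeriesPredictor(prediction_length=48).fit(
    train_data,
    # the "small" alias resolves through MODEL_ALIASES to "autogluon/chronos-t5-small"
    hyperparameters={"Chronos": {"model_path": "small"}},
)
predictions = predictor.predict(train_data)
```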
@@ -106,7 +106,7 @@ class ChronosModel(AbstractTimeSeriesModel):
 
     # default number of samples for prediction
     default_num_samples: int = 20
-    default_model_path = "amazon/chronos-t5-small"
+    default_model_path = "autogluon/chronos-t5-small"
     maximum_context_length = 512
 
     def __init__(
@@ -185,9 +185,10 @@ class ChronosModel(AbstractTimeSeriesModel):
     @property
     def ag_default_config(self) -> Dict[str, Any]:
         """The default configuration of the model used by AutoGluon if the model is one of those
-        defined in MODEL_CONFIGS. For now, these are ``amazon/chronos-t5-*`` family of models.
+        defined in MODEL_CONFIGS. For now, these are ``autogluon/chronos-t5-*`` family of models.
         """
-        return MODEL_CONFIGS.get(self.model_path, {})
+        model_name = str(self.model_path).split("/")[-1]
+        return MODEL_CONFIGS.get(model_name, {})
 
     @property
     def min_num_gpus(self) -> int:
src/autogluon/timeseries/models/chronos/pipeline.py
@@ -163,7 +163,7 @@ class MeanScaleUniformBins(ChronosTokenizer):
     def output_transform(self, samples: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
         scale_unsqueezed = scale.unsqueeze(-1).unsqueeze(-1)
         indices = torch.clamp(
-            samples - self.config.n_special_tokens,
+            samples - self.config.n_special_tokens - 1,
             min=0,
             max=len(self.centers) - 1,
         )
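The extra `- 1` corrects an off-by-one in decoding sampled token ids back to values: sampled ids sit one position above the corresponding index into `self.centers`, so the inverse transform must subtract `n_special_tokens + 1`. A toy numeric illustration of the corrected arithmetic (all values invented):

```python
import torch

n_special_tokens = 2                      # illustrative value
centers = torch.tensor([-1.0, 0.0, 1.0])  # toy bin centers

samples = torch.tensor([3, 4, 5])         # sampled token ids
indices = torch.clamp(samples - n_special_tokens - 1, min=0, max=len(centers) - 1)
print(centers[indices])  # tensor([-1., 0., 1.]); without "- 1", token id 3 would map to 0.0
```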
src/autogluon/timeseries/models/gluonts/__init__.py
@@ -4,6 +4,7 @@ from .torch.models import (
     PatchTSTModel,
     SimpleFeedForwardModel,
     TemporalFusionTransformerModel,
+    TiDEModel,
     WaveNetModel,
 )
 
@@ -13,5 +14,6 @@ __all__ = [
     "PatchTSTModel",
     "SimpleFeedForwardModel",
     "TemporalFusionTransformerModel",
+    "TiDEModel",
     "WaveNetModel",
 ]
src/autogluon/timeseries/models/gluonts/torch/models.py
@@ -424,3 +424,94 @@ class WaveNetModel(AbstractGluonTSModel):
         init_kwargs.setdefault("time_features", get_time_features_for_frequency(self.freq))
         init_kwargs.setdefault("num_parallel_samples", self.default_num_samples)
         return init_kwargs
+
+
+class TiDEModel(AbstractGluonTSModel):
+    """Time series dense encoder model from [Das2023]_.
+
+    Based on `gluonts.torch.model.tide.TiDEEstimator <https://ts.gluon.ai/stable/api/gluonts/gluonts.torch.model.tide.html>`_.
+    See GluonTS documentation for additional hyperparameters.
+
+
+    References
+    ----------
+    .. [Das2023] Das, Abhimanyu, et al.
+        "Long-term Forecasting with TiDE: Time-series Dense Encoder."
+        Transactions of Machine Learning Research. 2023.
+
+    Other Parameters
+    ----------------
+    context_length : int, default = max(64, 2 * prediction_length)
+        Number of past values used for prediction.
+    disable_static_features : bool, default = False
+        If True, static features won't be used by the model even if they are present in the dataset.
+        If False, static features will be used by the model if they are present in the dataset.
+    disable_known_covariates : bool, default = False
+        If True, known covariates won't be used by the model even if they are present in the dataset.
+        If False, known covariates will be used by the model if they are present in the dataset.
+    disable_past_covariates : bool, default = False
+        If True, past covariates won't be used by the model even if they are present in the dataset.
+        If False, past covariates will be used by the model if they are present in the dataset.
+    feat_proj_hidden_dim : int, default = 4
+        Size of the feature projection layer.
+    encoder_hidden_dim : int, default = 4
+        Size of the dense encoder layer.
+    decoder_hidden_dim : int, default = 4
+        Size of the dense decoder layer.
+    temporal_hidden_dim : int, default = 4
+        Size of the temporal decoder layer.
+    distr_hidden_dim : int, default = 4
+        Size of the distribution projection layer.
+    num_layers_encoder : int, default = 1
+        Number of layers in dense encoder.
+    num_layers_decoder : int, default = 1
+        Number of layers in dense decoder.
+    decoder_output_dim : int, default = 4
+        Output size of the dense decoder.
+    dropout_rate : float, default = 0.3
+        Dropout regularization parameter.
+    num_feat_dynamic_proj : int, default = 2
+        Output size of feature projection layer.
+    embedding_dimension : int, default = [16] * num_feat_static_cat
+        Dimension of the embeddings for categorical features
+    layer_norm : bool, default = False
+        Should layer normalization be enabled?
+    scaling : {"mean", "std", None}, default = "mean"
+        Scaling applied to the inputs. One of ``"mean"`` (mean absolute scaling), ``"std"`` (standardization), ``None`` (no scaling).
+    max_epochs : int, default = 100
+        Number of epochs the model will be trained for
+    batch_size : int, default = 64
+        Size of batches used during training
+    predict_batch_size : int, default = 500
+        Size of batches used during prediction.
+    num_batches_per_epoch : int, default = 50
+        Number of batches processed every epoch
+    lr : float, default = 1e-3,
+        Learning rate used during training
+    trainer_kwargs : dict, optional
+        Optional keyword arguments passed to ``lightning.Trainer``.
+    early_stopping_patience : int or None, default = 20
+        Early stop training if the validation loss doesn't improve for this many epochs.
+    keep_lightning_logs : bool, default = False
+        If True, ``lightning_logs`` directory will NOT be removed after the model finished training.
+    """
+
+    supports_known_covariates = True
+    supports_static_features = True
+
+    @property
+    def default_context_length(self) -> int:
+        return min(512, max(64, 2 * self.prediction_length))
+
+    def _get_estimator_class(self) -> Type[GluonTSEstimator]:
+        from gluonts.torch.model.tide import TiDEEstimator
+
+        return TiDEEstimator
+
+    def _get_estimator_init_args(self) -> Dict[str, Any]:
+        init_kwargs = super()._get_estimator_init_args()
+        init_kwargs["num_feat_static_cat"] = self.num_feat_static_cat
+        init_kwargs["num_feat_static_real"] = self.num_feat_static_real
+        init_kwargs["cardinality"] = self.feat_static_cat_cardinality
+        init_kwargs["num_feat_dynamic_real"] = self.num_feat_dynamic_real
+        return init_kwargs
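Once registered in the presets (see the hunks below), the new model can be requested by name through the `hyperparameters` argument like any other GluonTS-based model. A typical invocation (hyperparameter values chosen arbitrarily for illustration):

```python
from autogluon.timeseries import TimeSeriesPredictor

# train_data: a TimeSeriesDataFrame prepared beforehand
predictor = TimeSeriesPredictor(prediction_length=24).fit(
    train_data,
    hyperparameters={
        "TiDE": {"context_length": 96, "encoder_hidden_dim": 64, "dropout_rate": 0.2},
    },
)
```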
src/autogluon/timeseries/models/presets.py
@@ -31,6 +31,7 @@ from . import (
     SimpleFeedForwardModel,
     TemporalFusionTransformerModel,
     ThetaModel,
+    TiDEModel,
     WaveNetModel,
     ZeroModel,
 )
@@ -51,6 +52,7 @@ MODEL_TYPES = dict(
     DLinear=DLinearModel,
     PatchTST=PatchTSTModel,
     TemporalFusionTransformer=TemporalFusionTransformerModel,
+    TiDE=TiDEModel,
     WaveNet=WaveNetModel,
     RecursiveTabular=RecursiveTabularModel,
     DirectTabular=DirectTabularModel,
@@ -93,6 +95,7 @@ DEFAULT_MODEL_PRIORITY = dict(
     # Models that can early stop are trained at the end
     TemporalFusionTransformer=45,
     DeepAR=40,
+    TiDE=30,
     PatchTST=30,
     # Models below are not included in any presets
     WaveNet=25,
src/autogluon/timeseries/predictor.py
@@ -146,7 +146,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
     """
 
     predictor_file_name = "predictor.pkl"
-    _predictor_version_file_name = "__version__"
+    _predictor_version_file_name = "version.txt"
     _predictor_log_file_name = "predictor_log.txt"
 
     def __init__(
@@ -190,7 +190,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
             raise ValueError(f"Target column {self.target} cannot be one of the known covariates.")
         self.known_covariates_names = list(known_covariates_names)
 
-        self.prediction_length = prediction_length
+        self.prediction_length = int(prediction_length)
         # For each validation fold, all time series in training set must have length >= _min_train_length
         self._min_train_length = max(self.prediction_length + 1, 5)
         self.freq = freq
@@ -580,7 +580,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
 
             * "num_trials": How many HPO trials to run
             * "scheduler": Which scheduler to use. Valid values:
-                * "local": Local
+                * "local": Local scheduler that schedules trials FIFO
             * "searcher": Which searching algorithm to use. Valid values:
                 * "local_random": Uses the "random" searcher
                 * "random": Perform random search
@@ -1041,14 +1041,40 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
 
     @classmethod
     def _load_version_file(cls, path: str) -> str:
+        """
+        Loads the version file that is part of the saved predictor artifact.
+
+        Parameters
+        ----------
+        path: str
+            The path that would be used to load the predictor via `predictor.load(path)`
+
+        Returns
+        -------
+        The version of AutoGluon used to fit the predictor, as a string.
+
+        """
         version_file_path = os.path.join(path, cls._predictor_version_file_name)
-        version = load_str.load(path=version_file_path)
+        try:
+            version = load_str.load(path=version_file_path)
+        except:
+            # Loads the old version file used in `autogluon.timeseries<=1.1.0`, named `__version__`.
+            # This file name was changed because Kaggle does not allow uploading files named `__version__`.
+            version_file_path = os.path.join(path, "__version__")
+            version = load_str.load(path=version_file_path)
         return version
 
     @classmethod
     def load(cls, path: Union[str, Path], require_version_match: bool = True) -> "TimeSeriesPredictor":
         """Load an existing ``TimeSeriesPredictor`` from given ``path``.
 
+        .. warning::
+
+            :meth:`autogluon.timeseries.TimeSeriesPredictor.load` uses `pickle` module implicitly, which is known to
+            be insecure. It is possible to construct malicious pickle data which will execute arbitrary code during
+            unpickling. Never load data that could have come from an untrusted source, or that could have been tampered
+            with. **Only load data you trust.**
+
         Parameters
         ----------
         path : str or pathlib.Path
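The try/except keeps old artifacts loadable: the new `version.txt` name is attempted first, and only on failure does the loader fall back to the legacy `__version__` file written by `autogluon.timeseries<=1.1.0`. The same backward-compatibility pattern in plain Python (helper name is illustrative):

```python
import os


def load_version(path: str) -> str:
    # Prefer the new file name; fall back to the pre-1.1.1 name for old artifacts.
    for file_name in ("version.txt", "__version__"):
        version_file = os.path.join(path, file_name)
        if os.path.exists(version_file):
            with open(version_file) as f:
                return f.read().strip()
    raise FileNotFoundError(f"no version file found under {path!r}")
```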
@@ -1077,7 +1103,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
         except:
             logger.warning(
                 f'WARNING: Could not find version file at "{os.path.join(path, cls._predictor_version_file_name)}".\n'
-                f"This means that the predictor was fit in
+                f"This means that the predictor was fit in an AutoGluon version `<=0.7.0`."
             )
             version_saved = "Unknown (Likely <=0.7.0)"
 
src/autogluon/timeseries/trainer/abstract_trainer.py
@@ -292,6 +292,10 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
         self.cache_predictions = cache_predictions
         self.hpo_results = {}
 
+        if self._cached_predictions_path.exists():
+            logger.debug(f"Removing existing cached predictions file {self._cached_predictions_path}")
+            self._cached_predictions_path.unlink()
+
     def save_train_data(self, data: TimeSeriesDataFrame, verbose: bool = True) -> None:
         path = os.path.join(self.path_data, "train.pkl")
         save_pkl.save(path=path, object=data, verbose=verbose)
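Clearing the cached-predictions file in the trainer constructor guards against serving predictions cached by an earlier fit into the same directory. The equivalent standalone logic with `pathlib` (the file location is illustrative):

```python
from pathlib import Path

cached_predictions_path = Path("AutogluonModels/cached_predictions.pkl")  # illustrative
if cached_predictions_path.exists():
    # Stale cache from a previous fit into the same directory; discard it.
    cached_predictions_path.unlink()
```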
src/autogluon.timeseries.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.1.0b20240417
+Version: 1.1.1
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -107,7 +107,7 @@ Description:
         This library is licensed under the Apache 2.0 License.
 
 Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
src/autogluon.timeseries.egg-info/requires.txt
@@ -2,12 +2,12 @@ joblib<2,>=1.1
 numpy<1.29,>=1.21
 scipy<1.13,>=1.5.4
 pandas<2.3.0,>=2.0.0
-torch<2.
-lightning<2.
-pytorch_lightning<2.
-transformers[sentencepiece]<4.
+torch<2.4,>=2.2
+lightning<2.4,>=2.2
+pytorch_lightning<2.4,>=2.2
+transformers[sentencepiece]<4.41.0,>=4.38.0
 accelerate<0.22.0,>=0.21.0
-gluonts
+gluonts==0.15.1
 networkx<4,>=3.0
 statsforecast<1.5,>=1.4.0
 mlforecast<0.10.1,>=0.10.0
@@ -15,9 +15,9 @@ utilsforecast<0.0.11,>=0.0.10
 tqdm<5,>=4.38
 orjson~=3.9
 tensorboard<3,>=2.9
-autogluon.core[raytune]==1.1.0b20240417
-autogluon.common==1.1.0b20240417
-autogluon.tabular[catboost,lightgbm,xgboost]==1.1.0b20240417
+autogluon.core[raytune]==1.1.1
+autogluon.common==1.1.1
+autogluon.tabular[catboost,lightgbm,xgboost]==1.1.1
 
 [all]
 optimum[onnxruntime]<1.19,>=1.17