autogluon.timeseries 1.4.1b20250820__py3-none-any.whl → 1.4.1b20250902__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +84 -0
- autogluon/timeseries/dataset/ts_dataframe.py +9 -9
- autogluon/timeseries/learner.py +14 -14
- autogluon/timeseries/metrics/__init__.py +5 -5
- autogluon/timeseries/metrics/abstract.py +11 -12
- autogluon/timeseries/models/__init__.py +2 -0
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +39 -41
- autogluon/timeseries/models/abstract/tunable.py +6 -6
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +30 -30
- autogluon/timeseries/models/autogluon_tabular/per_step.py +12 -12
- autogluon/timeseries/models/chronos/model.py +10 -10
- autogluon/timeseries/models/chronos/pipeline/base.py +8 -8
- autogluon/timeseries/models/chronos/pipeline/chronos.py +12 -12
- autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py +12 -12
- autogluon/timeseries/models/chronos/pipeline/utils.py +12 -12
- autogluon/timeseries/models/ensemble/abstract.py +19 -19
- autogluon/timeseries/models/ensemble/basic.py +8 -8
- autogluon/timeseries/models/ensemble/greedy.py +13 -13
- autogluon/timeseries/models/gluonts/abstract.py +24 -24
- autogluon/timeseries/models/gluonts/dataset.py +2 -2
- autogluon/timeseries/models/gluonts/models.py +7 -7
- autogluon/timeseries/models/local/abstract_local_model.py +12 -12
- autogluon/timeseries/models/local/statsforecast.py +11 -11
- autogluon/timeseries/models/multi_window/multi_window_model.py +33 -22
- autogluon/timeseries/models/registry.py +3 -3
- autogluon/timeseries/predictor.py +37 -37
- autogluon/timeseries/regressor.py +13 -13
- autogluon/timeseries/splitter.py +6 -6
- autogluon/timeseries/trainer/__init__.py +3 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/{trainer.py → trainer/trainer.py} +72 -128
- autogluon/timeseries/transforms/covariate_scaler.py +3 -3
- autogluon/timeseries/transforms/target_scaler.py +7 -7
- autogluon/timeseries/utils/datetime/lags.py +2 -2
- autogluon/timeseries/utils/datetime/time_features.py +2 -2
- autogluon/timeseries/utils/features.py +32 -32
- autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/METADATA +5 -5
- autogluon.timeseries-1.4.1b20250902.dist-info/RECORD +75 -0
- autogluon/timeseries/configs/presets_configs.py +0 -79
- autogluon/timeseries/models/presets.py +0 -280
- autogluon.timeseries-1.4.1b20250820.dist-info/RECORD +0 -72
- /autogluon.timeseries-1.4.1b20250820-py3.9-nspkg.pth → /autogluon.timeseries-1.4.1b20250902-py3.9-nspkg.pth +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/LICENSE +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/NOTICE +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/WHEEL +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/zip-safe +0 -0
autogluon/timeseries/transforms/target_scaler.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Literal, Optional, Protocol,
+from typing import Literal, Optional, Protocol, Union, overload
 
 import numpy as np
 import pandas as pd
@@ -30,7 +30,7 @@ class LocalTargetScaler(TargetScaler):
         self.loc: Optional[pd.Series] = None
         self.scale: Optional[pd.Series] = None
 
-    def _compute_loc_scale(self, target_series: pd.Series) ->
+    def _compute_loc_scale(self, target_series: pd.Series) -> tuple[Optional[pd.Series], Optional[pd.Series]]:
         raise NotImplementedError
 
     def fit_transform(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
@@ -45,7 +45,7 @@ class LocalTargetScaler(TargetScaler):
         self.scale = self.scale.clip(lower=self.min_scale).replace([np.inf, -np.inf], np.nan).fillna(1.0)
         return self
 
-    def _reindex_loc_scale(self, item_index: pd.Index) ->
+    def _reindex_loc_scale(self, item_index: pd.Index) -> tuple[Union[np.ndarray, float], Union[np.ndarray, float]]:
         """Reindex loc and scale parameters for the given item_ids and convert them to an array-like."""
         if self.loc is not None:
             loc = self.loc.reindex(item_index).to_numpy()
@@ -74,7 +74,7 @@ class LocalStandardScaler(LocalTargetScaler):
     The resulting affine transformation is (x - loc) / scale, where scale = std(x), loc = mean(x).
     """
 
-    def _compute_loc_scale(self, target_series: pd.Series) ->
+    def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
         stats = target_series.groupby(level=ITEMID, sort=False).agg(["mean", "std"])
         return stats["mean"], stats["std"]
 
@@ -82,7 +82,7 @@ class LocalStandardScaler(LocalTargetScaler):
 class LocalMeanAbsScaler(LocalTargetScaler):
     """Applies mean absolute scaling to each time series in the dataset."""
 
-    def _compute_loc_scale(self, target_series: pd.Series) ->
+    def _compute_loc_scale(self, target_series: pd.Series) -> tuple[Optional[pd.Series], pd.Series]:
         scale = target_series.abs().groupby(level=ITEMID, sort=False).agg("mean")
         return None, scale
 
@@ -93,7 +93,7 @@ class LocalMinMaxScaler(LocalTargetScaler):
     The resulting affine transformation is (x - loc) / scale, where scale = max(x) - min(x), loc = min(x) / scale.
     """
 
-    def _compute_loc_scale(self, target_series: pd.Series) ->
+    def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
         stats = target_series.abs().groupby(level=ITEMID, sort=False).agg(["min", "max"])
         scale = (stats["max"] - stats["min"]).clip(lower=self.min_scale)
         loc = stats["min"]
@@ -117,7 +117,7 @@ class LocalRobustScaler(LocalTargetScaler):
         self.q_max = 0.75
         assert 0 < self.q_min < self.q_max < 1
 
-    def _compute_loc_scale(self, target_series: pd.Series) ->
+    def _compute_loc_scale(self, target_series: pd.Series) -> tuple[pd.Series, pd.Series]:
         grouped = target_series.groupby(level=ITEMID, sort=False)
         loc = grouped.median()
         lower = grouped.quantile(self.q_min)
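The only change in each hunk above is the return annotation (the bare `->` completed to a `tuple[...]`), but the surrounding context lines also show how these scalers operate: `_compute_loc_scale` produces per-item `loc`/`scale` series via a groupby on the item-ID index level, and the target is transformed as `(x - loc) / scale`. Below is a minimal standalone sketch of that mechanic for the standard scaler, assuming a target `pd.Series` indexed by `(item_id, timestamp)`; it is not the library code, and the `ITEMID` constant and class scaffolding are replaced by a plain level name.

```python
import numpy as np
import pandas as pd


def standard_scale_per_item(target: pd.Series, min_scale: float = 1e-2) -> pd.Series:
    # per-item loc/scale, mirroring LocalStandardScaler._compute_loc_scale
    stats = target.groupby(level="item_id", sort=False).agg(["mean", "std"])
    loc, scale = stats["mean"], stats["std"]
    # same guard rails as fit_transform: clip tiny scales, drop inf/NaN
    scale = scale.clip(lower=min_scale).replace([np.inf, -np.inf], np.nan).fillna(1.0)
    # broadcast the per-item parameters back to every row of the original series
    item_ids = target.index.get_level_values("item_id")
    return (target - loc.reindex(item_ids).to_numpy()) / scale.reindex(item_ids).to_numpy()
```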
autogluon/timeseries/utils/datetime/lags.py
CHANGED
@@ -2,7 +2,7 @@
 Generate lag indices based on frequency string. Adapted from gluonts.time_feature.lag.
 """
 
-from typing import
+from typing import Optional
 
 import numpy as np
 import pandas as pd
@@ -72,7 +72,7 @@ def get_lags_for_frequency(
     lag_ub: int = 1200,
     num_lags: Optional[int] = None,
     num_default_lags: int = 7,
-) ->
+) -> list[int]:
     """
     Generates a list of lags that that are appropriate for the given frequency
     string.
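The hunk above only pins down the return type of `get_lags_for_frequency` (`list[int]`); the lag-generation logic itself is not part of this diff. As a rough, hypothetical illustration (not the library's implementation) of what "lags appropriate for a frequency" tends to mean: a handful of short recent lags plus lags clustered around multiples of the seasonal period, capped at an upper bound such as `lag_ub`.

```python
def example_seasonal_lags(season_length: int, lag_ub: int = 1200, num_default_lags: int = 7) -> list[int]:
    # hypothetical sketch, not the library's implementation
    lags = set(range(1, num_default_lags + 1))  # short recent lags
    for multiple in (1, 2, 3):                  # a few seasonal multiples
        for offset in (-1, 0, 1):               # neighbours of each multiple
            lag = multiple * season_length + offset
            if 1 <= lag <= lag_ub:
                lags.add(lag)
    return sorted(lags)


# example_seasonal_lags(24) -> [1, 2, 3, 4, 5, 6, 7, 23, 24, 25, 47, 48, 49, 71, 72, 73]
```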
autogluon/timeseries/utils/datetime/time_features.py
CHANGED
@@ -2,7 +2,7 @@
 Generate time features based on frequency string. Adapted from gluonts.time_feature.time_feature.
 """
 
-from typing import Callable
+from typing import Callable
 
 import numpy as np
 import pandas as pd
@@ -56,7 +56,7 @@ def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:
     return _normalize(index.second, num=60)
 
 
-def get_time_features_for_frequency(freq) ->
+def get_time_features_for_frequency(freq) -> list[Callable]:
     features_by_offset_name = {
         "YE": [],
         "QE": [quarter_of_year],
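The hunk above fixes the return type of `get_time_features_for_frequency` to `list[Callable]`: each pandas offset name maps to a list of callables that take a `pd.DatetimeIndex` and return an `np.ndarray`, as the `second_of_minute` context line shows. The `_normalize` helper is not shown in this diff; the sketch below assumes the common convention of scaling a cyclic field into `[-0.5, 0.5]`, so treat that formula as an assumption rather than the package's definition.

```python
import numpy as np
import pandas as pd


def _normalize(values, num: int) -> np.ndarray:
    # assumed behaviour: map a value in [0, num - 1] into [-0.5, 0.5]
    return np.asarray(values, dtype=np.float64) / (num - 1) - 0.5


def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:
    return _normalize(index.second, num=60)


idx = pd.date_range("2025-09-02", periods=4, freq="15s")
print(second_of_minute(idx))  # four values in [-0.5, 0.5]
```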
autogluon/timeseries/utils/features.py
CHANGED
@@ -2,7 +2,7 @@ import logging
 import reprlib
 import time
 from dataclasses import asdict, dataclass, field
-from typing import Any,
+from typing import Any, Literal, Optional
 
 import numpy as np
 import pandas as pd
@@ -24,50 +24,50 @@ logger = logging.getLogger(__name__)
 class CovariateMetadata:
     """Provides mapping from different covariate types to columns in the dataset."""
 
-    static_features_cat:
-    static_features_real:
-    known_covariates_real:
-    known_covariates_cat:
-    past_covariates_real:
-    past_covariates_cat:
+    static_features_cat: list[str] = field(default_factory=list)
+    static_features_real: list[str] = field(default_factory=list)
+    known_covariates_real: list[str] = field(default_factory=list)
+    known_covariates_cat: list[str] = field(default_factory=list)
+    past_covariates_real: list[str] = field(default_factory=list)
+    past_covariates_cat: list[str] = field(default_factory=list)
 
     @property
-    def static_features(self) ->
+    def static_features(self) -> list[str]:
         return self.static_features_cat + self.static_features_real
 
     @property
-    def known_covariates(self) ->
+    def known_covariates(self) -> list[str]:
         return self.known_covariates_cat + self.known_covariates_real
 
     @property
-    def past_covariates(self) ->
+    def past_covariates(self) -> list[str]:
         return self.past_covariates_cat + self.past_covariates_real
 
     @property
-    def covariates(self) ->
+    def covariates(self) -> list[str]:
         return self.known_covariates + self.past_covariates
 
     @property
-    def covariates_real(self) ->
+    def covariates_real(self) -> list[str]:
         return self.known_covariates_real + self.past_covariates_real
 
     @property
-    def covariates_cat(self) ->
+    def covariates_cat(self) -> list[str]:
         return self.known_covariates_cat + self.past_covariates_cat
 
     @property
-    def real_features(self) ->
+    def real_features(self) -> list[str]:
         return self.static_features_real + self.covariates_real
 
     @property
-    def cat_features(self) ->
+    def cat_features(self) -> list[str]:
         return self.static_features_cat + self.covariates_cat
 
     @property
-    def all_features(self) ->
+    def all_features(self) -> list[str]:
         return self.static_features + self.covariates
 
-    def to_dict(self) ->
+    def to_dict(self) -> dict[str, Any]:
         return asdict(self)
 
 
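The `CovariateMetadata` hunk replaces the bare field declarations with typed `list[str]` fields (each defaulting to an empty list via `field(default_factory=list)`) and gives every property an explicit `list[str]` return type. A short usage sketch based only on the fields and properties visible above:

```python
from autogluon.timeseries.utils.features import CovariateMetadata

metadata = CovariateMetadata(
    static_features_cat=["store_id"],
    known_covariates_real=["price", "promo_flag"],
    past_covariates_real=["sensor_reading"],
)
print(metadata.known_covariates)  # ['price', 'promo_flag']
print(metadata.covariates)        # known + past covariate columns
print(metadata.all_features)      # static features + covariates
print(metadata.to_dict())         # plain dict via dataclasses.asdict
```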
@@ -120,13 +120,13 @@ class TimeSeriesFeatureGenerator:
 
     Parameters
     ----------
-    target
+    target
         Name of the target column.
-    known_covariates_names
+    known_covariates_names
         Columns that contain covariates that are known into the future.
-    float_dtype
+    float_dtype
         Numpy float dtype to which all numeric columns (float, int, bool) will be converted both in static & dynamic dfs.
-    num_samples
+    num_samples
         Number of rows sampled from the training dataset to speed up computation of the median (used later for imputation).
         If set to `None`, median will be computed using all rows.
     """
@@ -134,7 +134,7 @@ class TimeSeriesFeatureGenerator:
     def __init__(
         self,
         target: str,
-        known_covariates_names:
+        known_covariates_names: list[str],
         float_dtype: str = "float32",
         num_samples: Optional[int] = 20_000,
     ):
@@ -143,8 +143,8 @@ class TimeSeriesFeatureGenerator:
         self.num_samples = num_samples
 
         self._is_fit = False
-        self.known_covariates_names:
-        self.past_covariates_names:
+        self.known_covariates_names: list[str] = list(known_covariates_names)
+        self.past_covariates_names: list[str] = []
         self.known_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
         self.past_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
         # Cat features with cat_count=1 are fine in static_features since they are repeated for all time steps in a TS
@@ -154,7 +154,7 @@ class TimeSeriesFeatureGenerator:
         self._train_static_real_median: Optional[pd.Series] = None
 
     @property
-    def required_column_names(self) ->
+    def required_column_names(self) -> list[str]:
         return [self.target] + list(self.known_covariates_names) + list(self.past_covariates_names)
 
     @property
@@ -262,13 +262,13 @@ class TimeSeriesFeatureGenerator:
         return self._impute_covariates(ts_df, column_names=self.covariate_metadata.covariates_real)
 
     @staticmethod
-    def _concat_dfs(dfs_to_concat:
+    def _concat_dfs(dfs_to_concat: list[pd.DataFrame]) -> pd.DataFrame:
         if len(dfs_to_concat) == 1:
             return dfs_to_concat[0]
         else:
             return pd.concat(dfs_to_concat, axis=1, copy=False)
 
-    def _impute_covariates(self, ts_df: TimeSeriesDataFrame, column_names:
+    def _impute_covariates(self, ts_df: TimeSeriesDataFrame, column_names: list[str]) -> TimeSeriesDataFrame:
         """Impute missing values in selected columns with ffill, bfill, and median imputation."""
         if len(column_names) > 0:
             # ffill + bfill covariates that have at least some observed values
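The `_impute_covariates` hunk above only adds type annotations, but its docstring spells out the imputation order: forward fill, backward fill, then median imputation. A rough standalone illustration of that order (not the library code), assuming a DataFrame indexed by `(item_id, timestamp)` and a precomputed `medians` series of per-column training medians:

```python
import pandas as pd


def impute_covariates(df: pd.DataFrame, column_names: list[str], medians: pd.Series) -> pd.DataFrame:
    df = df.copy()
    # ffill + bfill within each time series for columns with at least some observed values
    df[column_names] = (
        df[column_names]
        .groupby(level="item_id", sort=False)
        .transform(lambda col: col.ffill().bfill())
    )
    # values that are still missing (items with no observations) fall back to the training median
    df[column_names] = df[column_names].fillna(medians[column_names])
    return df
```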
@@ -346,10 +346,10 @@ class TimeSeriesFeatureGenerator:
         return None
 
     @staticmethod
-    def _detect_and_log_column_types(transformed_df: pd.DataFrame) ->
+    def _detect_and_log_column_types(transformed_df: pd.DataFrame) -> tuple[list[str], list[str]]:
         """Log & return names of categorical and real-valued columns in the DataFrame."""
-        cat_column_names:
-        real_column_names:
+        cat_column_names: list[str] = []
+        real_column_names: list[str] = []
         for column_name, column_dtype in transformed_df.dtypes.items():
             if isinstance(column_dtype, pd.CategoricalDtype):
                 cat_column_names.append(str(column_name))
@@ -362,9 +362,9 @@ class TimeSeriesFeatureGenerator:
 
     @staticmethod
     def _check_required_columns_are_present(
-        data: TimeSeriesDataFrame, required_column_names:
+        data: TimeSeriesDataFrame, required_column_names: list[str], data_frame_name: str
     ) -> None:
-        missing_columns = pd.Index(required_column_names).difference(data.columns)
+        missing_columns = pd.Index(required_column_names).difference(data.columns)  # type: ignore
         if len(missing_columns) > 0:
             raise ValueError(
                 f"{len(missing_columns)} columns are missing from {data_frame_name}: {reprlib.repr(missing_columns.to_list())}"
autogluon/timeseries/version.py
CHANGED

{autogluon.timeseries-1.4.1b20250820.dist-info → autogluon.timeseries-1.4.1b20250902.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.4.
+Version: 1.4.1b20250902
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -55,10 +55,10 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.4.
-Requires-Dist: autogluon.common==1.4.
-Requires-Dist: autogluon.features==1.4.
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.
+Requires-Dist: autogluon.core[raytune]==1.4.1b20250902
+Requires-Dist: autogluon.common==1.4.1b20250902
+Requires-Dist: autogluon.features==1.4.1b20250902
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20250902
 Provides-Extra: all
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
autogluon.timeseries-1.4.1b20250902.dist-info/RECORD
ADDED
@@ -0,0 +1,75 @@
+autogluon.timeseries-1.4.1b20250902-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
+autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
+autogluon/timeseries/learner.py,sha256=eQrqFVOmL-2JC85LgCMkbyoLpKS02Dilg1T8RUeS_LI,13887
+autogluon/timeseries/predictor.py,sha256=7X4YsWYa3Xk2RI1Irf2O-c3-I82Zqhg-cgj8cj_4AoA,88427
+autogluon/timeseries/regressor.py,sha256=lc8Qr3-8v4oxajtCnV3sxpUaW6vxXXJOA6Kr-qVne4k,11926
+autogluon/timeseries/splitter.py,sha256=8ACkuCXeUhQGUx4jz_Vv17q814WrHJQeKvq2v4-oE6s,3158
+autogluon/timeseries/version.py,sha256=HXEe1UreeuIzhlJpXoFLQx29wpNs2WiCSUMbk708j5k,91
+autogluon/timeseries/configs/__init__.py,sha256=wiLBwxZkDTQBJkSJ9-xz3p_yJxX0dbHe108dS1P5O6A,183
+autogluon/timeseries/configs/hyperparameter_presets.py,sha256=GbI2sd3uakWtaeaMyF7B5z_lmyfb6ToK6PZEUZTyG9w,2031
+autogluon/timeseries/configs/predictor_presets.py,sha256=B5HFHIelh91hhG0YYE5SJ7_14P7sylFAABgHX8n_53M,2712
+autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
+autogluon/timeseries/dataset/ts_dataframe.py,sha256=EwxKBScspwKnJTqIk2Icukk8vIrbKYObOMAkNIn4zc8,51760
+autogluon/timeseries/metrics/__init__.py,sha256=YJPXxsJ0tRDXq7p-sTZSLb0DuXMJH6sT1PgbZ3tMt30,3594
+autogluon/timeseries/metrics/abstract.py,sha256=6jbluvHXfLc_cuK1Fx0ZYle2sR4WGG6YxFQhkor46Q8,11545
+autogluon/timeseries/metrics/point.py,sha256=sS__n_Em7m4CUaBu3PNWQ_dHw1YCOHbEyC15fhytFL8,18308
+autogluon/timeseries/metrics/quantile.py,sha256=x0cq44fXRoMiuI4BVQ7mpWk1YgrK4OwLTlJAhCHQ7Xg,4634
+autogluon/timeseries/metrics/utils.py,sha256=HuDe1BNe8yJU4f_DKM913nNrUueoRaw6zhxm1-S20s0,910
+autogluon/timeseries/models/__init__.py,sha256=9YnqkOILtVEkbICk7J3VlMkMNySs-f5ErIUKrE5-fys,1294
+autogluon/timeseries/models/registry.py,sha256=8n7W04ql0ckNQUzKcAW7bxreLI8wTAUTymACgLklH9M,2158
+autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=97HOi7fRPxtx8Y9hq-xdJI-kLMp6Z-8LUSvcfBjXFsM,31978
+autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
+autogluon/timeseries/models/abstract/tunable.py,sha256=jA6p-FPZkMva67B-1foqvHK-1rr0IdEfp9RvGW1WS9I,7155
+autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=E5fZsdFPgVdyCVyj5bGmn_lQFlCMn2NvuRLBMcCFvhM,205
+autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=k3a0JqBeuLQfjCtZ8MA7UvS2eqHjwbw0-4kN_StMMUQ,37623
+autogluon/timeseries/models/autogluon_tabular/per_step.py,sha256=M5rhj_jjcQz27wPYm6NEBEE0aHgXe0Bl6HFc2NIuLdU,23159
+autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=aI1QJLJaOB5Xy2WA0jo6Jh25MRVyyZ8ONrqlV96kpw0,2735
+autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
+autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
+autogluon/timeseries/models/chronos/model.py,sha256=UYLI1HVwsW5KfA-jXqJdBel-7N6l6ggdBVrjLLaq9P0,32333
+autogluon/timeseries/models/chronos/pipeline/__init__.py,sha256=bkTR0LSKIxAaKFOr9A0HSkCtnRdikDPUPp810WOKgxE,247
+autogluon/timeseries/models/chronos/pipeline/base.py,sha256=Us-TUpHSN3mM3ut05IVc2a9Q6KYq1n9pTb7JZG7b6kA,5546
+autogluon/timeseries/models/chronos/pipeline/chronos.py,sha256=bgow5FkHG7y5qWBXcggqXemnistJUfrl0lWFXcGXg5g,20197
+autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py,sha256=KJYgxASTW2VhS0ObkP5DUQXyfnTRTXzjRD5Gm-FQFI4,21355
+autogluon/timeseries/models/chronos/pipeline/utils.py,sha256=WYeCKFP5dxs4u09XTncBI2486VV22O1DiM9a3ZvZ1OE,12790
+autogluon/timeseries/models/ensemble/__init__.py,sha256=x2Y6dWk15XugTEWNUKq8U5z6nIjelo3UjpI-TfS13OE,159
+autogluon/timeseries/models/ensemble/abstract.py,sha256=wvtXNZTwiYpIurPkOYSzsi3XTRRx5guJLMYLmXTdOeQ,5695
+autogluon/timeseries/models/ensemble/basic.py,sha256=aSQRYylUpFZVk_Lpv5GY8uYgmE0_ipLy_tx6ELTZyWc,3426
+autogluon/timeseries/models/ensemble/greedy.py,sha256=zXJFenn1XxNNvCp4TlmIq1Dx3pUDWjKG1K3HsejmDeY,7323
+autogluon/timeseries/models/gluonts/__init__.py,sha256=YfyNYOkhhNsloA4MAavfmqKO29_q6o4lwPoV7L4_h7M,355
+autogluon/timeseries/models/gluonts/abstract.py,sha256=fYXV5fQk79LtPtpa4uk8MzCUGZ5J6n47ClEYuYSBDLY,27770
+autogluon/timeseries/models/gluonts/dataset.py,sha256=iUVKZyec1efVW2-71AWn-m9cowXYYj5hJM1JWuioscA,5115
+autogluon/timeseries/models/gluonts/models.py,sha256=1Z3x3-jVoae5X4cSnDIgJMvTJ9_O94aDSW8HEnBaL5k,25907
+autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
+autogluon/timeseries/models/local/abstract_local_model.py,sha256=A3sNYMA67UbbEKIIN30BcBdE_NpwaBtcG22O5mVWS6k,11482
+autogluon/timeseries/models/local/naive.py,sha256=xur3WWhLaS9Iix_p_yfaStbr58nL5K4rV0dReTm3BQQ,7496
+autogluon/timeseries/models/local/npts.py,sha256=VRZk5tEJOIentt0tLM6lxyoU8US736nHOvhSAgagYMc,4203
+autogluon/timeseries/models/local/statsforecast.py,sha256=sZ6aEFzAyPNZX3rMULGWFht0Toapjb3EwHe5Rb76ZxA,33318
+autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
+autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=Hn-H2jLdeuB0_TxhAdununS8ti-iO-WSl3FOoxzcEJA,12369
+autogluon/timeseries/trainer/__init__.py,sha256=_tw3iioJfvtIV7wnjtEMv0yS8oabmCFxDnGRodYE7RI,72
+autogluon/timeseries/trainer/model_set_builder.py,sha256=s6tozfND3lLfst6Vxa_oP_wgCmDapyCJYFmCjkEn-es,10788
+autogluon/timeseries/trainer/prediction_cache.py,sha256=Vi6EbMiMheq_smA93U_MoMxYUV85RdPm0dvJFdsM8K4,5551
+autogluon/timeseries/trainer/trainer.py,sha256=LF2X5UNnrU8w5h_i09SphGWvGFvZ6KvPDq89Z3GzZZQ,54959
+autogluon/timeseries/transforms/__init__.py,sha256=fKlT4pkJ_8Gl7IUTc3uSDzt2Xow5iH5w6fPB3ePNrTg,127
+autogluon/timeseries/transforms/covariate_scaler.py,sha256=9lEfDS4wnVZohQNnm9OcAXr3voUl83RCnctKR3O66iU,7030
+autogluon/timeseries/transforms/target_scaler.py,sha256=kTQrXAsDHCnYuqfpaVuvefyTgyp_ylDpUIPz7pArjeY,6043
+autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+autogluon/timeseries/utils/features.py,sha256=tdL7jZKeySO7dgB09FweR44wPCmfWg8-ZM5uVzeyvYQ,22593
+autogluon/timeseries/utils/forecast.py,sha256=yK1_eNtRUPYGs0R-VWMO4c81LrTGF57ih3yzsXVHyGY,2191
+autogluon/timeseries/utils/warning_filters.py,sha256=SroNhLU3kwbD8anM58vdxWq36Z8j_uiY42mEt0ya-JI,2589
+autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
+autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
+autogluon/timeseries/utils/datetime/lags.py,sha256=rjJtdBU0M41R1jwfmvCbo045s-6XBjhGVnGBQJ9-U1E,5997
+autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
+autogluon/timeseries/utils/datetime/time_features.py,sha256=kEOFls4Nzh8nO0Pcz1DwLsC_NA3hMI4JUlZI3kuvuts,2666
+autogluon.timeseries-1.4.1b20250902.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.timeseries-1.4.1b20250902.dist-info/METADATA,sha256=kLL8EOiGm0gBy69gkuclLiOKWuRa8mrYGAW1JjX5gec,12463
+autogluon.timeseries-1.4.1b20250902.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.timeseries-1.4.1b20250902.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.timeseries-1.4.1b20250902.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.4.1b20250902.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.4.1b20250902.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.timeseries-1.4.1b20250902.dist-info/RECORD,,
autogluon/timeseries/configs/presets_configs.py
REMOVED
@@ -1,79 +0,0 @@
-"""Preset configurations for autogluon.timeseries Predictors"""
-
-from autogluon.timeseries.models.presets import get_default_hps
-
-# TODO: change default HPO settings when other HPO strategies (e.g., Ray tune) are available
-# TODO: add refit_full arguments once refitting is available
-
-TIMESERIES_PRESETS_CONFIGS = dict(
-    best_quality={"hyperparameters": "default", "num_val_windows": 2},
-    high_quality={"hyperparameters": "default"},
-    medium_quality={"hyperparameters": "light"},
-    fast_training={"hyperparameters": "very_light"},
-    # Chronos-Bolt models
-    bolt_tiny={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_tiny"}},
-        "skip_model_selection": True,
-    },
-    bolt_mini={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_mini"}},
-        "skip_model_selection": True,
-    },
-    bolt_small={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_small"}},
-        "skip_model_selection": True,
-    },
-    bolt_base={
-        "hyperparameters": {"Chronos": {"model_path": "bolt_base"}},
-        "skip_model_selection": True,
-    },
-    # Original Chronos models
-    chronos_tiny={
-        "hyperparameters": {"Chronos": {"model_path": "tiny"}},
-        "skip_model_selection": True,
-    },
-    chronos_mini={
-        "hyperparameters": {"Chronos": {"model_path": "mini"}},
-        "skip_model_selection": True,
-    },
-    chronos_small={
-        "hyperparameters": {"Chronos": {"model_path": "small"}},
-        "skip_model_selection": True,
-    },
-    chronos_base={
-        "hyperparameters": {"Chronos": {"model_path": "base"}},
-        "skip_model_selection": True,
-    },
-    chronos_large={
-        "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
-        "skip_model_selection": True,
-    },
-    chronos_ensemble={
-        "hyperparameters": {
-            "Chronos": {"model_path": "small"},
-            **get_default_hps("light_inference"),
-        }
-    },
-    chronos_large_ensemble={
-        "hyperparameters": {
-            "Chronos": {"model_path": "large", "batch_size": 8},
-            **get_default_hps("light_inference"),
-        }
-    },
-)
-
-TIMESERIES_PRESETS_ALIASES = dict(
-    chronos="chronos_small",
-    best="best_quality",
-    high="high_quality",
-    medium="medium_quality",
-    bq="best_quality",
-    hq="high_quality",
-    mq="medium_quality",
-)
-
-# update with aliases
-TIMESERIES_PRESETS_CONFIGS = {
-    **TIMESERIES_PRESETS_CONFIGS,
-    **{k: TIMESERIES_PRESETS_CONFIGS[v].copy() for k, v in TIMESERIES_PRESETS_ALIASES.items()},
-}