autogluon.timeseries 1.2.1b20250424__py3-none-any.whl → 1.2.1b20250425__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/timeseries/dataset/ts_dataframe.py +9 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +3 -2
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +106 -66
- autogluon/timeseries/models/autogluon_tabular/transforms.py +15 -10
- autogluon/timeseries/models/local/abstract_local_model.py +43 -36
- autogluon/timeseries/models/multi_window/multi_window_model.py +1 -1
- autogluon/timeseries/predictor.py +2 -2
- autogluon/timeseries/trainer.py +1 -1
- autogluon/timeseries/version.py +1 -1
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/METADATA +5 -5
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/RECORD +18 -18
- /autogluon.timeseries-1.2.1b20250424-py3.9-nspkg.pth → /autogluon.timeseries-1.2.1b20250425-py3.9-nspkg.pth +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/LICENSE +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/NOTICE +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/WHEEL +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/zip-safe +0 -0
autogluon/timeseries/dataset/ts_dataframe.py CHANGED
@@ -8,12 +8,12 @@ from collections.abc import Iterable
 from itertools import islice
 from pathlib import Path
 from pprint import pformat
-from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, overload

 import pandas as pd
 from joblib.parallel import Parallel, delayed
 from pandas.core.internals import ArrayManager, BlockManager  # type: ignore
-from typing_extensions import Self
+from typing_extensions import Self

 from autogluon.common.loaders import load_pd
@@ -1044,8 +1044,15 @@ class TimeSeriesDataFrame(pd.DataFrame):
         """Convert `TimeSeriesDataFrame` to a `pandas.DataFrame`"""
         return pd.DataFrame(self)

+    # inline typing stubs for various overridden methods
     if TYPE_CHECKING:
+
+        def query(  # type: ignore
+            self, expr: str, *, inplace: bool = False, **kwargs
+        ) -> Self: ...
+
+        def reindex(*args, **kwargs) -> Self: ...  # type: ignore
+
         @overload
         def __new__(cls, data: pd.DataFrame, static_features: Optional[pd.DataFrame] = None) -> Self: ...  # type: ignore
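The new `if TYPE_CHECKING:` block attaches typing stubs that narrow the return types of inherited pandas methods to the subclass, at zero runtime cost since the block is skipped when the module is imported. A minimal sketch of the same pattern, with an illustrative subclass name:

```python
# Sketch of the TYPE_CHECKING stub pattern; MyFrame is a hypothetical stand-in.
from typing import TYPE_CHECKING

import pandas as pd
from typing_extensions import Self


class MyFrame(pd.DataFrame):
    if TYPE_CHECKING:
        # Never executed at runtime: only the type checker sees these,
        # so pandas methods appear to return MyFrame instead of DataFrame.
        def query(self, expr: str, *, inplace: bool = False, **kwargs) -> Self: ...  # type: ignore

        def reindex(*args, **kwargs) -> Self: ...  # type: ignore


df = MyFrame({"a": [1, 2, 3]})
filtered = df.query("a > 1")  # type checkers now infer MyFrame here
```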
autogluon/timeseries/models/abstract/abstract_timeseries_model.py CHANGED
@@ -187,7 +187,7 @@ class TimeSeriesModelBase(ModelBase, ABC):
         )
         return hyperparameters, extra_ag_args

-    def save(self, path: Optional[str] = None, verbose=True) -> str:
+    def save(self, path: Optional[str] = None, verbose: bool = True) -> str:
         if path is None:
             path = self.path
@@ -395,6 +395,7 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, ABC):
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
     ):
+        # TODO: make freq a required argument in AbstractTimeSeriesModel
         super().__init__(
             path=path,
             name=name,
@@ -410,7 +411,6 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, ABC):
         self.target_scaler: Optional[TargetScaler]
         self.covariate_scaler: Optional[CovariateScaler]
         self.covariate_regressor: Optional[CovariateRegressor]
-        self._initialize_transforms_and_regressor()

     def _initialize_transforms_and_regressor(self) -> None:
         self.target_scaler = get_target_scaler(self.get_hyperparameters().get("target_scaler"), target=self.target)
@@ -474,6 +474,7 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, ABC):
         The fitted model object
         """
         start_time = time.monotonic()
+        self._initialize_transforms_and_regressor()

         if self.target_scaler is not None:
             train_data = self.target_scaler.fit_transform(train_data)
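Taken together, the three hunks above move transform and regressor initialization from `__init__` into `fit`, so scalers and regressors are rebuilt from the current hyperparameters on every fit call. A hedged sketch of the idea, with hypothetical names:

```python
# Illustrative only: rebuild stateful helpers at fit time instead of __init__.
from typing import Any, Dict, Optional


class LazyInitModel:
    def __init__(self, hyperparameters: Optional[Dict[str, Any]] = None):
        self.hyperparameters = hyperparameters or {}
        self.target_scaler: Optional[str] = None  # created lazily in fit()

    def _initialize_transforms(self) -> None:
        # Reads the *current* hyperparameters, so later changes take effect.
        self.target_scaler = self.hyperparameters.get("target_scaler")

    def fit(self, train_data: object) -> "LazyInitModel":
        self._initialize_transforms()
        return self


model = LazyInitModel({"target_scaler": "standard"}).fit(train_data=None)
assert model.target_scaler == "standard"
```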
autogluon/timeseries/models/autogluon_tabular/mlforecast.py CHANGED
@@ -2,15 +2,17 @@ import logging
 import math
 import os
 import time
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Callable, Collection, Dict, List, Optional, Tuple, Union

 import numpy as np
 import pandas as pd
 from sklearn.base import BaseEstimator
+from typing_extensions import Self

 import autogluon.core as ag
 from autogluon.tabular import TabularPredictor
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
+from autogluon.timeseries.metrics.abstract import TimeSeriesScorer
 from autogluon.timeseries.metrics.utils import in_sample_squared_seasonal_error
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.models.local import SeasonalNaiveModel
@@ -29,17 +31,21 @@ logger = logging.getLogger(__name__)
 class TabularEstimator(BaseEstimator):
     """Scikit-learn compatible interface for TabularPredictor."""

-    def __init__(
+    def __init__(
+        self,
+        predictor_init_kwargs: Optional[Dict[str, Any]] = None,
+        predictor_fit_kwargs: Optional[Dict[str, Any]] = None,
+    ):
         self.predictor_init_kwargs = predictor_init_kwargs if predictor_init_kwargs is not None else {}
         self.predictor_fit_kwargs = predictor_fit_kwargs if predictor_fit_kwargs is not None else {}

-    def get_params(self, deep: bool = True) ->
+    def get_params(self, deep: bool = True) -> Dict[str, Any]:
         return {
             "predictor_init_kwargs": self.predictor_init_kwargs,
             "predictor_fit_kwargs": self.predictor_fit_kwargs,
         }

-    def fit(self, X: pd.DataFrame, y: pd.Series) ->
+    def fit(self, X: pd.DataFrame, y: pd.Series) -> Self:
         assert isinstance(X, pd.DataFrame) and isinstance(y, pd.Series)
         df = pd.concat([X, y.rename(MLF_TARGET).to_frame()], axis=1)
         self.predictor = TabularPredictor(**self.predictor_init_kwargs)
@@ -49,7 +55,7 @@ class TabularEstimator(BaseEstimator):

     def predict(self, X: pd.DataFrame) -> np.ndarray:
         assert isinstance(X, pd.DataFrame)
-        return self.predictor.predict(X).values
+        return self.predictor.predict(X).values  # type: ignore


 class AbstractMLForecastModel(AbstractTimeSeriesModel):
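`TabularEstimator` stays compatible with MLForecast because it exposes the scikit-learn `get_params` / `fit` / `predict` trio. A self-contained toy analogue of the same interface, wrapping a plain mean predictor instead of `TabularPredictor` (all names here are illustrative):

```python
# Toy scikit-learn-compatible estimator; MeanEstimator is illustrative only.
from typing import Any, Dict, Optional

import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator


class MeanEstimator(BaseEstimator):
    def __init__(self, fit_kwargs: Optional[Dict[str, Any]] = None):
        self.fit_kwargs = fit_kwargs if fit_kwargs is not None else {}

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        return {"fit_kwargs": self.fit_kwargs}

    def fit(self, X: pd.DataFrame, y: pd.Series) -> "MeanEstimator":
        self.mean_ = float(y.mean())  # the whole "model" is one statistic
        return self

    def predict(self, X: pd.DataFrame) -> np.ndarray:
        return np.full(len(X), self.mean_)


est = MeanEstimator().fit(pd.DataFrame({"x": [1, 2]}), pd.Series([3.0, 5.0]))
print(est.predict(pd.DataFrame({"x": [7]})))  # [4.]
```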
@@ -62,9 +68,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         prediction_length: int = 1,
         path: Optional[str] = None,
         name: Optional[str] = None,
-        eval_metric: str = None,
-        hyperparameters: Dict[str, Any] = None,
-        **kwargs,
+        eval_metric: Optional[Union[str, TimeSeriesScorer]] = None,
+        hyperparameters: Optional[Dict[str, Any]] = None,
+        **kwargs,
     ):
         super().__init__(
             path=path,
@@ -80,14 +86,16 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):

         self._sum_of_differences: int = 0  # number of time steps removed from each series by differencing
         self._max_ts_length: Optional[int] = None
-        self._target_lags:
-        self._date_features:
-        self._mlf:
+        self._target_lags: np.ndarray
+        self._date_features: List[Callable]
+        self._mlf: MLForecast
         self._scaler: Optional[BaseTargetTransform] = None
-        self._residuals_std_per_item:
+        self._residuals_std_per_item: pd.Series
         self._train_target_median: Optional[float] = None
         self._non_boolean_real_covariates: List[str] = []

+    def _initialize_transforms_and_regressor(self):
+        super()._initialize_transforms_and_regressor()
         # Do not create a scaler in the model, scaler will be passed to MLForecast
         self.target_scaler = None
@@ -95,20 +103,23 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
     def tabular_predictor_path(self) -> str:
         return os.path.join(self.path, "tabular_predictor")

-    def save(self, path: str = None, verbose: bool = True) -> str:
+    def save(self, path: Optional[str] = None, verbose: bool = True) -> str:
         assert "mean" in self._mlf.models_, "TabularPredictor must be trained before saving"
-
-        self._mlf.models_["mean"]
+
+        mean_estimator = self._mlf.models_["mean"]
+        assert isinstance(mean_estimator, TabularEstimator)
+
+        tabular_predictor = mean_estimator.predictor
+        mean_estimator.predictor = None  # type: ignore
         save_path = super().save(path=path, verbose=verbose)
-
+        mean_estimator.predictor = tabular_predictor
         return save_path

     @classmethod
-    def load(
-        cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True
-    ) -> "AbstractTimeSeriesModel":
+    def load(cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True) -> Self:
         model = super().load(path=path, reset_paths=reset_paths, load_oof=load_oof, verbose=verbose)
         assert "mean" in model._mlf.models_, "Loaded model doesn't have a trained TabularPredictor"
+        assert isinstance(model._mlf.models_["mean"], TabularEstimator)
         model._mlf.models_["mean"].predictor = TabularPredictor.load(model.tabular_predictor_path)
         return model
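The reworked `save` detaches the heavyweight `TabularPredictor` before pickling (it is persisted separately under `tabular_predictor_path`) and reattaches it afterwards so the in-memory model remains usable; `load` mirrors this. A minimal sketch of the detach-and-restore pattern, with illustrative names and a `try`/`finally` added for robustness:

```python
# Hedged sketch: exclude a heavy sub-object from pickling, then restore it.
import pickle
from typing import Any, Optional


class Wrapper:
    def __init__(self, heavy_object: Any):
        self.predictor: Optional[Any] = heavy_object  # persisted separately


def save_without_predictor(wrapper: Wrapper, path: str) -> None:
    predictor = wrapper.predictor
    wrapper.predictor = None  # drop the reference while pickling
    try:
        with open(path, "wb") as f:
            pickle.dump(wrapper, f)
    finally:
        wrapper.predictor = predictor  # reattach so the object keeps working
```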
@@ -131,24 +142,27 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         data[self.target] = data[self.target].fillna(value=self._train_target_median)
         return data, known_covariates

-    def _get_extra_tabular_init_kwargs(self) ->
+    def _get_extra_tabular_init_kwargs(self) -> Dict[str, Any]:
         raise NotImplementedError

-    def
-
-
-
-
-
-
+    def _get_default_hyperparameters(self) -> Dict[str, Any]:
+        return {
+            "max_num_items": 20_000,
+            "max_num_samples": 1_000_000,
+            "tabular_hyperparameters": {"GBM": {}},
+            "tabular_fit_kwargs": {},
+        }

-    def _get_mlforecast_init_args(
+    def _get_mlforecast_init_args(
+        self, train_data: TimeSeriesDataFrame, model_params: Dict[str, Any]
+    ) -> Dict[str, Any]:
         from mlforecast.target_transforms import Differences

         from .transforms import MLForecastScaler

         lags = model_params.get("lags")
         if lags is None:
+            assert self.freq is not None
             lags = get_lags_for_frequency(self.freq)
         self._target_lags = np.array(sorted(set(lags)), dtype=np.int64)
@@ -159,6 +173,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):

         target_transforms = []
         differences = model_params.get("differences")
+        assert isinstance(differences, Collection)

         ts_lengths = train_data.num_timesteps_per_item()
         required_ts_length = sum(differences) + 1
@@ -196,7 +211,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         return df

     @staticmethod
-    def _shorten_all_series(mlforecast_df: pd.DataFrame, max_length: int):
+    def _shorten_all_series(mlforecast_df: pd.DataFrame, max_length: int) -> pd.DataFrame:
         logger.debug(f"Shortening all series to at most {max_length}")
         return mlforecast_df.groupby(MLF_ITEMID, as_index=False, sort=False).tail(max_length)
@@ -231,7 +246,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         # Unless we set static_features=[], MLForecast interprets all known covariates as static features
         df = self._mlf.preprocess(mlforecast_df, dropna=False, static_features=[])
         # df.query results in 2x memory saving compared to df.dropna(subset="y")
-        df = df.query("y.notnull()")
+        df = df.query("y.notnull()")  # type: ignore

         df = self._mask_df(df)
@@ -250,12 +265,12 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         val_df = grouped_df.tail(val_rows_per_item)
         logger.debug(f"train_df shape: {train_df.shape}, val_df shape: {val_df.shape}")

-        return train_df.drop(columns=[MLF_TIMESTAMP]), val_df.drop(columns=[MLF_TIMESTAMP])
+        return train_df.drop(columns=[MLF_TIMESTAMP]), val_df.drop(columns=[MLF_TIMESTAMP])  # type: ignore

     def _to_mlforecast_df(
         self,
         data: TimeSeriesDataFrame,
-        static_features: pd.DataFrame,
+        static_features: Optional[pd.DataFrame],
         include_target: bool = True,
     ) -> pd.DataFrame:
         """Convert TimeSeriesDataFrame to a format expected by MLForecast methods `predict` and `preprocess`.
@@ -288,7 +303,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         self,
         train_data: TimeSeriesDataFrame,
         val_data: Optional[TimeSeriesDataFrame] = None,
-        time_limit: Optional[
+        time_limit: Optional[float] = None,
+        num_cpus: Optional[int] = None,
+        num_gpus: Optional[int] = None,
         verbosity: int = 2,
         **kwargs,
     ) -> None:
@@ -304,6 +321,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         model_params = self.get_hyperparameters()

         mlforecast_init_args = self._get_mlforecast_init_args(train_data, model_params)
+        assert self.freq is not None
         self._mlf = MLForecast(models={}, freq=self.freq, **mlforecast_init_args)

         # We generate train/val splits from train_data and ignore val_data to avoid overfitting
@@ -327,10 +345,10 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
                 **model_params["tabular_fit_kwargs"],
             },
         )
-        self._mlf.models = {"mean": estimator}
+        self._mlf.models = {"mean": estimator}  # type: ignore

         with warning_filter():
-            self._mlf.fit_models(X=train_df.drop(columns=[MLF_TARGET, MLF_ITEMID]), y=train_df[MLF_TARGET])
+            self._mlf.fit_models(X=train_df.drop(columns=[MLF_TARGET, MLF_ITEMID]), y=train_df[MLF_TARGET])  # type: ignore

         self._save_residuals_std(val_df)
@@ -340,14 +358,19 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         Saves per-item residuals to `self.residuals_std_per_item`.
         """
         residuals_df = val_df[[MLF_ITEMID, MLF_TARGET]]
-
+        mean_estimator = self._mlf.models_["mean"]
+        assert isinstance(mean_estimator, TabularEstimator)
+
+        residuals_df = residuals_df.assign(y_pred=mean_estimator.predict(val_df))
         if self._scaler is not None:
             # Scaler expects to find column MLF_TIMESTAMP even though it's not used - fill with dummy
-            residuals_df = residuals_df.assign(**{MLF_TIMESTAMP:
+            residuals_df = residuals_df.assign(**{MLF_TIMESTAMP: np.datetime64("2010-01-01")})
             residuals_df = self._scaler.inverse_transform(residuals_df)
+
+        assert isinstance(residuals_df, pd.DataFrame)
         residuals = residuals_df[MLF_TARGET] - residuals_df["y_pred"]
         self._residuals_std_per_item = (
-            residuals.pow(2.0).groupby(val_df[MLF_ITEMID].values, sort=False).mean().pow(0.5)
+            residuals.pow(2.0).groupby(val_df[MLF_ITEMID].values, sort=False).mean().pow(0.5)  # type: ignore
         )

     def _remove_short_ts_and_generate_fallback_forecast(
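`_save_residuals_std` reduces to a per-item RMSE of validation residuals. A small worked example of that groupby (column names and data are made up):

```python
# Per-item residual std = sqrt(mean(squared residuals)) within each item.
import pandas as pd

val_df = pd.DataFrame(
    {
        "item_id": ["A", "A", "B", "B"],
        "y": [1.0, 2.0, 10.0, 12.0],
        "y_pred": [1.5, 2.5, 9.0, 13.0],
    }
)
residuals = val_df["y"] - val_df["y_pred"]
residuals_std_per_item = (
    residuals.pow(2.0).groupby(val_df["item_id"].values, sort=False).mean().pow(0.5)
)
print(residuals_std_per_item)  # A -> 0.5, B -> 1.0
```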
@@ -395,7 +418,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
             forecast_for_short_series = None
         return data_long, known_covariates_long, forecast_for_short_series

-    def _add_gaussian_quantiles(
+    def _add_gaussian_quantiles(
+        self, predictions: pd.DataFrame, repeated_item_ids: pd.Series, past_target: pd.Series
+    ) -> pd.DataFrame:
         """
         Add quantile levels assuming that residuals follow normal distribution
         """
@@ -410,9 +435,9 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         # Use in-sample seasonal error in for items not seen during fit
         items_not_seen_during_fit = residuals_std_per_timestep.index[residuals_std_per_timestep.isna()].unique()
         if len(items_not_seen_during_fit) > 0:
-            scale_for_new_items: pd.Series =
-
-            )
+            scale_for_new_items: pd.Series = in_sample_squared_seasonal_error(
+                y_past=past_target.loc[items_not_seen_during_fit]
+            ).pow(0.5)
             residuals_std_per_timestep = residuals_std_per_timestep.fillna(scale_for_new_items)

         std_per_timestep = residuals_std_per_timestep * normal_scale_per_timestep
@@ -420,7 +445,7 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
             predictions[str(q)] = predictions["mean"] + norm.ppf(q) * std_per_timestep.to_numpy()
         return predictions

-    def _more_tags(self) ->
+    def _more_tags(self) -> Dict[str, Any]:
         return {"allow_nan": True, "can_refit_full": True}
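`_add_gaussian_quantiles` derives every quantile column from the point forecast under the stated normal-residuals assumption: quantile `q` is `mean + norm.ppf(q) * std`. A short standalone illustration with made-up numbers:

```python
# Quantiles from a point forecast assuming normally distributed residuals.
import numpy as np
import pandas as pd
from scipy.stats import norm

predictions = pd.DataFrame({"mean": [100.0, 102.0, 104.0]})
std_per_timestep = np.array([5.0, 5.5, 6.0])  # per-step residual std

for q in [0.1, 0.5, 0.9]:
    predictions[str(q)] = predictions["mean"] + norm.ppf(q) * std_per_timestep

print(predictions.round(2))  # "0.5" equals "mean"; 0.1/0.9 are symmetric
```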
@@ -473,7 +498,7 @@ class DirectTabularModel(AbstractMLForecastModel):
     def is_quantile_model(self) -> bool:
         return self.eval_metric.needs_quantile

-    def get_hyperparameters(self) ->
+    def get_hyperparameters(self) -> Dict[str, Any]:
         model_params = super().get_hyperparameters()
         model_params.setdefault("target_scaler", "mean_abs")
         if "differences" not in model_params or model_params["differences"] is None:
@@ -512,6 +537,7 @@ class DirectTabularModel(AbstractMLForecastModel):
         )
         if len(data) == 0:
             # All time series are too short for chosen differences
+            assert forecast_for_short_series is not None
             return forecast_for_short_series

         if known_covariates is not None:
@@ -522,15 +548,19 @@ class DirectTabularModel(AbstractMLForecastModel):
         # MLForecast raises exception of target contains NaN. We use inf as placeholder, replace them by NaN afterwards
         data_future[self.target] = float("inf")
         data_extended = pd.concat([data, data_future])
-        mlforecast_df = self._to_mlforecast_df(data_extended, data.static_features)
+        mlforecast_df = self._to_mlforecast_df(data_extended, data.static_features)  # type: ignore
         if self._max_ts_length is not None:
             # We appended `prediction_length` time steps to each series, so increase length
             mlforecast_df = self._shorten_all_series(mlforecast_df, self._max_ts_length + self.prediction_length)
         df = self._mlf.preprocess(mlforecast_df, dropna=False, static_features=[])
+        assert isinstance(df, pd.DataFrame)
+
         df = df.groupby(MLF_ITEMID, sort=False).tail(self.prediction_length)
         df = df.replace(float("inf"), float("nan"))

-
+        mean_estimator = self._mlf.models_["mean"]
+        assert isinstance(mean_estimator, TabularEstimator)
+        raw_predictions = mean_estimator.predict(df)
         predictions = self._postprocess_predictions(raw_predictions, repeated_item_ids=df[MLF_ITEMID])
         # Paste columns one by one to preserve dtypes
         predictions[MLF_ITEMID] = df[MLF_ITEMID].values
|
|
542
572
|
if self._max_ts_length is not None:
|
543
573
|
mlforecast_df_past = self._shorten_all_series(mlforecast_df_past, self._max_ts_length)
|
544
574
|
self._mlf.preprocess(mlforecast_df_past, static_features=[], dropna=False)
|
575
|
+
assert self._mlf.ts.target_transforms is not None
|
545
576
|
for tfm in self._mlf.ts.target_transforms[::-1]:
|
546
577
|
predictions = apply_inverse_transform(predictions, transform=tfm)
|
547
578
|
|
@@ -549,25 +580,30 @@ class DirectTabularModel(AbstractMLForecastModel):
             predictions = self._add_gaussian_quantiles(
                 predictions, repeated_item_ids=predictions[MLF_ITEMID], past_target=data[self.target]
             )
-
+        predictions_tsdf: TimeSeriesDataFrame = TimeSeriesDataFrame(
+            predictions.rename(columns={MLF_ITEMID: ITEMID, MLF_TIMESTAMP: TIMESTAMP})
+        )

         if forecast_for_short_series is not None:
-
-
-            return predictions
+            predictions_tsdf = pd.concat([predictions_tsdf, forecast_for_short_series])  # type: ignore
+            predictions_tsdf = predictions_tsdf.reindex(original_item_id_order, level=ITEMID)

-
+        return predictions_tsdf
+
+    def _postprocess_predictions(
+        self, predictions: Union[np.ndarray, pd.Series], repeated_item_ids: pd.Series
+    ) -> pd.DataFrame:
         if self.is_quantile_model:
-
-
-
+            predictions_df = pd.DataFrame(predictions, columns=[str(q) for q in self.quantile_levels])
+            predictions_df.values.sort(axis=1)
+            predictions_df["mean"] = predictions_df["0.5"]
         else:
-
+            predictions_df = pd.DataFrame(predictions, columns=["mean"])

-        column_order = ["mean"] + [col for col in
-        return
+        column_order = ["mean"] + [col for col in predictions_df.columns if col != "mean"]
+        return predictions_df[column_order]

-    def _get_extra_tabular_init_kwargs(self) ->
+    def _get_extra_tabular_init_kwargs(self) -> Dict[str, Any]:
         if self.is_quantile_model:
             return {
                 "problem_type": ag.constants.QUANTILE,
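In the quantile branch of `_postprocess_predictions`, `predictions_df.values.sort(axis=1)` sorts each row of the underlying array in place, which guarantees monotonically non-decreasing quantile columns (no quantile crossing). A standalone illustration; note this relies on `.values` returning a view, which holds for a single-dtype float frame:

```python
# Enforce non-crossing quantiles by sorting each row of the raw predictions.
import numpy as np
import pandas as pd

quantile_levels = [0.1, 0.5, 0.9]
raw = np.array([[1.2, 0.9, 1.5], [2.0, 2.5, 2.1]])  # rows may be unordered

predictions_df = pd.DataFrame(raw, columns=[str(q) for q in quantile_levels])
predictions_df.values.sort(axis=1)  # in-place row-wise sort of the ndarray view
predictions_df["mean"] = predictions_df["0.5"]
print(predictions_df)  # every row now increases left to right
```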
@@ -622,7 +658,7 @@ class RecursiveTabularModel(AbstractMLForecastModel):
         end of each time series).
     """

-    def get_hyperparameters(self) ->
+    def get_hyperparameters(self) -> Dict[str, Any]:
         model_params = super().get_hyperparameters()
         model_params.setdefault("target_scaler", "standard")
         if "differences" not in model_params or model_params["differences"] is None:
@@ -641,6 +677,7 @@ class RecursiveTabularModel(AbstractMLForecastModel):
         )
         if len(data) == 0:
             # All time series are too short for chosen differences
+            assert forecast_for_short_series is not None
             return forecast_for_short_series

         new_df = self._to_mlforecast_df(data, data.static_features)
@@ -648,7 +685,9 @@ class RecursiveTabularModel(AbstractMLForecastModel):
             new_df = self._shorten_all_series(new_df, self._max_ts_length)
         if known_covariates is None:
             future_index = self.get_forecast_horizon_index(data)
-            known_covariates =
+            known_covariates = TimeSeriesDataFrame(
+                pd.DataFrame(columns=[self.target], index=future_index, dtype="float32")
+            )
         X_df = self._to_mlforecast_df(known_covariates, data.static_features, include_target=False)
         # If both covariates & static features are missing, set X_df = None to avoid exception from MLForecast
         if len(X_df.columns.difference([MLF_ITEMID, MLF_TIMESTAMP])) == 0:
@@ -659,18 +698,19 @@ class RecursiveTabularModel(AbstractMLForecastModel):
             new_df=new_df,
             X_df=X_df,
         )
-
-
+        assert isinstance(raw_predictions, pd.DataFrame)
+        raw_predictions = raw_predictions.rename(columns={MLF_ITEMID: ITEMID, MLF_TIMESTAMP: TIMESTAMP})
+
+        predictions: TimeSeriesDataFrame = TimeSeriesDataFrame(
             self._add_gaussian_quantiles(
-
+                raw_predictions, repeated_item_ids=raw_predictions[ITEMID], past_target=data[self.target]
             )
         )
-
         if forecast_for_short_series is not None:
-            predictions = pd.concat([predictions, forecast_for_short_series])
+            predictions = pd.concat([predictions, forecast_for_short_series])  # type: ignore
         return predictions.reindex(original_item_id_order, level=ITEMID)

-    def _get_extra_tabular_init_kwargs(self) ->
+    def _get_extra_tabular_init_kwargs(self) -> Dict[str, Any]:
         return {
             "problem_type": ag.constants.REGRESSION,
             "eval_metric": self.eval_metric.equivalent_tabular_regression_metric or "mean_absolute_error",
autogluon/timeseries/models/autogluon_tabular/transforms.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Literal,
+from typing import Literal, Union

 import numpy as np
 import pandas as pd
@@ -13,29 +13,32 @@ from autogluon.timeseries.dataset.ts_dataframe import (
     TIMESTAMP,
     TimeSeriesDataFrame,
 )
-from autogluon.timeseries.transforms.target_scaler import
+from autogluon.timeseries.transforms.target_scaler import TargetScaler, get_target_scaler

 from .utils import MLF_ITEMID, MLF_TIMESTAMP


 class MLForecastScaler(BaseTargetTransform):
-    def __init__(self, scaler_type: Literal["standard", "
+    def __init__(self, scaler_type: Literal["standard", "min_max", "mean_abs", "robust"]):
         # For backward compatibility
-        self.scaler_type = scaler_type
-        self.ag_scaler:
+        self.scaler_type: Literal["standard", "min_max", "mean_abs", "robust"] = scaler_type
+        self.ag_scaler: TargetScaler

     def _df_to_tsdf(self, df: pd.DataFrame) -> TimeSeriesDataFrame:
-        return
+        return TimeSeriesDataFrame(
+            df.rename(columns={self.id_col: ITEMID, self.time_col: TIMESTAMP}).set_index([ITEMID, TIMESTAMP])
+        )

     def _tsdf_to_df(self, ts_df: TimeSeriesDataFrame) -> pd.DataFrame:
         return pd.DataFrame(ts_df).reset_index().rename(columns={ITEMID: self.id_col, TIMESTAMP: self.time_col})

-    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
+    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:  # type: ignore
         self.ag_scaler = get_target_scaler(name=self.scaler_type, target=self.target_col)
-        transformed = self.ag_scaler.fit_transform(self._df_to_tsdf(df))
+        transformed = self.ag_scaler.fit_transform(self._df_to_tsdf(df))
         return self._tsdf_to_df(transformed)

-    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
+    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:  # type: ignore
+        assert self.ag_scaler is not None
         transformed = self.ag_scaler.inverse_transform(self._df_to_tsdf(df))
         return self._tsdf_to_df(transformed)
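`MLForecastScaler` is an adapter: MLForecast hands it long-format frames with configurable `id_col`/`time_col` names, and the scaler round-trips them through `TimeSeriesDataFrame` so AutoGluon's own target scalers can be reused unchanged. Reduced to plain pandas, the two conversion helpers look roughly like this sketch (column names are assumptions):

```python
# Sketch of the long-format <-> indexed-format round trip used by the scaler.
import pandas as pd

ITEMID, TIMESTAMP = "item_id", "timestamp"


def to_indexed(df: pd.DataFrame, id_col: str, time_col: str) -> pd.DataFrame:
    # Rename to canonical column names and index by (item, time).
    return df.rename(columns={id_col: ITEMID, time_col: TIMESTAMP}).set_index([ITEMID, TIMESTAMP])


def to_long(indexed: pd.DataFrame, id_col: str, time_col: str) -> pd.DataFrame:
    # Inverse conversion: restore the caller's original column names.
    return indexed.reset_index().rename(columns={ITEMID: id_col, TIMESTAMP: time_col})
```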
@@ -46,7 +49,9 @@ def apply_inverse_transform(
 ) -> pd.DataFrame:
     """Apply inverse transformation to a dataframe, converting to GroupedArray if necessary"""
     if isinstance(transform, BaseTargetTransform):
-
+        inverse_transformed = transform.inverse_transform(df=df)
+        assert isinstance(inverse_transformed, pd.DataFrame)
+        return inverse_transformed
     elif isinstance(transform, _BaseGroupedArrayTargetTransform):
         indptr = np.concatenate([[0], df[MLF_ITEMID].value_counts().cumsum()])
         assignment = {}
autogluon/timeseries/models/local/abstract_local_model.py CHANGED
@@ -10,6 +10,7 @@ from scipy.stats import norm

 from autogluon.core.utils.exceptions import TimeLimitExceeded
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+from autogluon.timeseries.metrics import TimeSeriesScorer
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
 from autogluon.timeseries.utils.datetime import get_seasonality
 from autogluon.timeseries.utils.warning_filters import warning_filter
@@ -30,8 +31,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
     ----------
     allowed_local_model_args : List[str]
         Argument that can be passed to the underlying local model.
-    default_n_jobs : Union[int, float]
-        Default number of CPU cores used to train models. If float, this fraction of CPU cores will be used.
     default_max_ts_length : Optional[int]
         If not None, only the last ``max_ts_length`` time steps of each time series will be used to train the model.
         This significantly speeds up fitting and usually leads to no change in accuracy.
@@ -41,7 +40,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
     """

     allowed_local_model_args: List[str] = []
-    default_n_jobs: Union[int, float] = AG_DEFAULT_N_JOBS
     default_max_ts_length: Optional[int] = 2500
     default_max_time_limit_ratio = 1.0
     init_time_in_seconds: int = 0
@@ -52,26 +50,10 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         prediction_length: int = 1,
         path: Optional[str] = None,
         name: Optional[str] = None,
-        eval_metric: str = None,
-        hyperparameters: Dict[str, Any] = None,
+        eval_metric: Union[str, TimeSeriesScorer, None] = None,
+        hyperparameters: Optional[Dict[str, Any]] = None,
         **kwargs,  # noqa
     ):
-        if hyperparameters is None:
-            hyperparameters = {}
-        else:
-            hyperparameters = hyperparameters.copy()
-        # TODO: Replace with 'num_cpus' argument passed to fit (after predictor API is changed)
-        n_jobs = hyperparameters.pop("n_jobs", self.default_n_jobs)
-        if isinstance(n_jobs, float) and 0 < n_jobs <= 1:
-            self.n_jobs = max(int(cpu_count() * n_jobs), 1)
-        elif isinstance(n_jobs, int):
-            self.n_jobs = n_jobs
-        else:
-            raise ValueError(f"n_jobs must be a float between 0 and 1 or an integer (received n_jobs = {n_jobs})")
-        # Default values, potentially overridden inside _fit()
-        self.use_fallback_model = hyperparameters.pop("use_fallback_model", True)
-        self.max_ts_length = hyperparameters.pop("max_ts_length", self.default_max_ts_length)
-
         super().__init__(
             path=path,
             freq=freq,
@@ -82,9 +64,9 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
             **kwargs,
         )

-        self._local_model_args: Dict[str, Any]
-        self._seasonal_period:
-        self._dummy_forecast:
+        self._local_model_args: Dict[str, Any]
+        self._seasonal_period: int
+        self._dummy_forecast: pd.DataFrame

     @property
     def allowed_hyperparameters(self) -> List[str]:
@@ -105,19 +87,32 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         data = data.fill_missing_values()
         return data, known_covariates

+    def _get_default_hyperparameters(self) -> dict:
+        return {
+            "n_jobs": AG_DEFAULT_N_JOBS,
+            "use_fallback_model": True,
+            "max_ts_length": self.default_max_ts_length,
+        }
+
+    @staticmethod
+    def _compute_n_jobs(n_jobs: Union[int, float]) -> int:
+        if isinstance(n_jobs, float) and 0 < n_jobs <= 1:
+            return max(int(cpu_count() * n_jobs), 1)
+        elif isinstance(n_jobs, int):
+            return n_jobs
+        else:
+            raise ValueError(f"n_jobs must be a float between 0 and 1 or an integer (received n_jobs = {n_jobs})")
+
     def _fit(self, train_data: TimeSeriesDataFrame, time_limit: Optional[int] = None, **kwargs):
         self._check_fit_params()

         if time_limit is not None and time_limit < self.init_time_in_seconds:
             raise TimeLimitExceeded

-        # Initialize parameters passed to each local model
-        raw_local_model_args = self.get_hyperparameters().copy()
-
         unused_local_model_args = []
         local_model_args = {}
         # TODO: Move filtering logic to AbstractTimeSeriesModel
-        for key, value in
+        for key, value in self.get_hyperparameters().items():
             if key in self.allowed_local_model_args:
                 local_model_args[key] = value
             elif key in self.allowed_hyperparameters:
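The n_jobs logic that used to live in `__init__` is now the static `_compute_n_jobs`: a float in (0, 1] is read as a fraction of the available cores, an int as an explicit worker count. Standalone, it behaves like this sketch:

```python
# Resolve n_jobs: fractional values map to a share of the CPU count.
from multiprocessing import cpu_count
from typing import Union


def compute_n_jobs(n_jobs: Union[int, float]) -> int:
    if isinstance(n_jobs, float) and 0 < n_jobs <= 1:
        return max(int(cpu_count() * n_jobs), 1)
    elif isinstance(n_jobs, int):
        return n_jobs
    raise ValueError(f"n_jobs must be a float between 0 and 1 or an integer (received n_jobs = {n_jobs})")


print(compute_n_jobs(0.5))  # half the cores, but at least 1
print(compute_n_jobs(4))    # exactly 4 workers
```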
@@ -151,9 +146,11 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         return local_model_args

     def _predict(self, data: TimeSeriesDataFrame, **kwargs) -> TimeSeriesDataFrame:
-
-
-
+        model_params = self.get_hyperparameters()
+        max_ts_length = model_params["max_ts_length"]
+        if max_ts_length is not None:
+            logger.debug(f"Shortening all time series to at most {max_ts_length}")
+            data = data.groupby(level=ITEMID, sort=False).tail(max_ts_length)

         df = pd.DataFrame(data).reset_index(level=ITEMID)
         all_series = (ts for _, ts in df.groupby(by=ITEMID, as_index=False, sort=False)[self.target])
@@ -161,15 +158,20 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         # timeout ensures that no individual job takes longer than time_limit
         # TODO: a job started late may still exceed time_limit - how to prevent that?
         time_limit = kwargs.get("time_limit")
-
+        # TODO: Take into account num_cpus once the TimeSeriesPredictor API is updated
+        n_jobs = self._compute_n_jobs(model_params["n_jobs"])
+        timeout = None if n_jobs == 1 else time_limit
         # end_time ensures that no new jobs are started after time_limit is exceeded
         end_time = None if time_limit is None else time.time() + time_limit
-        executor = Parallel(
+        executor = Parallel(n_jobs=n_jobs, timeout=timeout)

         try:
             with warning_filter():
                 predictions_with_flags = executor(
-                    delayed(self._predict_wrapper)(
+                    delayed(self._predict_wrapper)(
+                        ts, use_fallback_model=model_params["use_fallback_model"], end_time=end_time
+                    )
+                    for ts in all_series
                 )
         except TimeoutError:
             raise TimeLimitExceeded
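The prediction loop combines two complementary guards: joblib's `timeout` bounds how long each running task may take (only meaningful when `n_jobs != 1`, matching the `timeout = None if n_jobs == 1` line above), while `end_time` lets tasks that start late bail out before doing any work. A hedged, self-contained sketch with a placeholder work function:

```python
# Two-level time budget: per-task timeout plus a shared absolute deadline.
import time
from typing import Optional

from joblib import Parallel, delayed


def predict_one(series_id: int, end_time: Optional[float] = None) -> int:
    # Tasks that start after the shared deadline refuse to run at all.
    if end_time is not None and time.time() >= end_time:
        raise TimeoutError("overall time budget exhausted")
    return series_id * 2  # stand-in for fitting one local model


time_limit = 10.0
end_time = time.time() + time_limit
executor = Parallel(n_jobs=2, timeout=time_limit)
results = executor(delayed(predict_one)(i, end_time=end_time) for i in range(8))
print(results)  # [0, 2, 4, ..., 14] when the budget is not exceeded
```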
@@ -185,7 +187,12 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         predictions_df.index = self.get_forecast_horizon_index(data)
         return TimeSeriesDataFrame(predictions_df)

-    def _predict_wrapper(
+    def _predict_wrapper(
+        self,
+        time_series: pd.Series,
+        use_fallback_model: bool,
+        end_time: Optional[float] = None,
+    ) -> Tuple[pd.DataFrame, bool]:
         if end_time is not None and time.time() >= end_time:
             raise TimeLimitExceeded
@@ -201,7 +208,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
             if not np.isfinite(result.values).all():
                 raise RuntimeError("Forecast contains NaN or Inf values.")
         except Exception:
-            if
+            if use_fallback_model:
                 result = seasonal_naive_forecast(
                     target=time_series.values.ravel(),
                     prediction_length=self.prediction_length,
autogluon/timeseries/models/multi_window/multi_window_model.py CHANGED
@@ -215,7 +215,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
     def _get_search_space(self):
        return self.model_base._get_search_space()

-    def _initialize_transforms_and_regressor(self
+    def _initialize_transforms_and_regressor(self) -> None:
        # Do not initialize the target_scaler and covariate_regressor in the multi window model!
        self.target_scaler = None
        self.covariate_scaler = None
autogluon/timeseries/predictor.py CHANGED
@@ -395,12 +395,12 @@ class TimeSeriesPredictor:
                 f"\tRemoving {len(too_short_items)} short time series from train_data. Only series with length "
                 f">= {min_length} will be used for training."
             )
-            train_data = train_data.query("item_id not in @too_short_items")
+            train_data = train_data.query("item_id not in @too_short_items")

         all_nan_items = train_data.item_ids[train_data[self.target].isna().groupby(ITEMID, sort=False).all()]
         if len(all_nan_items) > 0:
             logger.info(f"\tRemoving {len(all_nan_items)} time series consisting of only NaN values from train_data.")
-            train_data = train_data.query("item_id not in @all_nan_items")
+            train_data = train_data.query("item_id not in @all_nan_items")

         if len(too_short_items) or len(all_nan_items):
             logger.info(f"\tAfter filtering, train_data has {self._get_dataset_stats(train_data)}")
autogluon/timeseries/trainer.py CHANGED
@@ -902,7 +902,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         for n in range(num_iterations):
             if subsample_size < data.num_items:
                 item_ids_sampled = data.item_ids.to_series().sample(subsample_size)  # noqa
-                data_sample: TimeSeriesDataFrame = data.query("item_id in @item_ids_sampled")
+                data_sample: TimeSeriesDataFrame = data.query("item_id in @item_ids_sampled")
             else:
                 data_sample = data
{autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.2.1b20250424
+Version: 1.2.1b20250425
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -55,10 +55,10 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.2.1b20250424
-Requires-Dist: autogluon.common==1.2.1b20250424
-Requires-Dist: autogluon.features==1.2.1b20250424
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250424
+Requires-Dist: autogluon.core[raytune]==1.2.1b20250425
+Requires-Dist: autogluon.common==1.2.1b20250425
+Requires-Dist: autogluon.features==1.2.1b20250425
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250425
 Provides-Extra: all
 Provides-Extra: chronos-onnx
 Requires-Dist: optimum[onnxruntime]<1.23,>=1.17; extra == "chronos-onnx"
{autogluon.timeseries-1.2.1b20250424.dist-info → autogluon.timeseries-1.2.1b20250425.dist-info}/RECORD CHANGED
@@ -1,16 +1,16 @@
-autogluon.timeseries-1.2.
+autogluon.timeseries-1.2.1b20250425-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
 autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
 autogluon/timeseries/learner.py,sha256=7dqSHKCIX2osjv9cmWWLwaGvdrPvla0HTnsR75bdenY,14112
-autogluon/timeseries/predictor.py,sha256=
+autogluon/timeseries/predictor.py,sha256=Ur5TV0idIssnUzVdZcVsnrNWAmX6JGxbLZZnpT8EGmQ,87980
 autogluon/timeseries/regressor.py,sha256=xw5VPrXS-NQ_Ts4ppDjoNV0TdqUYjW4VINUtb_BZdiI,11868
 autogluon/timeseries/splitter.py,sha256=yzPca9p2bWV-_VJAptUyyzQsxu-uixAdpMoGQtDzMD4,3205
-autogluon/timeseries/trainer.py,sha256
-autogluon/timeseries/version.py,sha256=
+autogluon/timeseries/trainer.py,sha256=zrO3ARI_h14pYy7_GEGO7nqd9rONiDmx5hhCRMyzwls,58115
+autogluon/timeseries/version.py,sha256=zTyiK3ClhRyp8BZjIMLB2n9YsA7GaR9qnPDLREdyRQs,91
 autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
 autogluon/timeseries/configs/presets_configs.py,sha256=cLat8ecLlWrI-SC5KLBDCX2SbVXaucemy2pjxJAtSY0,2543
 autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
-autogluon/timeseries/dataset/ts_dataframe.py,sha256=
+autogluon/timeseries/dataset/ts_dataframe.py,sha256=W3VE65lFyWmqMQ3XHN4Jhrqf_dO1EOLneNL2QDvVxeY,48120
 autogluon/timeseries/metrics/__init__.py,sha256=dJCrZ2cHwqhqNctwQjwG-FHgGUmzIFT-D0z72f4RAVM,2104
 autogluon/timeseries/metrics/abstract.py,sha256=CHUZB6xt9oF9yijSOjgGtjLuKo2X0mT6dQDuwg4ZzpU,8192
 autogluon/timeseries/metrics/point.py,sha256=2nlieQcPBCI9hXMT3v0Oe802ykZDuzvEtDpunzt0IVA,15785
@@ -19,12 +19,12 @@ autogluon/timeseries/metrics/utils.py,sha256=HuDe1BNe8yJU4f_DKM913nNrUueoRaw6zhx
 autogluon/timeseries/models/__init__.py,sha256=MYD9JJ-wUDE5B6jW6E6LU2eXQ6vflfQBvqQJkdzJa3A,1189
 autogluon/timeseries/models/presets.py,sha256=BdSTW91-flgqhVNuZIvqEf7wUj1iB6BPger4tJaoAZQ,12322
 autogluon/timeseries/models/abstract/__init__.py,sha256=Htfkjjc3vo92RvyM8rIlQ0PLWt3jcrCKZES07UvCMV0,146
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=Mj0bx45A2zy9Vzhcd7xjct3KUJGnMKLTozTMvtdsViw,33059
 autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
 autogluon/timeseries/models/abstract/tunable.py,sha256=SFl4vjkb6BfFFaRPVdftnnLYlIyCThutLHxiiAlV6tY,7168
 autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
-autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=
-autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=
+autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=5JFQTZtYU4bRyNrR8lC-hikI573jIqOxBE2dw35Dg-M,34607
+autogluon/timeseries/models/autogluon_tabular/transforms.py,sha256=aI1QJLJaOB5Xy2WA0jo6Jh25MRVyyZ8ONrqlV96kpw0,2735
 autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=Fn3Vu_Q0PCtEUbtNgLp1xIblg7dOdpFlF3W5kLHgruI,63
 autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
 autogluon/timeseries/models/chronos/model.py,sha256=dYc3nZE6BqpunwI2IyuOm1LGW1RJJEzxYCB5ZW0585E,31649
@@ -42,12 +42,12 @@ autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=35T8rty6sPGiaSFNp
 autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/timeseries/models/gluonts/torch/models.py,sha256=f7IicZzLAN2v_9y3Pxt9G6f48xIzmDjb1U5k44hS3O0,25760
 autogluon/timeseries/models/local/__init__.py,sha256=e2UImoJhmj70E148IIObv90C_bHxgyLNk6YsS4p7pfs,701
-autogluon/timeseries/models/local/abstract_local_model.py,sha256=
+autogluon/timeseries/models/local/abstract_local_model.py,sha256=VP-yP5Rx93Kb6ine1d5lBe6fRbeZcGG3hBn-AoL4OqQ,11841
 autogluon/timeseries/models/local/naive.py,sha256=BhXxL52-_i4Xynx-spfZMkRejofFPpknggS35_aQSwc,7253
 autogluon/timeseries/models/local/npts.py,sha256=Bp74doKnfpGE8ywP4FWOCI_RwRMsmgocYDfGtq764DA,4143
 autogluon/timeseries/models/local/statsforecast.py,sha256=s3Byp7WAUy0Rnfl1qYMSIm44MKD9t8E732xuNLk_aao,32615
 autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
-autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=
+autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=xW55TMg7kgta-TmBpVZGcDQlBdBN_eW1z1lVNjZGhpo,11833
 autogluon/timeseries/transforms/__init__.py,sha256=fkFc4Q1Dlh0vVRgO7nPD7BgNL9dOki8THPWFkfdIKkM,128
 autogluon/timeseries/transforms/covariate_scaler.py,sha256=G56PTHKqCFKiXRKLkLun7mN3-T09jxN-5oI1ISADJdQ,7042
 autogluon/timeseries/transforms/target_scaler.py,sha256=BeT1aP51Wq9EidxC0dVg6dHvampKafpG1uKu4ZaaJPs,6050
@@ -60,11 +60,11 @@ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbj
 autogluon/timeseries/utils/datetime/lags.py,sha256=gQDk5_zmsY5DUWDUpSaCKYkQ9nHKKY-LsywJQRAoYSk,5988
 autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
 autogluon/timeseries/utils/datetime/time_features.py,sha256=MjLi3zQ00uWWJtXH9oGX2GJkTbvjdSiuabSa4kcVuxE,2672
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
-autogluon.timeseries-1.2.
+autogluon.timeseries-1.2.1b20250425.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.timeseries-1.2.1b20250425.dist-info/METADATA,sha256=FQJvTO-OW1GKRFf4RMRuyNUgzGJscd3nXb7it95OrGM,12737
+autogluon.timeseries-1.2.1b20250425.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.timeseries-1.2.1b20250425.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.timeseries-1.2.1b20250425.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.2.1b20250425.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.2.1b20250425.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.timeseries-1.2.1b20250425.dist-info/RECORD,,
The remaining files (the py3.9-nspkg.pth and the dist-info LICENSE, NOTICE, WHEEL, namespace_packages.txt, top_level.txt, and zip-safe) are renamed for the new version but otherwise unchanged.