autogluon.timeseries 1.0.1b20240408__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of autogluon.timeseries might be problematic; review the file-by-file changes below for details.

Files changed (31)
  1. autogluon/timeseries/configs/presets_configs.py +2 -2
  2. autogluon/timeseries/dataset/ts_dataframe.py +9 -9
  3. autogluon/timeseries/learner.py +8 -3
  4. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +3 -2
  5. autogluon/timeseries/models/autogluon_tabular/mlforecast.py +12 -3
  6. autogluon/timeseries/models/chronos/model.py +27 -44
  7. autogluon/timeseries/models/chronos/utils.py +66 -0
  8. autogluon/timeseries/models/gluonts/abstract_gluonts.py +22 -27
  9. autogluon/timeseries/models/gluonts/torch/models.py +0 -2
  10. autogluon/timeseries/models/local/abstract_local_model.py +14 -11
  11. autogluon/timeseries/models/multi_window/multi_window_model.py +1 -0
  12. autogluon/timeseries/models/presets.py +23 -13
  13. autogluon/timeseries/predictor.py +19 -7
  14. autogluon/timeseries/trainer/abstract_trainer.py +1 -1
  15. autogluon/timeseries/utils/datetime/base.py +38 -20
  16. autogluon/timeseries/utils/datetime/lags.py +13 -12
  17. autogluon/timeseries/utils/datetime/seasonality.py +11 -11
  18. autogluon/timeseries/utils/datetime/time_features.py +12 -11
  19. autogluon/timeseries/utils/features.py +12 -7
  20. autogluon/timeseries/utils/forecast.py +7 -2
  21. autogluon/timeseries/utils/warning_filters.py +1 -3
  22. autogluon/timeseries/version.py +1 -1
  23. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/METADATA +13 -16
  24. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/RECORD +31 -30
  25. /autogluon.timeseries-1.0.1b20240408-py3.8-nspkg.pth → /autogluon.timeseries-1.1.0-py3.8-nspkg.pth +0 -0
  26. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/LICENSE +0 -0
  27. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/NOTICE +0 -0
  28. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/WHEEL +0 -0
  29. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/namespace_packages.txt +0 -0
  30. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/top_level.txt +0 -0
  31. {autogluon.timeseries-1.0.1b20240408.dist-info → autogluon.timeseries-1.1.0.dist-info}/zip-safe +0 -0
@@ -69,7 +69,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
69
69
  models that predict up to 3 days into the future from the most recent observation.
70
70
  freq : str, optional
71
71
  Frequency of the time series data (see `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
72
- for available frequencies). For example, ``"D"`` for daily data or ``"H"`` for hourly data.
72
+ for available frequencies). For example, ``"D"`` for daily data or ``"h"`` for hourly data.
73
73
 
74
74
  By default, the predictor will attempt to automatically infer the frequency from the data. This argument should
75
75
  only be set in two cases:
@@ -195,7 +195,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
195
195
  self._min_train_length = max(self.prediction_length + 1, 5)
196
196
  self.freq = freq
197
197
  if self.freq is not None:
198
- # Standardize frequency string (e.g., "min" -> "T", "Y" -> "A-DEC")
198
+ # Standardize frequency string (e.g., "T" -> "min", "Y" -> "YE")
199
199
  std_freq = pd.tseries.frequencies.to_offset(self.freq).freqstr
200
200
  if std_freq != str(self.freq):
201
201
  logger.info(f"Frequency '{self.freq}' stored as '{std_freq}'")
@@ -293,7 +293,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
293
293
  Preprocessed data in TimeSeriesDataFrame format.
294
294
  """
295
295
  df = self._to_data_frame(data, name=name)
296
- df = df.astype({self.target: "float32"})
296
+ df = df.astype({self.target: "float64"})
297
297
  # MultiIndex.is_monotonic_increasing checks if index is sorted by ["item_id", "timestamp"]
298
298
  if not df.index.is_monotonic_increasing:
299
299
  df = df.sort_index()
@@ -499,7 +499,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
499
499
  with reasonable training time.
500
500
  - ``"high_quality"``: All ML models available in AutoGluon + additional statistical models (``NPTS``, ``AutoETS``, ``AutoARIMA``, ``CrostonSBA``,
501
501
  ``DynamicOptimizedTheta``). Much more accurate than ``medium_quality``, but takes longer to train.
502
- - ``"best_quality"``: Same models as in ``"high_quality"`, but performs validation with multiple backtests. Usually better than ``high_quality``, but takes even longer to train.
502
+ - ``"best_quality"``: Same models as in ``"high_quality"``, but performs validation with multiple backtests. Usually better than ``high_quality``, but takes even longer to train.
503
503
 
504
504
  Available presets with the `Chronos <https://github.com/amazon-science/chronos-forecasting>`_ model:
505
505
 
@@ -507,8 +507,10 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
507
507
  See the documentation for ``ChronosModel`` or see `Hugging Face <https://huggingface.co/collections/amazon/chronos-models-65f1791d630a8d57cb718444>`_ for more information.
508
508
  Note that a GPU is required for model sizes ``small``, ``base`` and ``large``.
509
509
  - ``"chronos"``: alias for ``"chronos_small"``.
510
- - ``"chronos_ensemble"``: builds an ensemble of the models specified in ``"high_quality"`` and ``"chronos_small"``.
511
- - ``"chronos_large_ensemble"``: builds an ensemble of the models specified in ``"high_quality"`` and ``"chronos_large"``.
510
+ - ``"chronos_ensemble"``: builds an ensemble of seasonal naive, tree-based and deep learning models with fast inference
511
+ and ``"chronos_small"``.
512
+ - ``"chronos_large_ensemble"``: builds an ensemble of seasonal naive, tree-based and deep learning models
513
+ with fast inference and ``"chronos_large"``.
512
514
 
513
515
  Details for these presets can be found in ``autogluon/timeseries/configs/presets_configs.py``. If not
514
516
  provided, user-provided values for ``hyperparameters`` and ``hyperparameter_tune_kwargs`` will be used
@@ -631,7 +633,11 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
631
633
  This argument has no effect if ``tuning_data`` is provided.
632
634
  refit_every_n_windows: int or None, default = 1
633
635
  When performing cross validation, each model will be retrained every ``refit_every_n_windows`` validation
634
- windows. If set to ``None``, model will only be fit once for the first validation window.
636
+ windows, where the number of validation windows is specified by `num_val_windows`. Note that in the
637
+ default setting where `num_val_windows=1`, this argument has no effect.
638
+
639
+ If set to ``None``, models will only be fit once for the first (oldest) validation window. By default,
640
+ `refit_every_n_windows=1`, i.e., all models will be refit for each validation window.
635
641
  refit_full : bool, default = False
636
642
  If True, after training is complete, AutoGluon will attempt to re-train all models using all of training
637
643
  data (including the data initially reserved for validation). This argument has no effect if ``tuning_data``
@@ -717,6 +723,12 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
717
723
  if num_val_windows == 0 and tuning_data is None:
718
724
  raise ValueError("Please set num_val_windows >= 1 or provide custom tuning_data")
719
725
 
726
+ if num_val_windows <= 1 and refit_every_n_windows > 1:
727
+ logger.warning(
728
+ f"\trefit_every_n_windows provided as {refit_every_n_windows} but num_val_windows is set to {num_val_windows}."
729
+ " Refit_every_n_windows will have no effect."
730
+ )
731
+
720
732
  if not skip_model_selection:
721
733
  train_data = self._filter_useless_train_data(
722
734
  train_data, num_val_windows=num_val_windows, val_step_size=val_step_size
@@ -613,7 +613,7 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
613
613
  break
614
614
 
615
615
  if random_seed is not None:
616
- seed_everything(random_seed)
616
+ seed_everything(random_seed + i)
617
617
 
618
618
  if contains_searchspace(model.get_user_params()):
619
619
  fit_log_message = f"Hyperparameter tuning model {model.name}. "
@@ -1,31 +1,46 @@
1
1
  import pandas as pd
2
2
 
3
3
  TO_MAJOR_FREQ = {
4
- "min": "T",
5
- "ms": "L",
6
- "us": "U",
4
+ # sub-daily
5
+ "H": "h",
6
+ "BH": "bh",
7
+ "cbh": "bh",
8
+ "CBH": "bh",
9
+ "T": "min",
10
+ "S": "s",
11
+ "L": "ms",
12
+ "U": "us",
13
+ "N": "ns",
7
14
  # business day
8
15
  "C": "B",
9
16
  # month
10
- "BM": "M",
11
- "CBM": "M",
12
- "MS": "M",
13
- "BMS": "M",
14
- "CBMS": "M",
17
+ "M": "ME",
18
+ "BM": "ME",
19
+ "BME": "ME",
20
+ "CBM": "ME",
21
+ "CBME": "ME",
22
+ "MS": "ME",
23
+ "BMS": "ME",
24
+ "CBMS": "ME",
15
25
  # semi-month
16
- "SMS": "SM",
26
+ "SM": "SME",
27
+ "SMS": "SME",
17
28
  # quarter
18
- "BQ": "Q",
19
- "QS": "Q",
20
- "BQS": "Q",
29
+ "Q": "QE",
30
+ "BQ": "QE",
31
+ "BQE": "QE",
32
+ "QS": "QE",
33
+ "BQS": "QE",
21
34
  # annual
22
- "Y": "A",
23
- "BA": "A",
24
- "BY": "A",
25
- "AS": "A",
26
- "YS": "A",
27
- "BAS": "A",
28
- "BYS": "A",
35
+ "A": "YE",
36
+ "Y": "YE",
37
+ "BA": "YE",
38
+ "BY": "YE",
39
+ "BYE": "YE",
40
+ "AS": "YE",
41
+ "YS": "YE",
42
+ "BAS": "YE",
43
+ "BYS": "YE",
29
44
  }
30
45
 
31
46
 
@@ -33,7 +48,10 @@ def norm_freq_str(offset: pd.DateOffset) -> str:
33
48
  """Obtain frequency string from a pandas.DateOffset object.
34
49
 
35
50
  "Non-standard" frequencies are converted to their "standard" counterparts. For example, MS (month start) is mapped
36
- to M (month) since both correspond to the same seasonality, lags and time features.
51
+ to ME (month end) since both correspond to the same seasonality, lags and time features.
52
+
53
+ The frequencies are always mapped to the new non-deprecated aliases (pandas>=2.2), e.g., "H" is mapped to "h". The
54
+ downstream functions like `get_seasonality` handle the new aliases even if older version of pandas is used.
37
55
  """
38
56
  base_freq = offset.name.split("-")[0]
39
57
  return TO_MAJOR_FREQ.get(base_freq, base_freq)
@@ -1,6 +1,7 @@
1
1
  """
2
2
  Generate lag indices based on frequency string. Adapted from gluonts.time_feature.lag.
3
3
  """
4
+
4
5
  from typing import List, Optional
5
6
 
6
7
  import numpy as np
@@ -96,13 +97,13 @@ def get_lags_for_frequency(
96
97
  offset = pd.tseries.frequencies.to_offset(freq)
97
98
  offset_name = norm_freq_str(offset)
98
99
 
99
- if offset_name == "A":
100
+ if offset_name == "YE":
100
101
  lags = []
101
- elif offset_name == "Q":
102
+ elif offset_name == "QE":
102
103
  lags = _make_lags_for_quarter(offset.n)
103
- elif offset_name == "M":
104
+ elif offset_name == "ME":
104
105
  lags = _make_lags_for_month(offset.n)
105
- elif offset_name == "SM":
106
+ elif offset_name == "SME":
106
107
  lags = _make_lags_for_semi_month(offset.n)
107
108
  elif offset_name == "W":
108
109
  lags = _make_lags_for_week(offset.n)
@@ -110,21 +111,21 @@ def get_lags_for_frequency(
110
111
  lags = _make_lags_for_day(offset.n) + _make_lags_for_week(offset.n / 7.0)
111
112
  elif offset_name == "B":
112
113
  lags = _make_lags_for_day(offset.n, days_in_week=5, days_in_month=22) + _make_lags_for_week(offset.n / 5.0)
113
- elif offset_name == "H":
114
+ elif offset_name == "h":
114
115
  lags = (
115
116
  _make_lags_for_hour(offset.n)
116
117
  + _make_lags_for_day(offset.n / 24)
117
118
  + _make_lags_for_week(offset.n / (24 * 7))
118
119
  )
119
120
  # business hour
120
- elif offset_name == "BH":
121
+ elif offset_name == "bh":
121
122
  lags = (
122
123
  _make_lags_for_business_hour(offset.n)
123
124
  + _make_lags_for_day(offset.n / 9)
124
125
  + _make_lags_for_week(offset.n / (9 * 7))
125
126
  )
126
127
  # minutes
127
- elif offset_name == "T":
128
+ elif offset_name == "min":
128
129
  lags = (
129
130
  _make_lags_for_minute(offset.n)
130
131
  + _make_lags_for_hour(offset.n / 60)
@@ -132,32 +133,32 @@ def get_lags_for_frequency(
132
133
  + _make_lags_for_week(offset.n / (60 * 24 * 7))
133
134
  )
134
135
  # second
135
- elif offset_name == "S":
136
+ elif offset_name == "s":
136
137
  lags = (
137
138
  _make_lags_for_second(offset.n)
138
139
  + _make_lags_for_minute(offset.n / 60)
139
140
  + _make_lags_for_hour(offset.n / (60 * 60))
140
141
  )
141
- elif offset_name == "L":
142
+ elif offset_name == "ms":
142
143
  lags = (
143
144
  _make_lags_for_second(offset.n / 1e3)
144
145
  + _make_lags_for_minute(offset.n / (60 * 1e3))
145
146
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e3))
146
147
  )
147
- elif offset_name == "U":
148
+ elif offset_name == "us":
148
149
  lags = (
149
150
  _make_lags_for_second(offset.n / 1e6)
150
151
  + _make_lags_for_minute(offset.n / (60 * 1e6))
151
152
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e6))
152
153
  )
153
- elif offset_name == "N":
154
+ elif offset_name == "ns":
154
155
  lags = (
155
156
  _make_lags_for_second(offset.n / 1e9)
156
157
  + _make_lags_for_minute(offset.n / (60 * 1e9))
157
158
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e9))
158
159
  )
159
160
  else:
160
- raise Exception(f"invalid frequency {freq}")
161
+ raise Exception(f"Cannot get lags for unsupported frequency {freq}")
161
162
 
162
163
  # flatten lags list and filter
163
164
  lags = [int(lag) for sub_list in lags for lag in sub_list if 7 < lag <= lag_ub]
@@ -5,20 +5,20 @@ import pandas as pd
5
5
  from .base import norm_freq_str
6
6
 
7
7
  DEFAULT_SEASONALITIES = {
8
- "A": 1,
9
- "Q": 4,
10
- "M": 12,
11
- "SM": 24,
8
+ "YE": 1,
9
+ "QE": 4,
10
+ "ME": 12,
11
+ "SME": 24,
12
12
  "W": 1,
13
13
  "D": 7,
14
14
  "B": 5,
15
- "BH": 9,
16
- "H": 24,
17
- "T": 60 * 24,
18
- "S": 1,
19
- "L": 1,
20
- "U": 1,
21
- "N": 1,
15
+ "bh": 9,
16
+ "h": 24,
17
+ "min": 60 * 24,
18
+ "s": 1,
19
+ "ms": 1,
20
+ "us": 1,
21
+ "ns": 1,
22
22
  }
23
23
 
24
24
 
@@ -1,6 +1,7 @@
1
1
  """
2
2
  Generate time features based on frequency string. Adapted from gluonts.time_feature.time_feature.
3
3
  """
4
+
4
5
  from typing import Callable, List
5
6
 
6
7
  import numpy as np
@@ -57,20 +58,20 @@ def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:
57
58
 
58
59
  def get_time_features_for_frequency(freq) -> List[Callable]:
59
60
  features_by_offset_name = {
60
- "A": [],
61
- "Q": [quarter_of_year],
62
- "M": [month_of_year],
63
- "SM": [day_of_month, month_of_year],
61
+ "YE": [],
62
+ "QE": [quarter_of_year],
63
+ "ME": [month_of_year],
64
+ "SME": [day_of_month, month_of_year],
64
65
  "W": [day_of_month, week_of_year],
65
66
  "D": [day_of_week, day_of_month, day_of_year],
66
67
  "B": [day_of_week, day_of_month, day_of_year],
67
- "BH": [hour_of_day, day_of_week, day_of_month, day_of_year],
68
- "H": [hour_of_day, day_of_week, day_of_month, day_of_year],
69
- "T": [minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
70
- "S": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
71
- "L": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
72
- "U": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
73
- "N": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
68
+ "bh": [hour_of_day, day_of_week, day_of_month, day_of_year],
69
+ "h": [hour_of_day, day_of_week, day_of_month, day_of_year],
70
+ "min": [minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
71
+ "s": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
72
+ "ms": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
73
+ "us": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
74
+ "ns": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
74
75
  }
75
76
  offset = pd.tseries.frequencies.to_offset(freq)
76
77
  offset_name = norm_freq_str(offset)
@@ -14,6 +14,7 @@ from autogluon.features.generators import (
14
14
  PipelineFeatureGenerator,
15
15
  )
16
16
  from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
17
+ from autogluon.timeseries.utils.warning_filters import warning_filter
17
18
 
18
19
  logger = logging.getLogger(__name__)
19
20
 
@@ -72,7 +73,7 @@ class ContinuousAndCategoricalFeatureGenerator(PipelineFeatureGenerator):
72
73
  Imputes missing categorical features with the most frequent value in the training set.
73
74
  """
74
75
 
75
- def __init__(self, verbosity: int = 0, minimum_cat_count=2, float_dtype: str = "float32", **kwargs):
76
+ def __init__(self, verbosity: int = 0, minimum_cat_count=2, float_dtype: str = "float64", **kwargs):
76
77
  generators = [
77
78
  CategoryFeatureGenerator(minimum_cat_count=minimum_cat_count, fillna="mode"),
78
79
  IdentityFeatureGenerator(infer_features_in_args={"valid_raw_types": [R_INT, R_FLOAT]}),
@@ -110,7 +111,7 @@ class ContinuousAndCategoricalFeatureGenerator(PipelineFeatureGenerator):
110
111
  class TimeSeriesFeatureGenerator:
111
112
  """Takes care of preprocessing for static_features and past/known covariates.
112
113
 
113
- All covariates & static features are converted into either float32 or categorical dtype.
114
+ All covariates & static features are converted into either float64 or categorical dtype.
114
115
 
115
116
  Missing values in the target column are left as-is but missing values in static features & covariates are imputed.
116
117
  Imputation logic is as follows:
@@ -120,16 +121,18 @@ class TimeSeriesFeatureGenerator:
120
121
  covariate values are missing, we fill them with the median of the training set.
121
122
  """
122
123
 
123
- def __init__(self, target: str, known_covariates_names: List[str], float_dtype: str = "float32"):
124
+ def __init__(self, target: str, known_covariates_names: List[str], float_dtype: str = "float64"):
124
125
  self.target = target
125
126
  self.float_dtype = float_dtype
126
127
  self._is_fit = False
127
128
  self.known_covariates_names = list(known_covariates_names)
128
129
  self.past_covariates_names = []
129
- self.known_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
130
- self.past_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator()
130
+ self.known_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator(float_dtype=float_dtype)
131
+ self.past_covariates_pipeline = ContinuousAndCategoricalFeatureGenerator(float_dtype=float_dtype)
131
132
  # Cat features with cat_count=1 are fine in static_features since they are repeated for all time steps in a TS
132
- self.static_feature_pipeline = ContinuousAndCategoricalFeatureGenerator(minimum_cat_count=1)
133
+ self.static_feature_pipeline = ContinuousAndCategoricalFeatureGenerator(
134
+ minimum_cat_count=1, float_dtype=float_dtype
135
+ )
133
136
  self.covariate_metadata: CovariateMetadata = None
134
137
  self._train_covariates_real_median: Optional[pd.Series] = None
135
138
  self._train_static_real_median: Optional[pd.Series] = None
@@ -335,7 +338,9 @@ class AbstractFeatureImportanceTransform:
335
338
  # we'll have to work on the history of the data alone
336
339
  data[feature_name] = data[feature_name].copy()
337
340
  feature_data = data[feature_name].groupby(level=ITEMID, sort=False).head(-self.prediction_length)
338
- data[feature_name].update(self._transform_series(feature_data, is_categorical=is_categorical))
341
+ # Silence spurious FutureWarning raised by DataFrame.update https://github.com/pandas-dev/pandas/issues/57124
342
+ with warning_filter():
343
+ data[feature_name].update(self._transform_series(feature_data, is_categorical=is_categorical))
339
344
  elif feature_name in self.covariate_metadata.static_features:
340
345
  feature_data = data.static_features[feature_name].copy()
341
346
  feature_data.reset_index(drop=True, inplace=True)
@@ -1,4 +1,5 @@
1
1
  import warnings
2
+ from typing import Optional
2
3
 
3
4
  import numpy as np
4
5
  import pandas as pd
@@ -15,7 +16,9 @@ def get_forecast_horizon_index_single_time_series(
15
16
 
16
17
 
17
18
  def get_forecast_horizon_index_ts_dataframe(
18
- ts_dataframe: TimeSeriesDataFrame, prediction_length: int
19
+ ts_dataframe: TimeSeriesDataFrame,
20
+ prediction_length: int,
21
+ freq: Optional[str] = None,
19
22
  ) -> pd.MultiIndex:
20
23
  """For each item in the dataframe, get timestamps for the next prediction_length many time steps into the future.
21
24
 
@@ -26,7 +29,9 @@ def get_forecast_horizon_index_ts_dataframe(
26
29
  last = ts_dataframe.reset_index()[[ITEMID, TIMESTAMP]].groupby(by=ITEMID, sort=False, as_index=False).last()
27
30
  item_ids = np.repeat(last[ITEMID], prediction_length)
28
31
 
29
- offset = pd.tseries.frequencies.to_offset(ts_dataframe.freq)
32
+ if freq is None:
33
+ freq = ts_dataframe.freq
34
+ offset = pd.tseries.frequencies.to_offset(freq)
30
35
  last_ts = pd.DatetimeIndex(last[TIMESTAMP])
31
36
  # Non-vectorized offsets like BusinessDay may produce a PerformanceWarning - we filter them
32
37
  with warnings.catch_warnings():
@@ -7,14 +7,12 @@ import re
7
7
  import sys
8
8
  import warnings
9
9
 
10
- from statsmodels.tools.sm_exceptions import ConvergenceWarning, ValueWarning
11
-
12
10
  __all__ = ["warning_filter", "disable_root_logger", "disable_tqdm"]
13
11
 
14
12
 
15
13
  @contextlib.contextmanager
16
14
  def warning_filter(all_warnings: bool = False):
17
- categories = [RuntimeWarning, UserWarning, ConvergenceWarning, ValueWarning, FutureWarning]
15
+ categories = [RuntimeWarning, UserWarning, FutureWarning]
18
16
  if all_warnings:
19
17
  categories.append(Warning)
20
18
  with warnings.catch_warnings():
@@ -1,3 +1,3 @@
1
1
  """This is the autogluon version file."""
2
- __version__ = '1.0.1b20240408'
2
+ __version__ = '1.1.0'
3
3
  __lite__ = False
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: autogluon.timeseries
3
- Version: 1.0.1b20240408
4
- Summary: AutoML for Image, Text, and Tabular Data
3
+ Version: 1.1.0
4
+ Summary: Fast and Accurate ML in 3 Lines of Code
5
5
  Home-page: https://github.com/autogluon/autogluon
6
6
  Author: AutoGluon Community
7
7
  License: Apache-2.0
@@ -10,7 +10,7 @@ Project-URL: Bug Reports, https://github.com/autogluon/autogluon/issues
10
10
  Project-URL: Source, https://github.com/autogluon/autogluon/
11
11
  Project-URL: Contribute!, https://github.com/autogluon/autogluon/blob/master/CONTRIBUTING.md
12
12
  Platform: UNKNOWN
13
- Classifier: Development Status :: 4 - Beta
13
+ Classifier: Development Status :: 5 - Production/Stable
14
14
  Classifier: Intended Audience :: Education
15
15
  Classifier: Intended Audience :: Developers
16
16
  Classifier: Intended Audience :: Science/Research
@@ -37,14 +37,13 @@ Description-Content-Type: text/markdown
37
37
  Requires-Dist: joblib <2,>=1.1
38
38
  Requires-Dist: numpy <1.29,>=1.21
39
39
  Requires-Dist: scipy <1.13,>=1.5.4
40
- Requires-Dist: pandas <2.2.0,>=2.0.0
40
+ Requires-Dist: pandas <2.3.0,>=2.0.0
41
41
  Requires-Dist: torch <2.2,>=2.1
42
42
  Requires-Dist: lightning <2.2,>=2.1
43
43
  Requires-Dist: pytorch-lightning <2.2,>=2.1
44
44
  Requires-Dist: transformers[sentencepiece] <4.39.0,>=4.38.0
45
45
  Requires-Dist: accelerate <0.22.0,>=0.21.0
46
- Requires-Dist: statsmodels <0.15,>=0.13.0
47
- Requires-Dist: gluonts <0.15,>=0.14.0
46
+ Requires-Dist: gluonts <0.14.4,>=0.14.0
48
47
  Requires-Dist: networkx <4,>=3.0
49
48
  Requires-Dist: statsforecast <1.5,>=1.4.0
50
49
  Requires-Dist: mlforecast <0.10.1,>=0.10.0
@@ -52,18 +51,16 @@ Requires-Dist: utilsforecast <0.0.11,>=0.0.10
52
51
  Requires-Dist: tqdm <5,>=4.38
53
52
  Requires-Dist: orjson ~=3.9
54
53
  Requires-Dist: tensorboard <3,>=2.9
55
- Requires-Dist: autogluon.core[raytune] ==1.0.1b20240408
56
- Requires-Dist: autogluon.common ==1.0.1b20240408
57
- Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==1.0.1b20240408
54
+ Requires-Dist: autogluon.core[raytune] ==1.1.0
55
+ Requires-Dist: autogluon.common ==1.1.0
56
+ Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==1.1.0
58
57
  Provides-Extra: all
59
- Requires-Dist: optimum[onnxruntime] <1.19,>=1.18 ; extra == 'all'
60
- Requires-Dist: optimum-intel[nncf,openvino] <1.17,>=1.16 ; extra == 'all'
61
- Requires-Dist: optimum[nncf,openvino] <1.19,>=1.18 ; extra == 'all'
58
+ Requires-Dist: optimum[onnxruntime] <1.19,>=1.17 ; extra == 'all'
62
59
  Provides-Extra: chronos-onnx
63
- Requires-Dist: optimum[onnxruntime] <1.19,>=1.18 ; extra == 'chronos-onnx'
60
+ Requires-Dist: optimum[onnxruntime] <1.19,>=1.17 ; extra == 'chronos-onnx'
64
61
  Provides-Extra: chronos-openvino
65
- Requires-Dist: optimum-intel[nncf,openvino] <1.17,>=1.16 ; extra == 'chronos-openvino'
66
- Requires-Dist: optimum[nncf,openvino] <1.19,>=1.18 ; extra == 'chronos-openvino'
62
+ Requires-Dist: optimum-intel[nncf,openvino] <1.17,>=1.15 ; extra == 'chronos-openvino'
63
+ Requires-Dist: optimum[nncf,openvino] <1.19,>=1.17 ; extra == 'chronos-openvino'
67
64
  Provides-Extra: tests
68
65
  Requires-Dist: pytest ; extra == 'tests'
69
66
  Requires-Dist: ruff >=0.0.285 ; extra == 'tests'
@@ -77,7 +74,7 @@ Requires-Dist: black ~=23.0 ; extra == 'tests'
77
74
  <div align="center">
78
75
  <img src="https://user-images.githubusercontent.com/16392542/77208906-224aa500-6aba-11ea-96bd-e81806074030.png" width="350">
79
76
 
80
- ## AutoML for Image, Text, Time Series, and Tabular Data
77
+ ## Fast and Accurate ML in 3 Lines of Code
81
78
 
82
79
  [![Latest Release](https://img.shields.io/github/v/release/autogluon/autogluon)](https://github.com/autogluon/autogluon/releases)
83
80
  [![Conda Forge](https://img.shields.io/conda/vn/conda-forge/autogluon.svg)](https://anaconda.org/conda-forge/autogluon)
@@ -1,61 +1,62 @@
1
- autogluon.timeseries-1.0.1b20240408-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
1
+ autogluon.timeseries-1.1.0-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
2
2
  autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
3
3
  autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
4
- autogluon/timeseries/learner.py,sha256=fPIV2p0BMWcZr5fwOkNsJrk8RxK-IYUH_VON3_YXKOQ,13750
5
- autogluon/timeseries/predictor.py,sha256=A-YkJGKrYGXGlmtIHd9CDMmudBSKcBdnOCJK4oGsQr8,81222
4
+ autogluon/timeseries/learner.py,sha256=IYXpJSDyTzjZXjKL_SrTujt5Uke83mSJFA0sMj25_sM,13828
5
+ autogluon/timeseries/predictor.py,sha256=ZwJ_ux4NgsVcUvO-t0hg5ftNsZt8BB8T0rnpkKC1Yv4,81910
6
6
  autogluon/timeseries/splitter.py,sha256=eghGwAAN2_cxGk5aJBILgjGWtLzjxJcytMy49gg_q18,3061
7
- autogluon/timeseries/version.py,sha256=jZBmrJPoADjz7yFzFD1IlaguMPsaJC2NcUT56_rbwBY,90
7
+ autogluon/timeseries/version.py,sha256=n6h1mKS7p0ckrcIknADr01ZN4cKTfDTv6XXjD9tvOk0,81
8
8
  autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
9
- autogluon/timeseries/configs/presets_configs.py,sha256=ZVV8BsnGnnHPgjBtJBqF-H35MYUdzRBQ8FP7zA3_11g,1949
9
+ autogluon/timeseries/configs/presets_configs.py,sha256=94-yL9teDHKs2irWjP3kpewI7FE1ChYCgEgz9XHJ6gc,1965
10
10
  autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
11
- autogluon/timeseries/dataset/ts_dataframe.py,sha256=Laa7g_4ssScjNlCCBJgW6R6NLLt3cu8rVElL6FtvlrE,45567
11
+ autogluon/timeseries/dataset/ts_dataframe.py,sha256=QgCwzRx10CRzeWzQREX_zUDAl77-lkdNjM0bT3kK8NU,45595
12
12
  autogluon/timeseries/metrics/__init__.py,sha256=KzgXNj5or7RB_uadjgC8p5gxyV26zjj2hT58OmvnfmA,1875
13
13
  autogluon/timeseries/metrics/abstract.py,sha256=9xCFQ3NaR1C0hn01M7oBd72a_CiNV-w6QFcRjwUbKYI,8183
14
14
  autogluon/timeseries/metrics/point.py,sha256=xy8sKrBbuxZ7yTW21TDPayKnEj2FBj1AEseJxUdneqE,13399
15
15
  autogluon/timeseries/metrics/quantile.py,sha256=owMbOAJYwVyzdRkrJpuCGUXk937GU843QndCZyp5n9Y,3967
16
16
  autogluon/timeseries/metrics/utils.py,sha256=eJ63TCR-UwbeJ1c2Qm7B2q-8B3sFthPgiooEccrf2Kc,912
17
17
  autogluon/timeseries/models/__init__.py,sha256=HFjDOYKQWaGlgQWiLlOvfwE2dH0uDmeKJFC8GDL987c,1271
18
- autogluon/timeseries/models/presets.py,sha256=p36ROcuOnixgGsI1zBdr9VM-MH2pKCiJCS2Ofb4xT8o,11243
18
+ autogluon/timeseries/models/presets.py,sha256=W9H61MefSbxE-woh3ysgzTpc8eML40aa2l8q-AALBo0,11654
19
19
  autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
20
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=aUXlX1ozc5XghinR5ahGIX94MkhBmmYvgmqmMib5BhU,23391
20
+ autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=MvLF529b3fo0icgle-qmS0oce-ftiiQ1jPBLnY-39fk,23435
21
21
  autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
22
22
  autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
23
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=9gNuCWf8vVfVPiXppwG5l_3mLbZZ6i5pHKTM-rSk5Ww,30977
23
+ autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=4WbvCgfUCKbg7J5OJisSQK4LMiz8PtTnxU7nkGosmGY,31491
24
24
  autogluon/timeseries/models/autogluon_tabular/utils.py,sha256=4-gTrBtizxeMVQlsuscugPqw9unaXWXhS1TVVssfzYY,2125
25
25
  autogluon/timeseries/models/chronos/__init__.py,sha256=wT77HzTtmQxW3sw2k0mA5Ot6PSHivX-Uvn5fjM05EU4,60
26
- autogluon/timeseries/models/chronos/model.py,sha256=8mZBsjZGP6Q1IGneTiSkcSFvkI6eVBFwweT0t6YUzNk,14974
26
+ autogluon/timeseries/models/chronos/model.py,sha256=0ZxOpGyx7MmXYDr9zeDt6-rIu50Bm7ssR9zTIvd6vmQ,14659
27
27
  autogluon/timeseries/models/chronos/pipeline.py,sha256=caR4tx-MZnrPeiU_Rra566-OP_SpodtOgcU7P0Hw0Vc,20784
28
+ autogluon/timeseries/models/chronos/utils.py,sha256=dl7pytUFmosFVfBcBAGA0JqMJp4cTQ3DmM9Mdjap9no,2124
28
29
  autogluon/timeseries/models/ensemble/__init__.py,sha256=kFr11Gmt7lQJu9Rr8HuIPphQN5l1TsoorfbJm_O3a_s,128
29
30
  autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py,sha256=tifETwmiEGt-YtQ9eNK7ojJ3fBvtFMUJvisbfkIJ7gw,3393
30
31
  autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=5HvZuW5osgsZg3V69k82nKEOy_YgeH1JTfQa7F3cU7s,7220
31
32
  autogluon/timeseries/models/gluonts/__init__.py,sha256=M8PV9ZE4WpteScMobXM6RH1Udb1AZiHHtj2g5GQL3TU,329
32
- autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=X1l_MexAoyBNMGiJrWreHQHLDSmZV_OSrhjhJ7MA0JM,34348
33
+ autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=X4KChuSVSoxLOcrto1SgwAgiHeCuE5jFOaX8GxdBTeg,34017
33
34
  autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
- autogluon/timeseries/models/gluonts/torch/models.py,sha256=PVDns7CnZtJTbPiCw-FJxahKrDjC-wj0VkwIGsodYY0,19930
35
+ autogluon/timeseries/models/gluonts/torch/models.py,sha256=Eo_AI5bWpHx3_05lnates4tnessBrUrVkUAyGoAb0zk,19780
35
36
  autogluon/timeseries/models/local/__init__.py,sha256=JyckWWgMG1BTIWJqFTW6e1O-eb0LPPOwtXwmb1ErohQ,756
36
- autogluon/timeseries/models/local/abstract_local_model.py,sha256=lota8MNpfgYC1PftM7sKcjx2gVCVq3K_D_dovBGqksg,11692
37
+ autogluon/timeseries/models/local/abstract_local_model.py,sha256=JfjDXOSBgD_10JrIq5nWS038-4moRNI0001BLta8nRs,11723
37
38
  autogluon/timeseries/models/local/naive.py,sha256=iwRcFMFmJKPWPbD9TWaIUS51oav69F_VAp6-jb_5SUE,7249
38
39
  autogluon/timeseries/models/local/npts.py,sha256=Bp74doKnfpGE8ywP4FWOCI_RwRMsmgocYDfGtq764DA,4143
39
40
  autogluon/timeseries/models/local/statsforecast.py,sha256=oDYKKM2LZXEQLhPLEgZZWhvSEC1iE1wBexpl8P-Cxwc,32991
40
41
  autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
41
- autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=Thge05cLytJoOpShE7g1MuNa-qlZWUrSvaO0aCbKQbA,11348
42
+ autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=HiujLv8MJ31fWxRM5iXG2PzobFn4Mus0nJPu0MP2Rw4,11374
42
43
  autogluon/timeseries/trainer/__init__.py,sha256=lxiOT-Gc6BEnr_yWQqra85kEngeM_wtH2SCaRbmC_qE,170
43
- autogluon/timeseries/trainer/abstract_trainer.py,sha256=2nPLskmbOGRzkj6ttX0tHVkj9h2Y72MHaZy7L78MBZQ,59100
44
+ autogluon/timeseries/trainer/abstract_trainer.py,sha256=HUyX6xz1R_MoXtZQ2Hci9gEZ7IGdjTD7d6zbJ7UW9Qg,59104
44
45
  autogluon/timeseries/trainer/auto_trainer.py,sha256=psJFZBwWWPlLjNwAgvO4OUJXsRW1sTN2YS9a4pdoeoE,3344
45
46
  autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
46
- autogluon/timeseries/utils/features.py,sha256=OvBxLIWKR7fPOIlifonVKXUdaWazH_WbdLssJtFCpGs,19261
47
- autogluon/timeseries/utils/forecast.py,sha256=Thjt6yTPSe3V4s5cQ9UbW3ysTJb1lkqxtZiCqgBSt3w,1776
48
- autogluon/timeseries/utils/warning_filters.py,sha256=ngjmfv21zIwTG-7VNZT-NkaSR7ssnoNtUwcXCXANZ4A,2076
47
+ autogluon/timeseries/utils/features.py,sha256=hEir-2lU8fvHjt5r_LG9tLZEk5wNdRdeLRE7qF5z3_Y,19585
48
+ autogluon/timeseries/utils/forecast.py,sha256=p0WKM9Q0nLAwwmCgYZI1zi9mCOWXWJfllEt2lPRQl4M,1882
49
+ autogluon/timeseries/utils/warning_filters.py,sha256=HMXNDo9jOUdf9wvyp-Db55xTq_Ctj6uso7qPhngoJPQ,1964
49
50
  autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
50
- autogluon/timeseries/utils/datetime/base.py,sha256=MsqIHY14m3QMjSwwtE7Uo1oNwepWUby_nxlWm4DlqKU,848
51
- autogluon/timeseries/utils/datetime/lags.py,sha256=kcU4liKbHj7KP2ajNU-KLZ8OYSU35EgT4kJjZNSw0Zg,5875
52
- autogluon/timeseries/utils/datetime/seasonality.py,sha256=kgK_ukw2wCviEB7CZXRVC5HZpBJZu9IsRrvCJ9E_rOE,755
53
- autogluon/timeseries/utils/datetime/time_features.py,sha256=pROkYyxETQ8rHKfPGhf2paB73C7rWJ2Ui0cCswLqbBg,2562
54
- autogluon.timeseries-1.0.1b20240408.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
55
- autogluon.timeseries-1.0.1b20240408.dist-info/METADATA,sha256=6bvEjMigJQz6jN2Ecj2VU9rc-mZC2WNNVlMSFLKxrBk,12685
56
- autogluon.timeseries-1.0.1b20240408.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
57
- autogluon.timeseries-1.0.1b20240408.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
58
- autogluon.timeseries-1.0.1b20240408.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
59
- autogluon.timeseries-1.0.1b20240408.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
60
- autogluon.timeseries-1.0.1b20240408.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
61
- autogluon.timeseries-1.0.1b20240408.dist-info/RECORD,,
51
+ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
52
+ autogluon/timeseries/utils/datetime/lags.py,sha256=GoLtvcZ8oKb3QkoBJ9E59LSPLOP7Qjxrr2UmMSZgjyw,5909
53
+ autogluon/timeseries/utils/datetime/seasonality.py,sha256=h_4w00iEytAz_N_EpCENQ8RCXy7KQITczrYjBgVqWkQ,764
54
+ autogluon/timeseries/utils/datetime/time_features.py,sha256=PAXbYbQ0z_5GFbkxSNi41zLY_2-U3x0Ynm1m_WhdtGc,2572
55
+ autogluon.timeseries-1.1.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
56
+ autogluon.timeseries-1.1.0.dist-info/METADATA,sha256=zVXWVwC3jHtUwkKzvpdT4wjmUhCVjATCHW0N1VRZuxk,12465
57
+ autogluon.timeseries-1.1.0.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
58
+ autogluon.timeseries-1.1.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
59
+ autogluon.timeseries-1.1.0.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
60
+ autogluon.timeseries-1.1.0.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
61
+ autogluon.timeseries-1.1.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
62
+ autogluon.timeseries-1.1.0.dist-info/RECORD,,