autogluon.timeseries 0.8.3b20230817__py3-none-any.whl → 0.8.3b20230819__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of autogluon.timeseries might be problematic. Click here for more details.

Files changed (23) hide show
  1. autogluon/timeseries/dataset/ts_dataframe.py +121 -36
  2. autogluon/timeseries/learner.py +9 -31
  3. autogluon/timeseries/models/__init__.py +4 -0
  4. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +1 -4
  5. autogluon/timeseries/models/local/__init__.py +2 -1
  6. autogluon/timeseries/models/local/abstract_local_model.py +5 -1
  7. autogluon/timeseries/models/local/naive.py +102 -0
  8. autogluon/timeseries/models/local/npts.py +59 -0
  9. autogluon/timeseries/models/local/statsforecast.py +0 -2
  10. autogluon/timeseries/models/presets.py +6 -1
  11. autogluon/timeseries/predictor.py +145 -80
  12. autogluon/timeseries/trainer/abstract_trainer.py +1 -4
  13. autogluon/timeseries/trainer/auto_trainer.py +1 -1
  14. autogluon/timeseries/version.py +1 -1
  15. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/METADATA +4 -4
  16. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/RECORD +23 -22
  17. /autogluon.timeseries-0.8.3b20230817-py3.8-nspkg.pth → /autogluon.timeseries-0.8.3b20230819-py3.8-nspkg.pth +0 -0
  18. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/LICENSE +0 -0
  19. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/NOTICE +0 -0
  20. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/WHEEL +0 -0
  21. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/namespace_packages.txt +0 -0
  22. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/top_level.txt +0 -0
  23. {autogluon.timeseries-0.8.3b20230817.dist-info → autogluon.timeseries-0.8.3b20230819.dist-info}/zip-safe +0 -0
@@ -4,7 +4,7 @@ import copy
4
4
  import itertools
5
5
  import logging
6
6
  from collections.abc import Iterable
7
- from typing import Any, List, Optional, Tuple, Type
7
+ from typing import Any, List, Optional, Tuple, Type, Union
8
8
 
9
9
  import numpy as np
10
10
  import pandas as pd
@@ -12,6 +12,7 @@ from joblib.parallel import Parallel, delayed
12
12
  from pandas.core.internals import ArrayManager, BlockManager
13
13
 
14
14
  from autogluon.common.loaders import load_pd
15
+ from autogluon.common.utils.deprecated_utils import Deprecated
15
16
 
16
17
  logger = logging.getLogger(__name__)
17
18
 
@@ -322,7 +323,6 @@ class TimeSeriesDataFrame(pd.DataFrame):
322
323
  id_column: Optional[str] = None,
323
324
  timestamp_column: Optional[str] = None,
324
325
  ) -> pd.DataFrame:
325
-
326
326
  df = df.copy()
327
327
  if id_column is not None:
328
328
  assert id_column in df.columns, f"Column '{id_column}' not found!"
@@ -497,7 +497,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
497
497
 
498
498
  Examples
499
499
  --------
500
- >>> print(ts_dataframe)
500
+ >>> ts_df
501
501
  target
502
502
  item_id timestamp
503
503
  0 2019-01-01 0
@@ -582,7 +582,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
582
582
 
583
583
  Returns
584
584
  -------
585
- ts_df: TimeSeriesDataFrame
585
+ ts_df : TimeSeriesDataFrame
586
586
  A new time series dataframe containing entries of the original time series between start and end timestamps.
587
587
  """
588
588
 
@@ -596,7 +596,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
596
596
  )
597
597
 
598
598
  @classmethod
599
- def from_pickle(cls, filepath_or_buffer: Any) -> "TimeSeriesDataFrame":
599
+ def from_pickle(cls, filepath_or_buffer: Any) -> TimeSeriesDataFrame:
600
600
  """Convenience method to read pickled time series data frames. If the read pickle
601
601
  file refers to a plain pandas DataFrame, it will be cast to a TimeSeriesDataFrame.
602
602
 
@@ -607,7 +607,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
607
607
 
608
608
  Returns
609
609
  -------
610
- ts_df: TimeSeriesDataFrame
610
+ ts_df : TimeSeriesDataFrame
611
611
  The pickled time series data frame.
612
612
  """
613
613
  try:
@@ -616,6 +616,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
616
616
  except Exception as err: # noqa
617
617
  raise IOError(f"Could not load pickled data set due to error: {str(err)}")
618
618
 
619
+ @Deprecated(min_version_to_warn="0.9", min_version_to_error="1.0")
619
620
  def get_reindexed_view(self, freq: str = "S") -> TimeSeriesDataFrame:
620
621
  """Returns a new TimeSeriesDataFrame object with the same underlying data and
621
622
  static features as the current data frame, except the time index is replaced by
@@ -649,7 +650,8 @@ class TimeSeriesDataFrame(pd.DataFrame):
649
650
 
650
651
  return df_view
651
652
 
652
- def to_regular_index(self, freq: str) -> "TimeSeriesDataFrame":
653
+ @Deprecated(min_version_to_warn="0.9", min_version_to_error="1.0", new="convert_frequency")
654
+ def to_regular_index(self, freq: str) -> TimeSeriesDataFrame:
653
655
  """Fill the gaps in an irregularly-sampled time series with NaNs.
654
656
 
655
657
  Parameters
@@ -659,7 +661,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
659
661
 
660
662
  Examples
661
663
  --------
662
- >>> print(ts_dataframe)
664
+ >>> ts_df
663
665
  target
664
666
  item_id timestamp
665
667
  0 2019-01-01 NaN
@@ -669,7 +671,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
669
671
  1 2019-02-04 3.0
670
672
  2019-02-07 4.0
671
673
 
672
- >>> print(ts_dataframe.to_regular_index(freq="D"))
674
+ >>> ts_df.to_regular_index(freq="D")
673
675
  target
674
676
  item_id timestamp
675
677
  0 2019-01-01 NaN
@@ -685,30 +687,9 @@ class TimeSeriesDataFrame(pd.DataFrame):
685
687
  2019-02-07 4.0
686
688
 
687
689
  """
688
- if self.freq is not None:
689
- if self.freq != freq:
690
- raise ValueError(
691
- f"TimeSeriesDataFrame already has a regular index with freq '{self.freq}' "
692
- f"that cannot be converted to the given freq '{freq}'"
693
- )
694
- else:
695
- return self
696
-
697
- filled_series = []
698
- for item_id, time_series in self.groupby(level=ITEMID, sort=False):
699
- time_series = time_series.droplevel(ITEMID)
700
- timestamps = time_series.index
701
- resampled_ts = time_series.resample(freq).asfreq()
702
- if not timestamps.isin(resampled_ts.index).all():
703
- raise ValueError(
704
- f"Irregularly-sampled timestamps in this TimeSeriesDataFrame are not compatible "
705
- f"with the given frequency '{freq}'"
706
- )
707
- filled_series.append(pd.concat({item_id: resampled_ts}, names=[ITEMID]))
690
+ return self.convert_frequency(freq=freq)
708
691
 
709
- return TimeSeriesDataFrame(pd.concat(filled_series), static_features=self.static_features)
710
-
711
- def fill_missing_values(self, method: str = "auto", value: float = 0.0) -> "TimeSeriesDataFrame":
692
+ def fill_missing_values(self, method: str = "auto", value: float = 0.0) -> TimeSeriesDataFrame:
712
693
  """Fill missing values represented by NaN.
713
694
 
714
695
  Parameters
@@ -726,7 +707,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
726
707
 
727
708
  Examples
728
709
  --------
729
- >>> print(ts_dataframe)
710
+ >>> ts_df
730
711
  target
731
712
  item_id timestamp
732
713
  0 2019-01-01 NaN
@@ -741,7 +722,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
741
722
  2019-02-06 NaN
742
723
  2019-02-07 4.0
743
724
 
744
- >>> print(ts_dataframe.fill_missing_values(method="auto"))
725
+ >>> ts_df.fill_missing_values(method="auto")
745
726
  target
746
727
  item_id timestamp
747
728
  0 2019-01-01 1.0
@@ -765,7 +746,9 @@ class TimeSeriesDataFrame(pd.DataFrame):
765
746
 
766
747
  grouped_df = pd.DataFrame(self).groupby(level=ITEMID, sort=False, group_keys=False)
767
748
  if method == "auto":
768
- filled_df = grouped_df.fillna(method="ffill").fillna(method="bfill")
749
+ filled_df = grouped_df.fillna(method="ffill")
750
+ # Fill missing values at the start of each time series with bfill
751
+ filled_df = filled_df.groupby(level=ITEMID, sort=False, group_keys=False).fillna(method="bfill")
769
752
  elif method in ["ffill", "pad"]:
770
753
  filled_df = grouped_df.fillna(method="ffill")
771
754
  elif method in ["bfill", "backfill"]:
@@ -782,13 +765,14 @@ class TimeSeriesDataFrame(pd.DataFrame):
782
765
  )
783
766
  return TimeSeriesDataFrame(filled_df, static_features=self.static_features)
784
767
 
785
- def dropna(self, how: str = "any") -> "TimeSeriesDataFrame":
768
+ def dropna(self, how: str = "any") -> TimeSeriesDataFrame:
786
769
  """Drop rows containing NaNs.
787
770
 
788
771
  Parameters
789
772
  ----------
790
773
  how : {"any", "all"}, default = "any"
791
774
  Determine if row or column is removed from TimeSeriesDataFrame, when we have at least one NaN or all NaN.
775
+
792
776
  - "any" : If any NaN values are present, drop that row or column.
793
777
  - "all" : If all values are NaN, drop that row or column.
794
778
  """
@@ -864,3 +848,104 @@ class TimeSeriesDataFrame(pd.DataFrame):
864
848
  data.static_features.index = data.static_features.index.astype(str)
865
849
  data.static_features.index += suffix
866
850
  return train_data, test_data
851
+
852
+ def convert_frequency(
853
+ self,
854
+ freq: Union[str, pd.DateOffset],
855
+ agg_numeric: str = "mean",
856
+ agg_categorical: str = "first",
857
+ **kwargs,
858
+ ) -> TimeSeriesDataFrame:
859
+ """Convert each time series in the data frame to the given frequency.
860
+
861
+ This method is useful for two purposes:
862
+
863
+ 1. Converting an irregularly-sampled time series to a regular time index.
864
+ 2. Aggregating time series data by downsampling (e.g., convert daily sales into weekly sales)
865
+
866
+ Parameters
867
+ ----------
868
+ freq : Union[str, pd.DateOffset]
869
+ Frequency to which the data should be converted. See [pandas frequency aliases](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
870
+ for supported values.
871
+ agg_numeric : {"max", "min", "sum", "mean", "median", "first", "last"}, default = "mean"
872
+ Aggregation method applied to numeric columns.
873
+ agg_categorical : {"first", "last"}, default = "first"
874
+ Aggregation method applied to categorical columns.
875
+ **kwargs
876
+ Additional keyword arguments that will be passed to ``pandas.DataFrameGroupBy.resample``.
877
+
878
+ Returns
879
+ -------
880
+ ts_df : TimeSeriesDataFrame
881
+ A new time series dataframe with time series resampled at the new frequency. Output may contain missing
882
+ values represented by ``NaN`` if original data does not have information for the given period.
883
+
884
+ Examples
885
+ --------
886
+ Convert irregularly-sampled time series data to a regular index
887
+
888
+ >>> ts_df
889
+ target
890
+ item_id timestamp
891
+ 0 2019-01-01 NaN
892
+ 2019-01-03 1.0
893
+ 2019-01-06 2.0
894
+ 2019-01-07 NaN
895
+ 1 2019-02-04 3.0
896
+ 2019-02-07 4.0
897
+ >>> ts_df.convert_frequency(freq="D")
898
+ target
899
+ item_id timestamp
900
+ 0 2019-01-01 NaN
901
+ 2019-01-02 NaN
902
+ 2019-01-03 1.0
903
+ 2019-01-04 NaN
904
+ 2019-01-05 NaN
905
+ 2019-01-06 2.0
906
+ 2019-01-07 NaN
907
+ 1 2019-02-04 3.0
908
+ 2019-02-05 NaN
909
+ 2019-02-06 NaN
910
+ 2019-02-07 4.0
911
+
912
+ Downsample quarterly data to yearly frequency
913
+
914
+ >>> ts_df
915
+ target
916
+ item_id timestamp
917
+ 0 2020-03-31 1.0
918
+ 2020-06-30 2.0
919
+ 2020-09-30 3.0
920
+ 2020-12-31 4.0
921
+ 2021-03-31 5.0
922
+ 2021-06-30 6.0
923
+ 2021-09-30 7.0
924
+ 2021-12-31 8.0
925
+ >>> ts_df.convert_frequency("Y")
926
+ target
927
+ item_id timestamp
928
+ 0 2020-12-31 2.5
929
+ 2021-12-31 6.5
930
+ >>> ts_df.convert_frequency("Y", agg_numeric="sum")
931
+ target
932
+ item_id timestamp
933
+ 0 2020-12-31 10.0
934
+ 2021-12-31 26.0
935
+ """
936
+ if self.freq == pd.tseries.frequencies.to_offset(freq).freqstr:
937
+ return self
938
+
939
+ # We need to aggregate categorical columns separately because .agg("mean") deletes all non-numeric columns
940
+ aggregation = {}
941
+ for col in self.columns:
942
+ if pd.api.types.is_numeric_dtype(self.dtypes[col]):
943
+ aggregation[col] = agg_numeric
944
+ else:
945
+ aggregation[col] = agg_categorical
946
+
947
+ resampled_df = TimeSeriesDataFrame(
948
+ self.groupby(level=ITEMID, sort=False).resample(freq, level=TIMESTAMP, **kwargs).agg(aggregation)
949
+ )
950
+ resampled_df.static_features = self.static_features
951
+ return resampled_df
@@ -1,15 +1,13 @@
1
1
  import logging
2
2
  import time
3
- from typing import Any, Dict, List, Optional, Tuple, Type, Union
3
+ from typing import Any, Dict, List, Optional, Type, Union
4
4
 
5
- import numpy as np
6
5
  import pandas as pd
7
6
 
8
7
  from autogluon.core.learner import AbstractLearner
9
- from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
8
+ from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
10
9
  from autogluon.timeseries.evaluator import TimeSeriesEvaluator
11
10
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
12
- from autogluon.timeseries.splitter import AbstractTimeSeriesSplitter, LastWindowSplitter
13
11
  from autogluon.timeseries.trainer import AbstractTimeSeriesTrainer, AutoTimeSeriesTrainer
14
12
  from autogluon.timeseries.utils.features import TimeSeriesFeatureGenerator
15
13
  from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
@@ -31,7 +29,6 @@ class TimeSeriesLearner(AbstractLearner):
31
29
  eval_metric: Optional[str] = None,
32
30
  eval_metric_seasonal_period: Optional[int] = None,
33
31
  prediction_length: int = 1,
34
- ignore_time_index: bool = False,
35
32
  cache_predictions: bool = True,
36
33
  **kwargs,
37
34
  ):
@@ -42,11 +39,7 @@ class TimeSeriesLearner(AbstractLearner):
42
39
  self.target = target
43
40
  self.known_covariates_names = [] if known_covariates_names is None else known_covariates_names
44
41
  self.prediction_length = prediction_length
45
- self.quantile_levels = kwargs.get(
46
- "quantile_levels",
47
- kwargs.get("quantiles", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]),
48
- )
49
- self.ignore_time_index = ignore_time_index
42
+ self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
50
43
  self.cache_predictions = cache_predictions
51
44
 
52
45
  self.feature_generator = TimeSeriesFeatureGenerator(
@@ -160,28 +153,13 @@ class TimeSeriesLearner(AbstractLearner):
160
153
  )
161
154
 
162
155
  forecast_index = get_forecast_horizon_index_ts_dataframe(data, prediction_length=self.prediction_length)
163
- if self.ignore_time_index:
164
- logger.warning(
165
- "Because `ignore_time_index=True`, the predictor will ignore the time index of `known_covariates`. "
166
- "Please make sure that `known_covariates` contain only the future values of the known covariates "
167
- "(and the past values are not included)."
156
+ try:
157
+ known_covariates = known_covariates.loc[forecast_index]
158
+ except KeyError:
159
+ raise ValueError(
160
+ f"known_covariates should include the values for prediction_length={self.prediction_length} "
161
+ "many time steps into the future."
168
162
  )
169
- known_covariates = known_covariates.loc[forecast_index.unique(level=ITEMID)]
170
- if (known_covariates.num_timesteps_per_item() < self.prediction_length).any():
171
- raise ValueError(
172
- f"known_covariates should include the values for prediction_length={self.prediction_length} "
173
- "many time steps into the future."
174
- )
175
- known_covariates = known_covariates.slice_by_timestep(None, self.prediction_length)
176
- known_covariates.index = forecast_index
177
- else:
178
- try:
179
- known_covariates = known_covariates.loc[forecast_index]
180
- except KeyError:
181
- raise ValueError(
182
- f"known_covariates should include the values for prediction_length={self.prediction_length} "
183
- "many time steps into the future."
184
- )
185
163
  return known_covariates
186
164
 
187
165
  def predict(
@@ -4,9 +4,12 @@ from .local import (
4
4
  ARIMAModel,
5
5
  AutoARIMAModel,
6
6
  AutoETSModel,
7
+ AverageModel,
7
8
  DynamicOptimizedThetaModel,
8
9
  ETSModel,
9
10
  NaiveModel,
11
+ NPTSModel,
12
+ SeasonalAverageModel,
10
13
  SeasonalNaiveModel,
11
14
  ThetaModel,
12
15
  ThetaStatsmodelsModel,
@@ -24,6 +27,7 @@ __all__ = [
24
27
  "DirectTabularModel",
25
28
  "RecursiveTabularModel",
26
29
  "NaiveModel",
30
+ "NPTSModel",
27
31
  "SeasonalNaiveModel",
28
32
  "AutoETSModel",
29
33
  "AutoARIMAModel",
@@ -105,10 +105,7 @@ class AbstractTimeSeriesModel(AbstractModel):
105
105
 
106
106
  self.freq: str = freq
107
107
  self.prediction_length: int = prediction_length
108
- self.quantile_levels = kwargs.get(
109
- "quantile_levels",
110
- kwargs.get("quantiles", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]),
111
- )
108
+ self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
112
109
  self._oof_predictions: Optional[TimeSeriesDataFrame] = None
113
110
 
114
111
  def __repr__(self) -> str:
@@ -1,6 +1,7 @@
1
1
  import joblib.externals.loky
2
2
 
3
- from .naive import NaiveModel, SeasonalNaiveModel
3
+ from .naive import AverageModel, NaiveModel, SeasonalAverageModel, SeasonalNaiveModel
4
+ from .npts import NPTSModel
4
5
  from .statsforecast import AutoARIMAModel, AutoETSModel, DynamicOptimizedThetaModel, ThetaModel
5
6
  from .statsmodels import ARIMAModel, ETSModel, ThetaStatsmodelsModel
6
7
 
@@ -34,6 +34,9 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
34
34
  Argument that can be passed to the underlying local model.
35
35
  default_n_jobs : Union[int, float]
36
36
  Default number of CPU cores used to train models. If float, this fraction of CPU cores will be used.
37
+ default_max_ts_length : Optional[int]
38
+ If not None, only the last ``max_ts_length`` time steps of each time series will be used to train the model.
39
+ This significantly speeds up fitting and usually leads to no change in accuracy.
37
40
  init_time_in_seconds : int
38
41
  Time that it takes to initialize the model in seconds (e.g., because of JIT compilation by Numba).
39
42
  If time_limit is below this number, model won't be trained.
@@ -41,6 +44,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
41
44
 
42
45
  allowed_local_model_args: List[str] = []
43
46
  default_n_jobs: Union[int, float] = AG_DEFAULT_N_JOBS
47
+ default_max_ts_length: Optional[int] = 2500
44
48
  init_time_in_seconds: int = 0
45
49
 
46
50
  def __init__(
@@ -65,7 +69,7 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
65
69
  raise ValueError(f"n_jobs must be a float between 0 and 1 or an integer (received n_jobs = {n_jobs})")
66
70
  # Default values, potentially overridden inside _fit()
67
71
  self.use_fallback_model = hyperparameters.pop("use_fallback_model", True)
68
- self.max_ts_length = hyperparameters.pop("max_ts_length", 2500)
72
+ self.max_ts_length = hyperparameters.pop("max_ts_length", self.default_max_ts_length)
69
73
 
70
74
  super().__init__(
71
75
  path=path,
@@ -1,3 +1,5 @@
1
+ from typing import Callable
2
+
1
3
  import numpy as np
2
4
  import pandas as pd
3
5
 
@@ -11,6 +13,13 @@ class NaiveModel(AbstractLocalModel):
11
13
  estimated from the empirical distribution of the residuals.
12
14
  As described in https://otexts.com/fpp3/prediction-intervals.html
13
15
 
16
+ Other Parameters
17
+ ----------------
18
+ n_jobs : int or float, default = 0.5
19
+ Number of CPU cores used to fit the models in parallel.
20
+ When set to a float between 0.0 and 1.0, that fraction of available CPU cores is used.
21
+ When set to a positive integer, that many cores are used.
22
+ When set to -1, all CPU cores are used.
14
23
  """
15
24
 
16
25
  allowed_local_model_args = ["seasonal_period"]
@@ -45,6 +54,11 @@ class SeasonalNaiveModel(AbstractLocalModel):
45
54
  specified manually by providing an integer > 1.
46
55
  If seasonal_period (inferred or provided) is equal to 1, will fall back to Naive forecast.
47
56
  Seasonality will also be disabled, if the length of the time series is < seasonal_period.
57
+ n_jobs : int or float, default = 0.5
58
+ Number of CPU cores used to fit the models in parallel.
59
+ When set to a float between 0.0 and 1.0, that fraction of available CPU cores is used.
60
+ When set to a positive integer, that many cores are used.
61
+ When set to -1, all CPU cores are used.
48
62
  """
49
63
 
50
64
  allowed_local_model_args = ["seasonal_period"]
@@ -60,3 +74,91 @@ class SeasonalNaiveModel(AbstractLocalModel):
60
74
  quantile_levels=self.quantile_levels,
61
75
  seasonal_period=local_model_args["seasonal_period"],
62
76
  )
77
+
78
+
79
+ def _get_quantile_function(q: float) -> Callable:
80
+ """Returns a function with name "q" that computes the q'th quantile of a pandas.Series."""
81
+
82
+ def quantile_fn(x: pd.Series) -> pd.Series:
83
+ return x.quantile(q)
84
+
85
+ quantile_fn.__name__ = str(q)
86
+ return quantile_fn
87
+
88
+
89
+ class AverageModel(AbstractLocalModel):
90
+ """Baseline model that sets the forecast equal to the historic average or quantile.
91
+
92
+ Other Parameters
93
+ ----------------
94
+ n_jobs : int or float, default = 0.5
95
+ Number of CPU cores used to fit the models in parallel.
96
+ When set to a float between 0.0 and 1.0, that fraction of available CPU cores is used.
97
+ When set to a positive integer, that many cores are used.
98
+ When set to -1, all CPU cores are used.
99
+ max_ts_length : Optional[int], default = None
100
+ If not None, only the last ``max_ts_length`` time steps of each time series will be used to train the model.
101
+ This significantly speeds up fitting and usually leads to no change in accuracy.
102
+ """
103
+
104
+ allowed_local_model_args = ["seasonal_period"]
105
+ default_max_ts_length = None
106
+
107
+ def _predict_with_local_model(
108
+ self,
109
+ time_series: pd.Series,
110
+ local_model_args: dict,
111
+ ) -> pd.DataFrame:
112
+ agg_functions = ["mean"] + [_get_quantile_function(q) for q in self.quantile_levels]
113
+ stats_marginal = time_series.agg(agg_functions)
114
+ stats_repeated = np.tile(stats_marginal.values, [self.prediction_length, 1])
115
+ return pd.DataFrame(stats_repeated, columns=stats_marginal.index)
116
+
117
+
118
+ class SeasonalAverageModel(AbstractLocalModel):
119
+ """Baseline model that sets the forecast equal to the historic average or quantile in the same season.
120
+
121
+ Other Parameters
122
+ ----------------
123
+ seasonal_period : int or None, default = None
124
+ Number of time steps in a complete seasonal cycle for seasonal models. For example, 7 for daily data with a
125
+ weekly cycle or 12 for monthly data with an annual cycle.
126
+ When set to None, seasonal_period will be inferred from the frequency of the training data. Can also be
127
+ specified manually by providing an integer > 1.
128
+ If seasonal_period (inferred or provided) is equal to 1, will fall back to Naive forecast.
129
+ Seasonality will also be disabled, if the length of the time series is < seasonal_period.
130
+ n_jobs : int or float, default = 0.5
131
+ Number of CPU cores used to fit the models in parallel.
132
+ When set to a float between 0.0 and 1.0, that fraction of available CPU cores is used.
133
+ When set to a positive integer, that many cores are used.
134
+ When set to -1, all CPU cores are used.
135
+ max_ts_length : Optional[int], default = None
136
+ If not None, only the last ``max_ts_length`` time steps of each time series will be used to train the model.
137
+ This significantly speeds up fitting and usually leads to no change in accuracy.
138
+ """
139
+
140
+ allowed_local_model_args = ["seasonal_period"]
141
+ default_max_ts_length = None
142
+
143
+ def _predict_with_local_model(
144
+ self,
145
+ time_series: pd.Series,
146
+ local_model_args: dict,
147
+ ) -> pd.DataFrame:
148
+ seasonal_period = local_model_args["seasonal_period"]
149
+ agg_functions = ["mean"] + [_get_quantile_function(q) for q in self.quantile_levels]
150
+
151
+ # Compute mean & quantiles for each season
152
+ ts_df = time_series.reset_index(drop=True).to_frame()
153
+ ts_df["season"] = ts_df.index % seasonal_period
154
+ stats_per_season = ts_df.groupby("season")[self.target].agg(agg_functions)
155
+
156
+ next_season = ts_df["season"].iloc[-1] + 1
157
+ season_in_forecast_horizon = np.arange(next_season, next_season + self.prediction_length) % seasonal_period
158
+ result = stats_per_season.reindex(season_in_forecast_horizon)
159
+
160
+ if np.any(result.isna().values):
161
+ # Use statistics over all timesteps to fill values for seasons that are missing from training data
162
+ stats_marginal = time_series.agg(agg_functions)
163
+ result = result.fillna(stats_marginal)
164
+ return result
@@ -0,0 +1,59 @@
1
+ import pandas as pd
2
+
3
+ from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel
4
+
5
+
6
+ class NPTSModel(AbstractLocalModel):
7
+ """Non-Parametric Time Series Forecaster.
8
+
9
+ This model is especially well suited for forecasting sparse or intermittent time series with many zero values.
10
+
11
+ Based on `gluonts.model.npts.NPTSPredictor <https://ts.gluon.ai/stable/api/gluonts/gluonts.model.npts.html>`_.
12
+ See GluonTS documentation for more information about the model.
13
+
14
+ Other Parameters
15
+ ----------------
16
+ kernel_type : {"exponential", "uniform"}, default = "exponential"
17
+ Kernel used by the model.
18
+ exp_kernel_weights : float, default = 1.0
19
+ Scaling factor used in the exponential kernel.
20
+ use_seasonal_model : bool, default = True
21
+ Whether to use the seasonal variant of the model.
22
+ n_jobs : int or float, default = 0.5
23
+ Number of CPU cores used to fit the models in parallel.
24
+ When set to a float between 0.0 and 1.0, that fraction of available CPU cores is used.
25
+ When set to a positive integer, that many cores are used.
26
+ When set to -1, all CPU cores are used.
27
+ max_ts_length : Optional[int], default = 2500
28
+ If not None, only the last ``max_ts_length`` time steps of each time series will be used to train the model.
29
+ This significantly speeds up fitting and usually leads to no change in accuracy.
30
+ """
31
+
32
+ allowed_local_model_args = [
33
+ "kernel_type",
34
+ "exp_kernel_weights",
35
+ "use_seasonal_model",
36
+ "seasonal_period",
37
+ ]
38
+
39
+ def _predict_with_local_model(
40
+ self,
41
+ time_series: pd.Series,
42
+ local_model_args: dict,
43
+ ) -> pd.DataFrame:
44
+ from gluonts.model.npts import NPTSPredictor
45
+
46
+ local_model_args.pop("seasonal_period")
47
+
48
+ predictor = NPTSPredictor(
49
+ freq=self.freq,
50
+ prediction_length=self.prediction_length,
51
+ **local_model_args,
52
+ )
53
+ ts = time_series.copy(deep=False)
54
+ ts.index = ts.index.to_period()
55
+ forecast = predictor.predict_time_series(ts, num_samples=100)
56
+ forecast_dict = {"mean": forecast.mean}
57
+ for q in self.quantile_levels:
58
+ forecast_dict[str(q)] = forecast.quantile(q)
59
+ return pd.DataFrame(forecast_dict)
@@ -255,8 +255,6 @@ class ThetaModel(AbstractStatsForecastModel):
255
255
  This significantly speeds up fitting and usually leads to no change in accuracy.
256
256
  """
257
257
 
258
- max_ts_length = 3000
259
-
260
258
  allowed_local_model_args = [
261
259
  "decomposition_type",
262
260
  "seasonal_period",
@@ -4,7 +4,6 @@ import re
4
4
  from collections import defaultdict
5
5
  from typing import Any, Dict, List, Optional, Type, Union
6
6
 
7
- import autogluon.timeseries as agts
8
7
  from autogluon.common import space
9
8
  from autogluon.core import constants
10
9
 
@@ -12,14 +11,17 @@ from . import (
12
11
  ARIMAModel,
13
12
  AutoARIMAModel,
14
13
  AutoETSModel,
14
+ AverageModel,
15
15
  DeepARModel,
16
16
  DirectTabularModel,
17
17
  DLinearModel,
18
18
  DynamicOptimizedThetaModel,
19
19
  ETSModel,
20
20
  NaiveModel,
21
+ NPTSModel,
21
22
  PatchTSTModel,
22
23
  RecursiveTabularModel,
24
+ SeasonalAverageModel,
23
25
  SeasonalNaiveModel,
24
26
  SimpleFeedForwardModel,
25
27
  TemporalFusionTransformerModel,
@@ -42,11 +44,14 @@ MODEL_TYPES = dict(
42
44
  TemporalFusionTransformer=TemporalFusionTransformerModel,
43
45
  RecursiveTabular=RecursiveTabularModel,
44
46
  DirectTabular=DirectTabularModel,
47
+ Average=AverageModel,
48
+ SeasonalAverage=SeasonalAverageModel,
45
49
  Naive=NaiveModel,
46
50
  SeasonalNaive=SeasonalNaiveModel,
47
51
  AutoETS=AutoETSModel,
48
52
  AutoARIMA=AutoARIMAModel,
49
53
  DynamicOptimizedTheta=DynamicOptimizedThetaModel,
54
+ NPTS=NPTSModel,
50
55
  Theta=ThetaModel,
51
56
  ARIMA=ARIMAModel,
52
57
  ETS=ETSModel,
@@ -2,12 +2,12 @@ import logging
2
2
  import os
3
3
  import pprint
4
4
  import time
5
- import warnings
6
5
  from typing import Any, Dict, List, Optional, Type, Union
7
6
 
8
7
  import pandas as pd
9
8
  import pytorch_lightning as pl
10
9
 
10
+ from autogluon.common.utils.deprecated_utils import Deprecated_args
11
11
  from autogluon.common.utils.log_utils import set_logger_verbosity
12
12
  from autogluon.common.utils.utils import check_saved_predictor_version, setup_outputdir
13
13
  from autogluon.core.utils.decorators import apply_presets
@@ -27,9 +27,9 @@ SUPPORTED_FREQUENCIES = {"D", "W", "M", "Q", "A", "Y", "H", "T", "min", "S"}
27
27
  class TimeSeriesPredictor:
28
28
  """AutoGluon ``TimeSeriesPredictor`` predicts future values of multiple related time series.
29
29
 
30
- ``TimeSeriesPredictor`` provides probabilistic (distributional) multi-step-ahead forecasts for univariate time
31
- series. The forecast includes both the mean (i.e., conditional expectation of future values given the past), as
32
- well as the quantiles of the forecast distribution, indicating the range of possible future outcomes.
30
+ ``TimeSeriesPredictor`` provides probabilistic (quantile) multi-step-ahead forecasts for univariate time series.
31
+ The forecast includes both the mean (i.e., conditional expectation of future values given the past), as well as the
32
+ quantiles of the forecast distribution, indicating the range of possible future outcomes.
33
33
 
34
34
  ``TimeSeriesPredictor`` fits both "global" deep learning models that are shared across all time series
35
35
  (e.g., DeepAR, Transformer), as well as "local" statistical models that are fit to each individual time series
@@ -47,6 +47,18 @@ class TimeSeriesPredictor:
47
47
  The forecast horizon, i.e., How many time steps into the future the models should be trained to predict.
48
48
  For example, if time series contain daily observations, setting ``prediction_length = 3`` will train
49
49
  models that predict up to 3 days into the future from the most recent observation.
50
+ freq : str, optional
51
+ Frequency of the time series data (see `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
52
+ for available frequencies). For example, ``"D"`` for daily data or ``"H"`` for hourly data.
53
+
54
+ By default, the predictor will attempt to automatically infer the frequency from the data. This argument should
55
+ only be set in two cases:
56
+
57
+ 1. The time series data has irregular timestamps, so frequency cannot be inferred automatically.
58
+ 2. You would like to resample the original data at a different frequency (for example, convert hourly measurements into daily measurements).
59
+
60
+ If ``freq`` is provided when creating the predictor, all data passed to the predictor will be automatically
61
+ resampled at this frequency.
50
62
  eval_metric : str, default = "mean_wQuantileLoss"
51
63
  Metric by which predictions will be ultimately evaluated on future test data. AutoGluon tunes hyperparameters
52
64
  in order to improve this metric on validation data, and ranks models (on validation data) according to this
@@ -62,7 +74,7 @@ class TimeSeriesPredictor:
62
74
  For more information about these metrics, see https://docs.aws.amazon.com/forecast/latest/dg/metrics.html.
63
75
  eval_metric_seasonal_period : int, optional
64
76
  Seasonal period used to compute the mean absolute scaled error (MASE) evaluation metric. This parameter is only
65
- used if ``eval_metric="MASE"`. See https://en.wikipedia.org/wiki/Mean_absolute_scaled_error for more details.
77
+ used if ``eval_metric="MASE"``. See https://en.wikipedia.org/wiki/Mean_absolute_scaled_error for more details.
66
78
  Defaults to ``None``, in which case the seasonal period is computed based on the data frequency.
67
79
  known_covariates_names: List[str], optional
68
80
  Names of the covariates that are known in advance for all time steps in the forecast horizon. These are also
@@ -79,7 +91,6 @@ class TimeSeriesPredictor:
79
91
  quantile_levels : List[float], optional
80
92
  List of increasing decimals that specifies which quantiles should be estimated when making distributional
81
93
  forecasts. Defaults to ``[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]``.
82
- Can alternatively be provided with the keyword argument ``quantiles``.
83
94
  path : str, optional
84
95
  Path to the directory where models and intermediate outputs will be saved. Defaults to a timestamped folder
85
96
  ``AutogluonModels/ag-[TIMESTAMP]`` that will be created in the working directory.
@@ -89,10 +100,6 @@ class TimeSeriesPredictor:
89
100
  If using ``logging``, you can alternatively control amount of information printed via ``logger.setLevel(L)``,
90
101
  where ``L`` ranges from 0 to 50 (Note: higher values of ``L`` correspond to fewer print statements, opposite
91
102
  of verbosity levels).
92
- ignore_time_index : bool, default = False
93
- If True, the predictor will ignore the datetime indexes during both training and testing, and will replace
94
- the data indexes with dummy timestamps in second frequency. In this case, the forecast output time indexes will
95
- be arbitrary values, and seasonality will be turned off for local models.
96
103
  cache_predictions : bool, default = True
97
104
  If True, the predictor will cache and reuse the predictions made by individual models whenever
98
105
  :meth:`~autogluon.timeseries.TimeSeriesPredictor.predict`, :meth:`~autogluon.timeseries.TimeSeriesPredictor.leaderboard`,
@@ -106,29 +113,28 @@ class TimeSeriesPredictor:
106
113
  predictor_file_name = "predictor.pkl"
107
114
  _predictor_version_file_name = "__version__"
108
115
 
116
+ @Deprecated_args(min_version_to_warn="0.9", min_version_to_error="1.0", ignore_time_index=None)
109
117
  def __init__(
110
118
  self,
111
119
  target: Optional[str] = None,
112
120
  known_covariates_names: Optional[List[str]] = None,
113
121
  prediction_length: int = 1,
122
+ freq: str = None,
114
123
  eval_metric: Optional[str] = None,
115
124
  eval_metric_seasonal_period: Optional[int] = None,
116
125
  path: Optional[str] = None,
117
126
  verbosity: int = 2,
118
127
  quantile_levels: Optional[List[float]] = None,
119
- ignore_time_index: bool = False,
120
128
  cache_predictions: bool = True,
121
- learner_type: Type[AbstractLearner] = TimeSeriesLearner,
129
+ learner_type: Optional[Type[AbstractLearner]] = None,
122
130
  learner_kwargs: Optional[dict] = None,
123
131
  label: Optional[str] = None,
124
- quantiles: Optional[List[float]] = None,
125
- validation_splitter: Optional[Any] = None,
132
+ ignore_time_index: bool = False,
126
133
  ):
127
134
  self.verbosity = verbosity
128
135
  set_logger_verbosity(self.verbosity, logger=logger)
129
136
  self.path = setup_outputdir(path)
130
137
 
131
- self.ignore_time_index = ignore_time_index
132
138
  self.cache_predictions = cache_predictions
133
139
  if target is not None and label is not None:
134
140
  raise ValueError("Both `label` and `target` are specified. Please specify at most one of these arguments.")
@@ -147,23 +153,19 @@ class TimeSeriesPredictor:
147
153
  self.known_covariates_names = known_covariates_names
148
154
 
149
155
  self.prediction_length = prediction_length
156
+ self.freq = freq
157
+ if self.freq is not None:
158
+ # Standardize frequency string (e.g., "min" -> "T", "Y" -> "A-DEC")
159
+ std_freq = pd.tseries.frequencies.to_offset(self.freq).freqstr
160
+ if std_freq != str(self.freq):
161
+ logger.info(f"Frequency '{self.freq}' stored as '{std_freq}'")
162
+ self.freq = std_freq
150
163
  self.eval_metric = eval_metric
151
164
  self.eval_metric_seasonal_period = eval_metric_seasonal_period
152
165
  if quantile_levels is None:
153
166
  quantile_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
154
167
  self.quantile_levels = sorted(quantile_levels)
155
168
 
156
- if validation_splitter is not None:
157
- warnings.warn(
158
- "`validation_splitter` argument has been deprecated as of v0.8.0. "
159
- "Please use the `num_val_windows` argument of `TimeSeriesPredictor.fit` instead."
160
- )
161
- if quantiles is not None:
162
- warnings.warn(
163
- "`quantiles` argument has been deprecated as of v0.8.0. "
164
- "Please use the `quantile_levels` argument instead."
165
- )
166
-
167
169
  if learner_kwargs is None:
168
170
  learner_kwargs = {}
169
171
  learner_kwargs = learner_kwargs.copy()
@@ -176,10 +178,12 @@ class TimeSeriesPredictor:
176
178
  known_covariates_names=self.known_covariates_names,
177
179
  prediction_length=self.prediction_length,
178
180
  quantile_levels=self.quantile_levels,
179
- ignore_time_index=ignore_time_index,
180
181
  cache_predictions=self.cache_predictions,
181
182
  )
182
183
  )
184
+ # Using `TimeSeriesLearner` as default argument breaks doc generation with Sphnix
185
+ if learner_type is None:
186
+ learner_type = TimeSeriesLearner
183
187
  self._learner: AbstractLearner = learner_type(**learner_kwargs)
184
188
  self._learner_type = type(self._learner)
185
189
 
@@ -187,57 +191,109 @@ class TimeSeriesPredictor:
187
191
  def _trainer(self) -> AbstractTimeSeriesTrainer:
188
192
  return self._learner.load_trainer() # noqa
189
193
 
190
- def _check_and_prepare_data_frame(self, df: Union[TimeSeriesDataFrame, pd.DataFrame]) -> TimeSeriesDataFrame:
191
- """Ensure that TimeSeriesDataFrame has a frequency, or replace its time index with a dummy if
192
- ``self.ignore_time_index`` is True.
193
- """
194
- if df is None:
195
- return df
196
- if not isinstance(df, TimeSeriesDataFrame):
197
- if isinstance(df, pd.DataFrame):
198
- try:
199
- df = TimeSeriesDataFrame(df)
200
- except:
201
- raise ValueError(
202
- f"Provided data of type {type(df)} cannot be automatically converted to a TimeSeriesDataFrame."
203
- )
204
- else:
194
+ def _to_data_frame(
195
+ self,
196
+ data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
197
+ name: str = "data",
198
+ ) -> "TimeSeriesDataFrame":
199
+ if isinstance(data, TimeSeriesDataFrame):
200
+ return data
201
+ elif isinstance(data, (pd.DataFrame, str)):
202
+ try:
203
+ data = TimeSeriesDataFrame(data)
204
+ except:
205
205
  raise ValueError(
206
- f"Please provide data in TimeSeriesDataFrame format (received an object of type {type(df)})."
206
+ f"Provided {name} of type {type(data)} cannot be automatically converted to a TimeSeriesDataFrame."
207
207
  )
208
- if self.ignore_time_index:
209
- df = df.get_reindexed_view(freq="S")
208
+ return data
209
+ else:
210
+ raise TypeError(
211
+ f"{name} must be a TimeSeriesDataFrame or pandas.DataFrame or string (path to data) "
212
+ f"but received an object of type {type(data)}."
213
+ )
214
+
215
+ def _check_and_prepare_data_frame(
216
+ self,
217
+ data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
218
+ name: str = "data",
219
+ ) -> TimeSeriesDataFrame:
220
+ """Ensure that TimeSeriesDataFrame has a sorted index, valid frequency, and contains no missing values.
221
+
222
+ If self.freq is None, then self.freq of the predictor will be set to the frequency of the data.
223
+
224
+ Parameters
225
+ ----------
226
+ data : Union[TimeSeriesDataFrame, pd.DataFrame, str]
227
+ Data as a data frame or path to file storing the data.
228
+ name : str
229
+ Name of the data that will be used in log messages (e.g., 'train_data', 'tuning_data', or 'data').
230
+
231
+ Returns
232
+ -------
233
+ df : TimeSeriesDataFrame
234
+ Preprocessed data in TimeSeriesDataFrame format.
235
+ """
236
+ df = self._to_data_frame(data, name=name)
210
237
  # MultiIndex.is_monotonic_increasing checks if index is sorted by ["item_id", "timestamp"]
211
238
  if not df.index.is_monotonic_increasing:
212
239
  df = df.sort_index()
213
240
  df._cached_freq = None # in case frequency was incorrectly cached as IRREGULAR_TIME_INDEX_FREQSTR
214
- if df.freq is None:
215
- raise ValueError(
216
- "Frequency not provided and cannot be inferred. This is often due to the "
217
- "time index of the data being irregularly sampled. Please ensure that the "
218
- "data set used has a uniform time index, or create the `TimeSeriesPredictor` "
219
- "setting `ignore_time_index=True`."
220
- )
221
- # Check if frequency is supported
241
+
242
+ # Ensure that data has a regular frequency that matches the predictor frequency
243
+ if self.freq is None:
244
+ if df.freq is None:
245
+ raise ValueError(
246
+ f"Frequency of {name} is not provided and cannot be inferred. Please set the expected data "
247
+ f"frequency when creating the predictor with `TimeSeriesPredictor(freq=...)` or ensure that "
248
+ f"the data has a regular time index with `{name}.to_regular_index(freq=...)`"
249
+ )
250
+ else:
251
+ self.freq = df.freq
252
+ logger.info(f"Inferred data frequency: {df.freq}")
253
+ else:
254
+ if df.freq != self.freq:
255
+ logger.warning(f"{name} with frequency '{df.freq}' has been resampled to frequency '{self.freq}'.")
256
+ df = df.convert_frequency(freq=self.freq)
257
+
258
+ # TODO: Add support for all pandas frequencies
222
259
  offset = pd.tseries.frequencies.to_offset(df.freq)
223
260
  norm_freq_str = offset.name.split("-")[0]
224
261
  if norm_freq_str not in SUPPORTED_FREQUENCIES:
225
- warnings.warn(
226
- f"Detected frequency '{norm_freq_str}' is not supported by TimeSeriesPredictor. This may lead to some "
262
+ logger.warning(
263
+ f"Frequency '{norm_freq_str}' is not supported by TimeSeriesPredictor. This may lead to some "
227
264
  f"models not working as intended. "
228
265
  f"Please convert the timestamps to one of the supported frequencies: {SUPPORTED_FREQUENCIES}. "
229
266
  f"See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases for details."
230
267
  )
268
+
269
+ # Fill missing values
231
270
  if df.isna().values.any():
232
- raise ValueError(
233
- "TimeSeriesPredictor does not yet support missing values. "
234
- "Please make sure that the provided data contains no NaNs."
271
+ # FIXME: Do not automatically fill NaNs here, handle missing values at the level of individual models.
272
+ # FIXME: Current solution leads to incorrect metric computation if missing values are present
273
+ logger.warning(
274
+ f"{name} contains missing values represented by NaN. "
275
+ f"They have been filled by carrying forward the last valid observation."
235
276
  )
277
+ df = df.fill_missing_values()
278
+ if df.isna().values.any():
279
+ raise ValueError(f"Some time series in {name} consist completely of NaN values. Please remove them.")
280
+
281
+ # Ensure that time series are long enough
236
282
  if (df.num_timesteps_per_item() <= 2).any():
237
- # Time series with length <= 2 make frequency inference impossible
238
- raise ValueError("Detected time series with length <= 2 in data. Please remove them from the dataset.")
283
+ # FIXME: Gracefully handle short time series: Ignore time series with length <= 2 in train_data,
284
+ # FIXME: otherwise generate naive forecast for short time series
285
+ raise ValueError(f"Detected time series with length <= 2 in {name}. Please remove them from the dataset.")
239
286
  return df
240
287
 
288
+ def _check_data_for_evaluation(self, data: TimeSeriesDataFrame, name: str = "data"):
289
+ """Make sure that provided evaluation data includes both historic and future time series values."""
290
+ if data.num_timesteps_per_item().min() <= self.prediction_length:
291
+ raise ValueError(
292
+ f"Cannot reserve last prediction_length={self.prediction_length} time steps for evaluation in some "
293
+ f"time series in {name}. Please make sure that {name} includes both historic and future data, and that"
294
+ f"all time series have length > prediction_length (at least {self.prediction_length + 1})"
295
+ )
296
+
241
297
  def _validate_num_val_windows(
242
298
  self,
243
299
  train_data: TimeSeriesDataFrame,
@@ -283,8 +339,8 @@ class TimeSeriesPredictor:
283
339
  @apply_presets(TIMESERIES_PRESETS_CONFIGS)
284
340
  def fit(
285
341
  self,
286
- train_data: Union[TimeSeriesDataFrame, pd.DataFrame],
287
- tuning_data: Optional[Union[TimeSeriesDataFrame, pd.DataFrame]] = None,
342
+ train_data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
343
+ tuning_data: Optional[Union[TimeSeriesDataFrame, pd.DataFrame, str]] = None,
288
344
  time_limit: Optional[int] = None,
289
345
  presets: Optional[str] = None,
290
346
  hyperparameters: Dict[Union[str, Type], Any] = None,
@@ -300,7 +356,7 @@ class TimeSeriesPredictor:
300
356
 
301
357
  Parameters
302
358
  ----------
303
- train_data : Union[TimeSeriesDataFrame, pd.DataFrame]
359
+ train_data : Union[TimeSeriesDataFrame, pd.DataFrame, str]
304
360
  Training data in the :class:`~autogluon.timeseries.TimeSeriesDataFrame` format. For best performance, all
305
361
  time series should have length ``> 2 * prediction_length``.
306
362
 
@@ -323,7 +379,7 @@ class TimeSeriesPredictor:
323
379
  If provided data is an instance of pandas DataFrame, AutoGluon will attempt to automatically convert it
324
380
  to a ``TimeSeriesDataFrame``.
325
381
 
326
- tuning_data : Union[TimeSeriesDataFrame, pd.DataFrame], optional
382
+ tuning_data : Union[TimeSeriesDataFrame, pd.DataFrame, str], optional
327
383
  Data reserved for model selection and hyperparameter tuning, rather than training individual models. Also
328
384
  used to compute the validation scores. Note that only the last ``prediction_length`` time steps of each
329
385
  time series are used for computing the validation score.
@@ -479,9 +535,6 @@ class TimeSeriesPredictor:
479
535
  if hyperparameters is None:
480
536
  hyperparameters = "default"
481
537
 
482
- train_data = self._check_and_prepare_data_frame(train_data)
483
- tuning_data = self._check_and_prepare_data_frame(tuning_data)
484
-
485
538
  if verbosity is None:
486
539
  verbosity = self.verbosity
487
540
  set_logger_verbosity(verbosity)
@@ -489,12 +542,15 @@ class TimeSeriesPredictor:
489
542
  fit_args = dict(
490
543
  prediction_length=self.prediction_length,
491
544
  target=self.target,
545
+ eval_metric=self.eval_metric,
546
+ quantile_levels=self.quantile_levels,
547
+ freq=self.freq,
492
548
  time_limit=time_limit,
493
- evaluation_metric=self.eval_metric,
494
549
  hyperparameters=hyperparameters,
495
550
  hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
496
551
  excluded_model_types=excluded_model_types,
497
552
  num_val_windows=num_val_windows,
553
+ refit_full=refit_full,
498
554
  enable_ensemble=enable_ensemble,
499
555
  random_seed=random_seed,
500
556
  verbosity=verbosity,
@@ -504,13 +560,17 @@ class TimeSeriesPredictor:
504
560
  if presets is not None:
505
561
  logger.info(f"Setting presets to: {presets}")
506
562
  logger.info("Fitting with arguments:")
507
- logger.info(f"{pprint.pformat(fit_args)}")
563
+ logger.info(f"{pprint.pformat(fit_args)}\n")
564
+
565
+ train_data = self._check_and_prepare_data_frame(train_data, name="train_data")
508
566
  logger.info(
509
567
  f"Provided training data set with {len(train_data)} rows, {train_data.num_items} items (item = single time series). "
510
568
  f"Average time series length is {len(train_data) / train_data.num_items:.1f}. "
511
- f"Data frequency is '{train_data.freq}'."
512
569
  )
570
+
513
571
  if tuning_data is not None:
572
+ tuning_data = self._check_and_prepare_data_frame(tuning_data, name="tuning_data")
573
+ self._check_data_for_evaluation(tuning_data, name="tuning_data")
514
574
  logger.info(
515
575
  f"Provided tuning data set with {len(tuning_data)} rows, {tuning_data.num_items} items. "
516
576
  f"Average time series length is {len(tuning_data) / tuning_data.num_items:.1f}."
@@ -552,8 +612,8 @@ class TimeSeriesPredictor:
552
612
 
553
613
  def predict(
554
614
  self,
555
- data: Union[TimeSeriesDataFrame, pd.DataFrame],
556
- known_covariates: Optional[TimeSeriesDataFrame] = None,
615
+ data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
616
+ known_covariates: Optional[Union[TimeSeriesDataFrame, pd.DataFrame, str]] = None,
557
617
  model: Optional[str] = None,
558
618
  use_cache: bool = True,
559
619
  random_seed: Optional[int] = 123,
@@ -562,7 +622,7 @@ class TimeSeriesPredictor:
562
622
 
563
623
  Parameters
564
624
  ----------
565
- data : Union[TimeSeriesDataFrame, pd.DataFrame]
625
+ data : Union[TimeSeriesDataFrame, pd.DataFrame, str]
566
626
  Time series data to forecast with.
567
627
 
568
628
  If ``known_covariates_names`` were specified when creating the predictor, ``data`` must include the columns
@@ -573,7 +633,7 @@ class TimeSeriesPredictor:
573
633
 
574
634
  If provided data is an instance of pandas DataFrame, AutoGluon will attempt to automatically convert it
575
635
  to a ``TimeSeriesDataFrame``.
576
- known_covariates : TimeSeriesDataFrame, optional
636
+ known_covariates : Union[TimeSeriesDataFrame, pd.DataFrame, str], optional
577
637
  If ``known_covariates_names`` were specified when creating the predictor, it is necessary to provide the
578
638
  values of the known covariates for each time series during the forecast horizon. That is:
579
639
 
@@ -625,16 +685,18 @@ class TimeSeriesPredictor:
625
685
  # Don't use data.item_ids in case data is not a TimeSeriesDataFrame
626
686
  original_item_id_order = data.reset_index()[ITEMID].unique()
627
687
  data = self._check_and_prepare_data_frame(data)
688
+ if known_covariates is not None:
689
+ known_covariates = self._to_data_frame(known_covariates)
628
690
  predictions = self._learner.predict(data, known_covariates=known_covariates, model=model, use_cache=use_cache)
629
691
  return predictions.reindex(original_item_id_order, level=ITEMID)
630
692
 
631
- def evaluate(self, data: Union[TimeSeriesDataFrame, pd.DataFrame], **kwargs):
693
+ def evaluate(self, data: Union[TimeSeriesDataFrame, pd.DataFrame, str], **kwargs):
632
694
  """Evaluate the performance for given dataset, computing the score determined by ``self.eval_metric``
633
695
  on the given data set, and with the same ``prediction_length`` used when training models.
634
696
 
635
697
  Parameters
636
698
  ----------
637
- data : Union[TimeSeriesDataFrame, pd.DataFrame]
699
+ data : Union[TimeSeriesDataFrame, pd.DataFrame, str]
638
700
  The data to evaluate the best model on. The last ``prediction_length`` time steps of the data set, for each
639
701
  item, will be held out for prediction and forecast accuracy will be calculated on these time steps.
640
702
 
@@ -665,9 +727,10 @@ class TimeSeriesPredictor:
665
727
  will have their signs flipped to obey this convention. For example, negative MAPE values will be reported.
666
728
  """
667
729
  data = self._check_and_prepare_data_frame(data)
730
+ self._check_data_for_evaluation(data)
668
731
  return self._learner.score(data, **kwargs)
669
732
 
670
- def score(self, data: Union[TimeSeriesDataFrame, pd.DataFrame], **kwargs):
733
+ def score(self, data: Union[TimeSeriesDataFrame, pd.DataFrame, str], **kwargs):
671
734
  """See, :meth:`~autogluon.timeseries.TimeSeriesPredictor.evaluate`."""
672
735
  return self.evaluate(data, **kwargs)
673
736
 
@@ -753,7 +816,7 @@ class TimeSeriesPredictor:
753
816
 
754
817
  def leaderboard(
755
818
  self,
756
- data: Optional[Union[TimeSeriesDataFrame, pd.DataFrame]] = None,
819
+ data: Optional[Union[TimeSeriesDataFrame, pd.DataFrame, str]] = None,
757
820
  silent: bool = False,
758
821
  use_cache: bool = True,
759
822
  ) -> pd.DataFrame:
@@ -776,7 +839,7 @@ class TimeSeriesPredictor:
776
839
 
777
840
  Parameters
778
841
  ----------
779
- data : Union[TimeSeriesDataFrame, pd.DataFrame], optional
842
+ data : Union[TimeSeriesDataFrame, pd.DataFrame, str], optional
780
843
  dataset used for additional evaluation. If not provided, the validation set used during training will be
781
844
  used.
782
845
 
@@ -801,7 +864,9 @@ class TimeSeriesPredictor:
801
864
  The leaderboard containing information on all models and in order of best model to worst in terms of
802
865
  test performance.
803
866
  """
804
- data = self._check_and_prepare_data_frame(data)
867
+ if data is not None:
868
+ data = self._check_and_prepare_data_frame(data)
869
+ self._check_data_for_evaluation(data)
805
870
  leaderboard = self._learner.leaderboard(data, use_cache=use_cache)
806
871
  if not silent:
807
872
  with pd.option_context("display.max_rows", None, "display.max_columns", None, "display.width", 1000):
@@ -264,10 +264,7 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
264
264
  super().__init__(path=path, save_data=save_data, low_memory=True, **kwargs)
265
265
 
266
266
  self.prediction_length = prediction_length
267
- self.quantile_levels = kwargs.get(
268
- "quantile_levels",
269
- kwargs.get("quantiles", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]),
270
- )
267
+ self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
271
268
  self.target = kwargs.get("target", "target")
272
269
  self.metadata = kwargs.get("metadata", CovariateMetadata())
273
270
  self.is_data_saved = False
@@ -22,7 +22,7 @@ class AutoTimeSeriesTrainer(AbstractTimeSeriesTrainer):
22
22
  freq=kwargs.get("freq"),
23
23
  hyperparameters=hyperparameters,
24
24
  hyperparameter_tune=hyperparameter_tune,
25
- quantiles=quantile_levels,
25
+ quantile_levels=quantile_levels,
26
26
  all_assigned_names=self._get_banned_model_names(),
27
27
  target=self.target,
28
28
  metadata=self.metadata,
@@ -1,3 +1,3 @@
1
1
  """This is the autogluon version file."""
2
- __version__ = '0.8.3b20230817'
2
+ __version__ = '0.8.3b20230819'
3
3
  __lite__ = False
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: autogluon.timeseries
3
- Version: 0.8.3b20230817
3
+ Version: 0.8.3b20230819
4
4
  Summary: AutoML for Image, Text, and Tabular Data
5
5
  Home-page: https://github.com/autogluon/autogluon
6
6
  Author: AutoGluon Community
@@ -46,9 +46,9 @@ Requires-Dist: statsforecast <1.5,>=1.4.0
46
46
  Requires-Dist: mlforecast <0.7.4,>=0.7.0
47
47
  Requires-Dist: tqdm <5,>=4.38
48
48
  Requires-Dist: ujson <6,>=5
49
- Requires-Dist: autogluon.core[raytune] ==0.8.3b20230817
50
- Requires-Dist: autogluon.common ==0.8.3b20230817
51
- Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==0.8.3b20230817
49
+ Requires-Dist: autogluon.core[raytune] ==0.8.3b20230819
50
+ Requires-Dist: autogluon.common ==0.8.3b20230819
51
+ Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==0.8.3b20230819
52
52
  Provides-Extra: all
53
53
  Provides-Extra: tests
54
54
  Requires-Dist: pytest ; extra == 'tests'
@@ -1,18 +1,18 @@
1
- autogluon.timeseries-0.8.3b20230817-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
1
+ autogluon.timeseries-0.8.3b20230819-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
2
2
  autogluon/timeseries/__init__.py,sha256=oGfAdHmGz9zGFH53Q4zDL42CavzjqFaWTgkx_vg17QM,370
3
3
  autogluon/timeseries/evaluator.py,sha256=AWjqItDZA2tPexQ1e5S3IWTMNL4K-_Bcig6SUzDRkxY,11293
4
- autogluon/timeseries/learner.py,sha256=vhFt4y3J5q8UUD8Xf9C5VuWI60RngM7kXY0soYld2Dg,10323
5
- autogluon/timeseries/predictor.py,sha256=_FDHoKKlAwqG6qZB0KYNs0FdJE3zzr-cyQMqzaDBghI,49176
4
+ autogluon/timeseries/learner.py,sha256=nt1z7BmL1f0lRcWTg0CFhK77bCbhqyla3LVs7FIIAdI,9090
5
+ autogluon/timeseries/predictor.py,sha256=041AjOVnQ1PxjVULyybAZ8O4Hfnt-wCvSb3fTvWrrpU,52588
6
6
  autogluon/timeseries/splitter.py,sha256=s5S3CeJxcUfZrl7PSXjzubE06bgB8J8uUT8EywSwtYQ,9252
7
- autogluon/timeseries/version.py,sha256=42pIiXfodX5wHiRjy6ECpjN5JI46Reymvz5FV8O3Hjg,90
7
+ autogluon/timeseries/version.py,sha256=UihvW4WkBxZ767FhaGBcD5FR7QEZDQR-1aUraFUtRAM,90
8
8
  autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
9
9
  autogluon/timeseries/configs/presets_configs.py,sha256=mX0V5zajWWArVforLvbyr6W-JMsQBp2AkBqlWVP2Zuw,640
10
10
  autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
11
- autogluon/timeseries/dataset/ts_dataframe.py,sha256=zN_sKK10wymlGiFnaBx6ocnoUvOVX113DquduMdlNVg,37845
12
- autogluon/timeseries/models/__init__.py,sha256=dBYglymYNKgSdBEGqUybkVWL6B13eTDOrNwR9herycw,848
13
- autogluon/timeseries/models/presets.py,sha256=_q8Rbl3E2-_whi18-FcauZS4iQMoqrmejk1BZ23OavI,11317
11
+ autogluon/timeseries/dataset/ts_dataframe.py,sha256=sHzsmnjED-3t3KSmAKSDuwNbWklxT-jcHMPfei8nNm4,41046
12
+ autogluon/timeseries/models/__init__.py,sha256=eRXcHY5Fc3MRs-AMqQL8HNOHZtVDW1h43XB9zJGOZrg,924
13
+ autogluon/timeseries/models/presets.py,sha256=us6ZpA3UL-NsOXMn_1hnfsd3fTloVEO8JAHjnw_2DZM,11428
14
14
  autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
15
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=K4_N6T2nfM1cOSDLGYDMscMbO7tiOuu4B4v3cMv1360,19492
15
+ autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=cM5Wegh3ZrlzCL81EUimMV2aehpdyePJH0jif5jQAuo,19432
16
16
  autogluon/timeseries/models/abstract/model_trial.py,sha256=f840EF-PSj_j_u1DGVzSD3Z1kCXdOSRLcbn_LJSpw5g,3734
17
17
  autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=ZidrVDHL5x_k9F8lcGrg9Y-soLljsBlrsSeT5FIkn1E,163
18
18
  autogluon/timeseries/models/autogluon_tabular/direct_tabular.py,sha256=0-PAot_vu_BQEMtBHl8Zbky7mqahiWHIfOzsvfJ_VeE,17338
@@ -25,26 +25,27 @@ autogluon/timeseries/models/gluonts/__init__.py,sha256=AeycIz-Y-ZQhBxCQdqWKbbhCF
25
25
  autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=K67s2eBk-PQX_RAt-iMmtxo-8kFTB1DTPFS8meyZOX8,21710
26
26
  autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=DJqh-hYrxjRQHOc3GsaQE2Oa4wccedYW2NcSFC-W6rI,260
27
27
  autogluon/timeseries/models/gluonts/torch/models.py,sha256=7QNCLQuO52vrxxYSiEiRu3fTU9o5L7XClVQsdIhAVu4,12397
28
- autogluon/timeseries/models/local/__init__.py,sha256=-l8ib8W0jRho28Ll70Amj0-FE8pGqjebh_pQdrCue_s,572
29
- autogluon/timeseries/models/local/abstract_local_model.py,sha256=mLB9riUvJABpaV68lwTQnbNk2FXTreciELiN-8wTCSw,9326
30
- autogluon/timeseries/models/local/naive.py,sha256=oxSATbj7VKUB81WzYZExXxPipIVjWud3fsTlv3ZYwpU,2451
31
- autogluon/timeseries/models/local/statsforecast.py,sha256=HHpYSFIwndSL85cVvLVpsD2nlIr2EOq5RnjjUkrh-ic,11112
28
+ autogluon/timeseries/models/local/__init__.py,sha256=ibQQmwvZNVjTpKGoKrLdBYO_pvkAeO4yG8qVJ4JFPfE,636
29
+ autogluon/timeseries/models/local/abstract_local_model.py,sha256=xiK7ObhBuqJ1g85PSed8fG1j5kg7IXjLGbqs72HqRzM,9644
30
+ autogluon/timeseries/models/local/naive.py,sha256=9b80zUccHfGv6pg33mppwTcSJgq4JF4CqTQ7SWq48Hk,7243
31
+ autogluon/timeseries/models/local/npts.py,sha256=_bHwWDEnI8zoZ2KQQyF59BIom6VHc0sJgVjPDRGHSFY,2313
32
+ autogluon/timeseries/models/local/statsforecast.py,sha256=FETPDwC5PYw5nFx13rjpYOiTHSmz2l241ot_TdRNtQQ,11086
32
33
  autogluon/timeseries/models/local/statsmodels.py,sha256=WdhUxmjmBpoWx6XRmTTWmPxTY8VSzxdDe-G38PgXDIQ,15975
33
34
  autogluon/timeseries/models/multi_window/__init__.py,sha256=Bq7AT2Jxdd4WNqmjTdzeqgNiwn1NCyWp4tBIWaM-zfI,60
34
35
  autogluon/timeseries/models/multi_window/multi_window_model.py,sha256=2eu9vph6pazrKvycEKEq8zgTzkIe_G7JcrgC8LKxCWE,8995
35
36
  autogluon/timeseries/trainer/__init__.py,sha256=lxiOT-Gc6BEnr_yWQqra85kEngeM_wtH2SCaRbmC_qE,170
36
- autogluon/timeseries/trainer/abstract_trainer.py,sha256=ovZ7U-xhaMTTMLw1p51mEXRs9XpXDJ3_EOF6XzhoRhE,48122
37
- autogluon/timeseries/trainer/auto_trainer.py,sha256=wABPY75dd8k7JE5AhdiIDIzhOTLFDwiruV4AtpqGB0I,3063
37
+ autogluon/timeseries/trainer/abstract_trainer.py,sha256=_ef5d4oX8StLjd0uATd76bbFtxhFeYRE3YIG5J46L-w,48062
38
+ autogluon/timeseries/trainer/auto_trainer.py,sha256=d_JDMxnEoDHqMIDvmz8qGe7AF2stmwe7IxF8V8qrFwU,3069
38
39
  autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
40
  autogluon/timeseries/utils/features.py,sha256=QPESzJwMZlsnt_woQ4_I42MOlVT1VcPKk1fTeltPYLU,8270
40
41
  autogluon/timeseries/utils/forecast.py,sha256=ouOHcQEppD1ry-9buQ4plmyFK3GPef01gEQE7u2HzcI,1544
41
42
  autogluon/timeseries/utils/seasonality.py,sha256=p9mtahWOtDhHUjeGECUJA0VAKeLkZGZbj070dEqMTJQ,652
42
43
  autogluon/timeseries/utils/warning_filters.py,sha256=Xg9wuTaj-xRKVzdV43oHPbzrfCv_oWRHVrlB7S15rwc,2198
43
- autogluon.timeseries-0.8.3b20230817.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
44
- autogluon.timeseries-0.8.3b20230817.dist-info/METADATA,sha256=IlsS4rppTAdAUdr0OC-_RUJTy0ngbElLTNpyxTUzhxc,12682
45
- autogluon.timeseries-0.8.3b20230817.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
46
- autogluon.timeseries-0.8.3b20230817.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
47
- autogluon.timeseries-0.8.3b20230817.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
48
- autogluon.timeseries-0.8.3b20230817.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
49
- autogluon.timeseries-0.8.3b20230817.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
50
- autogluon.timeseries-0.8.3b20230817.dist-info/RECORD,,
44
+ autogluon.timeseries-0.8.3b20230819.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
45
+ autogluon.timeseries-0.8.3b20230819.dist-info/METADATA,sha256=J0gTh3-Gx3vx1LHhbpWVVFHx11vlxGfBGsyyYGVkA88,12682
46
+ autogluon.timeseries-0.8.3b20230819.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
47
+ autogluon.timeseries-0.8.3b20230819.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
48
+ autogluon.timeseries-0.8.3b20230819.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
49
+ autogluon.timeseries-0.8.3b20230819.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
50
+ autogluon.timeseries-0.8.3b20230819.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
51
+ autogluon.timeseries-0.8.3b20230819.dist-info/RECORD,,