autogluon.timeseries 1.1.0b20240411__py3-none-any.whl → 1.1.0b20240412__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (21)
  1. autogluon/timeseries/dataset/ts_dataframe.py +3 -3
  2. autogluon/timeseries/learner.py +2 -2
  3. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +1 -1
  4. autogluon/timeseries/models/gluonts/abstract_gluonts.py +3 -24
  5. autogluon/timeseries/models/gluonts/torch/models.py +1 -1
  6. autogluon/timeseries/predictor.py +2 -2
  7. autogluon/timeseries/utils/datetime/base.py +38 -20
  8. autogluon/timeseries/utils/datetime/lags.py +13 -12
  9. autogluon/timeseries/utils/datetime/seasonality.py +11 -11
  10. autogluon/timeseries/utils/datetime/time_features.py +12 -11
  11. autogluon/timeseries/utils/features.py +4 -1
  12. autogluon/timeseries/version.py +1 -1
  13. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/METADATA +6 -6
  14. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/RECORD +21 -21
  15. /autogluon.timeseries-1.1.0b20240411-py3.8-nspkg.pth → /autogluon.timeseries-1.1.0b20240412-py3.8-nspkg.pth +0 -0
  16. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/LICENSE +0 -0
  17. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/NOTICE +0 -0
  18. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/WHEEL +0 -0
  19. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/namespace_packages.txt +0 -0
  20. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/top_level.txt +0 -0
  21. {autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/zip-safe +0 -0
autogluon/timeseries/dataset/ts_dataframe.py

@@ -134,7 +134,7 @@ class TimeSeriesDataFrame(pd.DataFrame, TimeSeriesDataFrameDeprecatedMixin):
  ----------
  freq : str
  A pandas-compatible string describing the frequency of the time series. For example ``"D"`` for daily data,
- ``"H"`` for hourly data, etc. This attribute is determined automatically based on the timestamps. For the full
+ ``"h"`` for hourly data, etc. This attribute is determined automatically based on the timestamps. For the full
  list of possible values, see `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
  num_items : int
  Number of items (time series) in the data set.

@@ -961,12 +961,12 @@ class TimeSeriesDataFrame(pd.DataFrame, TimeSeriesDataFrameDeprecatedMixin):
  2021-06-30 6.0
  2021-09-30 7.0
  2021-12-31 8.0
- >>> ts_df.convert_frequency("Y")
+ >>> ts_df.convert_frequency("YE")
  target
  item_id timestamp
  0 2020-12-31 2.5
  2021-12-31 6.5
- >>> ts_df.convert_frequency("Y", agg_numeric="sum")
+ >>> ts_df.convert_frequency("YE", agg_numeric="sum")
  target
  item_id timestamp
  0 2020-12-31 10.0
autogluon/timeseries/learner.py

@@ -250,8 +250,8 @@ class TimeSeriesLearner(AbstractLearner):
  raise ValueError(f"Feature {fn} not found in covariate metadata or the dataset.")

  if len(set(features)) < len(features):
- logger.warning(
- "Duplicate feature names provided to compute feature importance. This will lead to unexpected behavior. "
+ raise ValueError(
+ "Duplicate feature names provided to compute feature importance. "
  "Please provide unique feature names across both static features and covariates."
  )

autogluon/timeseries/models/abstract/abstract_timeseries_model.py

@@ -31,7 +31,7 @@ class AbstractTimeSeriesModel(AbstractModel):
  If None, a new unique time-stamped directory is chosen.
  freq: str
  Frequency string (cf. gluonts frequency strings) describing the frequency
- of the time series data. For example, "H" for hourly or "D" for daily data.
+ of the time series data. For example, "h" for hourly or "D" for daily data.
  prediction_length: int
  Length of the prediction horizon, i.e., the number of time steps the model
  is fit to forecast.
autogluon/timeseries/models/gluonts/abstract_gluonts.py

@@ -15,7 +15,6 @@ from gluonts.dataset.field_names import FieldName
  from gluonts.model.estimator import Estimator as GluonTSEstimator
  from gluonts.model.forecast import Forecast, QuantileForecast, SampleForecast
  from gluonts.model.predictor import Predictor as GluonTSPredictor
- from pandas.tseries.frequencies import to_offset
  from sklearn.compose import ColumnTransformer
  from sklearn.preprocessing import QuantileTransformer, StandardScaler

@@ -26,7 +25,6 @@ from autogluon.tabular.models.tabular_nn.utils.categorical_encoders import (
  )
  from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
  from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
- from autogluon.timeseries.utils.datetime import norm_freq_str
  from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
  from autogluon.timeseries.utils.warning_filters import disable_root_logger, warning_filter

@@ -37,16 +35,14 @@ logger = logging.getLogger(__name__)
  gts_logger = logging.getLogger(gluonts.__name__)


- GLUONTS_SUPPORTED_OFFSETS = ["Y", "Q", "M", "W", "D", "B", "H", "T", "min", "S"]
-
-
  class SimpleGluonTSDataset(GluonTSDataset):
  """Wrapper for TimeSeriesDataFrame that is compatible with the GluonTS Dataset API."""

+ _dummy_gluonts_freq = "D"
+
  def __init__(
  self,
  target_df: TimeSeriesDataFrame,
- freq: str,
  target_column: str = "target",
  feat_static_cat: Optional[np.ndarray] = None,
  feat_static_real: Optional[np.ndarray] = None,

@@ -66,7 +62,6 @@ class SimpleGluonTSDataset(GluonTSDataset):
  self.feat_dynamic_real = self._astype(feat_dynamic_real, dtype=np.float32)
  self.past_feat_dynamic_cat = self._astype(past_feat_dynamic_cat, dtype=np.int64)
  self.past_feat_dynamic_real = self._astype(past_feat_dynamic_real, dtype=np.float32)
- self.freq = self._to_gluonts_freq(freq)

  # Necessary to compute indptr for known_covariates at prediction time
  self.includes_future = includes_future

@@ -88,21 +83,6 @@ class SimpleGluonTSDataset(GluonTSDataset):
  else:
  return array.astype(dtype)

- @staticmethod
- def _to_gluonts_freq(freq: str) -> str:
- # FIXME: GluonTS expects a frequency string, but only supports a limited number of such strings
- # for feature generation. If the frequency string doesn't match or is not provided, it raises an exception.
- # Here we bypass this by issuing a default "yearly" frequency, tricking it into not producing
- # any lags or features.
- pd_offset = to_offset(freq)
-
- # normalize freq str to handle peculiarities such as W-SUN
- offset_base_alias = norm_freq_str(pd_offset)
- if offset_base_alias not in GLUONTS_SUPPORTED_OFFSETS:
- return "A"
- else:
- return f"{pd_offset.n}{offset_base_alias}"
-
  def __len__(self):
  return len(self.indptr) - 1 # noqa

@@ -113,7 +93,7 @@ class SimpleGluonTSDataset(GluonTSDataset):
  # GluonTS expects item_id to be a string
  ts = {
  FieldName.ITEM_ID: str(self.item_ids[j]),
- FieldName.START: pd.Period(self.start_timestamps.iloc[j], freq=self.freq),
+ FieldName.START: pd.Period(self.start_timestamps.iloc[j], freq=self._dummy_gluonts_freq),
  FieldName.TARGET: self.target_array[start_idx:end_idx],
  }
  if self.feat_static_cat is not None:

@@ -495,7 +475,6 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):

  return SimpleGluonTSDataset(
  target_df=time_series_df[[self.target]],
- freq=self.freq,
  target_column=self.target,
  feat_static_cat=feat_static_cat,
  feat_static_real=feat_static_real,
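Context for the hunks above (an inference from the removed FIXME comment, not a statement of GluonTS internals): AutoGluon supplies its own lags and time features, so GluonTS only needs a syntactically valid pd.Period start, and a fixed dummy "D" frequency is enough. A minimal sketch with a made-up timestamp:

    import pandas as pd

    # Any timestamp can be wrapped in a Period with the dummy "D" frequency; the exact
    # offset carries no information that the downstream models rely on here.
    start = pd.Period(pd.Timestamp("2024-01-15 10:30"), freq="D")
    print(start)  # 2024-01-15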
autogluon/timeseries/models/gluonts/torch/models.py

@@ -424,5 +424,5 @@ class WaveNetModel(AbstractGluonTSModel):
  init_kwargs.setdefault("time_features", get_time_features_for_frequency(self.freq))
  init_kwargs.setdefault("num_parallel_samples", self.default_num_samples)
  # WaveNet model fails if an unsupported frequency such as "SM" is provided. We provide a dummy freq instead
- init_kwargs["freq"] = "H"
+ init_kwargs["freq"] = "D"
  return init_kwargs
autogluon/timeseries/predictor.py

@@ -69,7 +69,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
  models that predict up to 3 days into the future from the most recent observation.
  freq : str, optional
  Frequency of the time series data (see `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
- for available frequencies). For example, ``"D"`` for daily data or ``"H"`` for hourly data.
+ for available frequencies). For example, ``"D"`` for daily data or ``"h"`` for hourly data.

  By default, the predictor will attempt to automatically infer the frequency from the data. This argument should
  only be set in two cases:

@@ -195,7 +195,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin):
  self._min_train_length = max(self.prediction_length + 1, 5)
  self.freq = freq
  if self.freq is not None:
- # Standardize frequency string (e.g., "min" -> "T", "Y" -> "A-DEC")
+ # Standardize frequency string (e.g., "T" -> "min", "Y" -> "YE")
  std_freq = pd.tseries.frequencies.to_offset(self.freq).freqstr
  if std_freq != str(self.freq):
  logger.info(f"Frequency '{self.freq}' stored as '{std_freq}'")
autogluon/timeseries/utils/datetime/base.py

@@ -1,31 +1,46 @@
  import pandas as pd

  TO_MAJOR_FREQ = {
- "min": "T",
- "ms": "L",
- "us": "U",
+ # sub-daily
+ "H": "h",
+ "BH": "bh",
+ "cbh": "bh",
+ "CBH": "bh",
+ "T": "min",
+ "S": "s",
+ "L": "ms",
+ "U": "us",
+ "N": "ns",
  # business day
  "C": "B",
  # month
- "BM": "M",
- "CBM": "M",
- "MS": "M",
- "BMS": "M",
- "CBMS": "M",
+ "M": "ME",
+ "BM": "ME",
+ "BME": "ME",
+ "CBM": "ME",
+ "CBME": "ME",
+ "MS": "ME",
+ "BMS": "ME",
+ "CBMS": "ME",
  # semi-month
- "SMS": "SM",
+ "SM": "SME",
+ "SMS": "SME",
  # quarter
- "BQ": "Q",
- "QS": "Q",
- "BQS": "Q",
+ "Q": "QE",
+ "BQ": "QE",
+ "BQE": "QE",
+ "QS": "QE",
+ "BQS": "QE",
  # annual
- "Y": "A",
- "BA": "A",
- "BY": "A",
- "AS": "A",
- "YS": "A",
- "BAS": "A",
- "BYS": "A",
+ "A": "YE",
+ "Y": "YE",
+ "BA": "YE",
+ "BY": "YE",
+ "BYE": "YE",
+ "AS": "YE",
+ "YS": "YE",
+ "BAS": "YE",
+ "BYS": "YE",
  }


@@ -33,7 +48,10 @@ def norm_freq_str(offset: pd.DateOffset) -> str:
  """Obtain frequency string from a pandas.DateOffset object.

  "Non-standard" frequencies are converted to their "standard" counterparts. For example, MS (month start) is mapped
- to M (month) since both correspond to the same seasonality, lags and time features.
+ to ME (month end) since both correspond to the same seasonality, lags and time features.
+
+ The frequencies are always mapped to the new non-deprecated aliases (pandas>=2.2), e.g., "H" is mapped to "h". The
+ downstream functions like `get_seasonality` handle the new aliases even if older version of pandas is used.
  """
  base_freq = offset.name.split("-")[0]
  return TO_MAJOR_FREQ.get(base_freq, base_freq)
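A small sketch of how the updated mapping behaves; the import path is the one removed from abstract_gluonts.py earlier in this diff:

    import pandas as pd
    from autogluon.timeseries.utils.datetime import norm_freq_str

    # Deprecated and "start"-anchored aliases collapse onto the canonical pandas>=2.2 alias,
    # and anchor suffixes such as W-SUN are stripped before the lookup.
    print(norm_freq_str(pd.tseries.frequencies.to_offset("MS")))     # "ME"
    print(norm_freq_str(pd.tseries.frequencies.to_offset("W-SUN")))  # "W"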
autogluon/timeseries/utils/datetime/lags.py

@@ -1,6 +1,7 @@
  """
  Generate lag indices based on frequency string. Adapted from gluonts.time_feature.lag.
  """
+
  from typing import List, Optional

  import numpy as np

@@ -96,13 +97,13 @@ def get_lags_for_frequency(
  offset = pd.tseries.frequencies.to_offset(freq)
  offset_name = norm_freq_str(offset)

- if offset_name == "A":
+ if offset_name == "YE":
  lags = []
- elif offset_name == "Q":
+ elif offset_name == "QE":
  lags = _make_lags_for_quarter(offset.n)
- elif offset_name == "M":
+ elif offset_name == "ME":
  lags = _make_lags_for_month(offset.n)
- elif offset_name == "SM":
+ elif offset_name == "SME":
  lags = _make_lags_for_semi_month(offset.n)
  elif offset_name == "W":
  lags = _make_lags_for_week(offset.n)

@@ -110,21 +111,21 @@ def get_lags_for_frequency(
  lags = _make_lags_for_day(offset.n) + _make_lags_for_week(offset.n / 7.0)
  elif offset_name == "B":
  lags = _make_lags_for_day(offset.n, days_in_week=5, days_in_month=22) + _make_lags_for_week(offset.n / 5.0)
- elif offset_name == "H":
+ elif offset_name == "h":
  lags = (
  _make_lags_for_hour(offset.n)
  + _make_lags_for_day(offset.n / 24)
  + _make_lags_for_week(offset.n / (24 * 7))
  )
  # business hour
- elif offset_name == "BH":
+ elif offset_name == "bh":
  lags = (
  _make_lags_for_business_hour(offset.n)
  + _make_lags_for_day(offset.n / 9)
  + _make_lags_for_week(offset.n / (9 * 7))
  )
  # minutes
- elif offset_name == "T":
+ elif offset_name == "min":
  lags = (
  _make_lags_for_minute(offset.n)
  + _make_lags_for_hour(offset.n / 60)

@@ -132,32 +133,32 @@ def get_lags_for_frequency(
  + _make_lags_for_week(offset.n / (60 * 24 * 7))
  )
  # second
- elif offset_name == "S":
+ elif offset_name == "s":
  lags = (
  _make_lags_for_second(offset.n)
  + _make_lags_for_minute(offset.n / 60)
  + _make_lags_for_hour(offset.n / (60 * 60))
  )
- elif offset_name == "L":
+ elif offset_name == "ms":
  lags = (
  _make_lags_for_second(offset.n / 1e3)
  + _make_lags_for_minute(offset.n / (60 * 1e3))
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e3))
  )
- elif offset_name == "U":
+ elif offset_name == "us":
  lags = (
  _make_lags_for_second(offset.n / 1e6)
  + _make_lags_for_minute(offset.n / (60 * 1e6))
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e6))
  )
- elif offset_name == "N":
+ elif offset_name == "ns":
  lags = (
  _make_lags_for_second(offset.n / 1e9)
  + _make_lags_for_minute(offset.n / (60 * 1e9))
  + _make_lags_for_hour(offset.n / (60 * 60 * 1e9))
  )
  else:
- raise Exception(f"invalid frequency {freq}")
+ raise Exception(f"Cannot get lags for unsupported frequency {freq}")

  # flatten lags list and filter
  lags = [int(lag) for sub_list in lags for lag in sub_list if 7 < lag <= lag_ub]
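A quick sanity check of the renamed branches (module path taken from the file header in this diff; assuming the remaining parameters keep their defaults, and the exact lag values come from helper functions not shown here):

    from autogluon.timeseries.utils.datetime.lags import get_lags_for_frequency

    # Hourly data now matches the lowercase "h" branch; the deprecated "H" spelling is
    # normalized by norm_freq_str first, so both calls should return the same lags
    # (pandas may emit a FutureWarning for "H").
    print(get_lags_for_frequency("h")[:5])
    print(get_lags_for_frequency("H")[:5])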
autogluon/timeseries/utils/datetime/seasonality.py

@@ -5,20 +5,20 @@ import pandas as pd
  from .base import norm_freq_str

  DEFAULT_SEASONALITIES = {
- "A": 1,
- "Q": 4,
- "M": 12,
- "SM": 24,
+ "YE": 1,
+ "QE": 4,
+ "ME": 12,
+ "SME": 24,
  "W": 1,
  "D": 7,
  "B": 5,
- "BH": 9,
- "H": 24,
- "T": 60 * 24,
- "S": 1,
- "L": 1,
- "U": 1,
- "N": 1,
+ "bh": 9,
+ "h": 24,
+ "min": 60 * 24,
+ "s": 1,
+ "ms": 1,
+ "us": 1,
+ "ns": 1,
  }

autogluon/timeseries/utils/datetime/time_features.py

@@ -1,6 +1,7 @@
  """
  Generate time features based on frequency string. Adapted from gluonts.time_feature.time_feature.
  """
+
  from typing import Callable, List

  import numpy as np

@@ -57,20 +58,20 @@ def second_of_minute(index: pd.DatetimeIndex) -> np.ndarray:

  def get_time_features_for_frequency(freq) -> List[Callable]:
  features_by_offset_name = {
- "A": [],
- "Q": [quarter_of_year],
- "M": [month_of_year],
- "SM": [day_of_month, month_of_year],
+ "YE": [],
+ "QE": [quarter_of_year],
+ "ME": [month_of_year],
+ "SME": [day_of_month, month_of_year],
  "W": [day_of_month, week_of_year],
  "D": [day_of_week, day_of_month, day_of_year],
  "B": [day_of_week, day_of_month, day_of_year],
- "BH": [hour_of_day, day_of_week, day_of_month, day_of_year],
- "H": [hour_of_day, day_of_week, day_of_month, day_of_year],
- "T": [minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
- "S": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
- "L": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
- "U": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
- "N": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
+ "bh": [hour_of_day, day_of_week, day_of_month, day_of_year],
+ "h": [hour_of_day, day_of_week, day_of_month, day_of_year],
+ "min": [minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
+ "s": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
+ "ms": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
+ "us": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
+ "ns": [second_of_minute, minute_of_hour, hour_of_day, day_of_week, day_of_month, day_of_year],
  }
  offset = pd.tseries.frequencies.to_offset(freq)
  offset_name = norm_freq_str(offset)
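Same pattern for the time features; a brief example using only names visible in this hunk:

    from autogluon.timeseries.utils.datetime.time_features import get_time_features_for_frequency

    # Daily data keeps its calendar features under the new aliases.
    print([f.__name__ for f in get_time_features_for_frequency("D")])
    # expected: ['day_of_week', 'day_of_month', 'day_of_year']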
autogluon/timeseries/utils/features.py

@@ -14,6 +14,7 @@ from autogluon.features.generators import (
  PipelineFeatureGenerator,
  )
  from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+ from autogluon.timeseries.utils.warning_filters import warning_filter

  logger = logging.getLogger(__name__)

@@ -335,7 +336,9 @@ class AbstractFeatureImportanceTransform:
  # we'll have to work on the history of the data alone
  data[feature_name] = data[feature_name].copy()
  feature_data = data[feature_name].groupby(level=ITEMID, sort=False).head(-self.prediction_length)
- data[feature_name].update(self._transform_series(feature_data, is_categorical=is_categorical))
+ # Silence spurious FutureWarning raised by DataFrame.update https://github.com/pandas-dev/pandas/issues/57124
+ with warning_filter():
+ data[feature_name].update(self._transform_series(feature_data, is_categorical=is_categorical))
  elif feature_name in self.covariate_metadata.static_features:
  feature_data = data.static_features[feature_name].copy()
  feature_data.reset_index(drop=True, inplace=True)
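warning_filter() comes from autogluon/timeseries/utils/warning_filters.py (listed in the RECORD section below); assuming it simply suppresses warnings raised inside the block, the change is roughly equivalent to this stdlib-only sketch:

    import warnings
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0])
    # Suppress the spurious FutureWarning that pandas can raise from Series.update
    # (pandas-dev/pandas#57124, referenced in the added comment above).
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        s.update(pd.Series([10.0], index=[1]))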
autogluon/timeseries/version.py

@@ -1,3 +1,3 @@
  """This is the autogluon version file."""
- __version__ = '1.1.0b20240411'
+ __version__ = '1.1.0b20240412'
  __lite__ = False
{autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: autogluon.timeseries
- Version: 1.1.0b20240411
+ Version: 1.1.0b20240412
  Summary: Fast and Accurate ML in 3 Lines of Code
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community

@@ -37,14 +37,14 @@ Description-Content-Type: text/markdown
  Requires-Dist: joblib <2,>=1.1
  Requires-Dist: numpy <1.29,>=1.21
  Requires-Dist: scipy <1.13,>=1.5.4
- Requires-Dist: pandas <2.2.0,>=2.0.0
+ Requires-Dist: pandas <2.3.0,>=2.0.0
  Requires-Dist: torch <2.2,>=2.1
  Requires-Dist: lightning <2.2,>=2.1
  Requires-Dist: pytorch-lightning <2.2,>=2.1
  Requires-Dist: transformers[sentencepiece] <4.39.0,>=4.38.0
  Requires-Dist: accelerate <0.22.0,>=0.21.0
  Requires-Dist: statsmodels <0.15,>=0.13.0
- Requires-Dist: gluonts <0.15,>=0.14.0
+ Requires-Dist: gluonts <0.14.4,>=0.14.0
  Requires-Dist: networkx <4,>=3.0
  Requires-Dist: statsforecast <1.5,>=1.4.0
  Requires-Dist: mlforecast <0.10.1,>=0.10.0

@@ -52,9 +52,9 @@ Requires-Dist: utilsforecast <0.0.11,>=0.0.10
  Requires-Dist: tqdm <5,>=4.38
  Requires-Dist: orjson ~=3.9
  Requires-Dist: tensorboard <3,>=2.9
- Requires-Dist: autogluon.core[raytune] ==1.1.0b20240411
- Requires-Dist: autogluon.common ==1.1.0b20240411
- Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==1.1.0b20240411
+ Requires-Dist: autogluon.core[raytune] ==1.1.0b20240412
+ Requires-Dist: autogluon.common ==1.1.0b20240412
+ Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost] ==1.1.0b20240412
  Provides-Extra: all
  Requires-Dist: optimum[onnxruntime] <1.19,>=1.17 ; extra == 'all'
  Provides-Extra: chronos-onnx
{autogluon.timeseries-1.1.0b20240411.dist-info → autogluon.timeseries-1.1.0b20240412.dist-info}/RECORD

@@ -1,14 +1,14 @@
- autogluon.timeseries-1.1.0b20240411-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+ autogluon.timeseries-1.1.0b20240412-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
  autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
  autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
- autogluon/timeseries/learner.py,sha256=m80SjcXTqJvbjIozUlu8s8HBz1De3W9AXsTvKeKIto0,13865
- autogluon/timeseries/predictor.py,sha256=A-YkJGKrYGXGlmtIHd9CDMmudBSKcBdnOCJK4oGsQr8,81222
+ autogluon/timeseries/learner.py,sha256=IYXpJSDyTzjZXjKL_SrTujt5Uke83mSJFA0sMj25_sM,13828
+ autogluon/timeseries/predictor.py,sha256=w9YWluyCVoFabeKOvfV7GiPNe7Z7pV2JDjOt8mXUdJo,81219
  autogluon/timeseries/splitter.py,sha256=eghGwAAN2_cxGk5aJBILgjGWtLzjxJcytMy49gg_q18,3061
- autogluon/timeseries/version.py,sha256=y7BI44S8iDi76ohS8KVBBeNT-lipii0Hz837pbKHVLQ,90
+ autogluon/timeseries/version.py,sha256=x3pjPHRSXvEKGk0gDn7ARHXnZxx2LjH9RjumNUkaoeY,90
  autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
  autogluon/timeseries/configs/presets_configs.py,sha256=ZVV8BsnGnnHPgjBtJBqF-H35MYUdzRBQ8FP7zA3_11g,1949
  autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
- autogluon/timeseries/dataset/ts_dataframe.py,sha256=ep0_3hZfQu59fLoxvMWU6JpoE31d4SuvwSoXO_X48f8,45593
+ autogluon/timeseries/dataset/ts_dataframe.py,sha256=QgCwzRx10CRzeWzQREX_zUDAl77-lkdNjM0bT3kK8NU,45595
  autogluon/timeseries/metrics/__init__.py,sha256=KzgXNj5or7RB_uadjgC8p5gxyV26zjj2hT58OmvnfmA,1875
  autogluon/timeseries/metrics/abstract.py,sha256=9xCFQ3NaR1C0hn01M7oBd72a_CiNV-w6QFcRjwUbKYI,8183
  autogluon/timeseries/metrics/point.py,sha256=xy8sKrBbuxZ7yTW21TDPayKnEj2FBj1AEseJxUdneqE,13399

@@ -17,7 +17,7 @@ autogluon/timeseries/metrics/utils.py,sha256=eJ63TCR-UwbeJ1c2Qm7B2q-8B3sFthPgioo
  autogluon/timeseries/models/__init__.py,sha256=HFjDOYKQWaGlgQWiLlOvfwE2dH0uDmeKJFC8GDL987c,1271
  autogluon/timeseries/models/presets.py,sha256=p36ROcuOnixgGsI1zBdr9VM-MH2pKCiJCS2Ofb4xT8o,11243
  autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=q5yVFyFJPaMVtW48tr2Pw-hgedM5upvc-93qjN4Li68,23435
+ autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=MvLF529b3fo0icgle-qmS0oce-ftiiQ1jPBLnY-39fk,23435
  autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
  autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
  autogluon/timeseries/models/autogluon_tabular/mlforecast.py,sha256=lnHzCoMF6x9jZOzRM4zSlcXmx0XmtRlsPoiE-LWmqQ0,31299

@@ -30,9 +30,9 @@ autogluon/timeseries/models/ensemble/__init__.py,sha256=kFr11Gmt7lQJu9Rr8HuIPphQ
  autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py,sha256=tifETwmiEGt-YtQ9eNK7ojJ3fBvtFMUJvisbfkIJ7gw,3393
  autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=5HvZuW5osgsZg3V69k82nKEOy_YgeH1JTfQa7F3cU7s,7220
  autogluon/timeseries/models/gluonts/__init__.py,sha256=M8PV9ZE4WpteScMobXM6RH1Udb1AZiHHtj2g5GQL3TU,329
- autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=1MUbeFqRZbfPwAp6ClXmduxXgRV-5H0m1h23OeyPMp0,34031
+ autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=iB3-VNZeg2Wf7rW1WeKDAOJyh2ZGuq2BU309OirBFqc,33055
  autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/timeseries/models/gluonts/torch/models.py,sha256=PVDns7CnZtJTbPiCw-FJxahKrDjC-wj0VkwIGsodYY0,19930
+ autogluon/timeseries/models/gluonts/torch/models.py,sha256=vzR-FRHSQDC76z2cK37eVDzs7tCXvGpZtrvtZptyP5Y,19930
  autogluon/timeseries/models/local/__init__.py,sha256=JyckWWgMG1BTIWJqFTW6e1O-eb0LPPOwtXwmb1ErohQ,756
  autogluon/timeseries/models/local/abstract_local_model.py,sha256=5wvwt7d99kw-PTDnuT45uoCeXk6POjUArCAwUj8mSok,11836
  autogluon/timeseries/models/local/naive.py,sha256=iwRcFMFmJKPWPbD9TWaIUS51oav69F_VAp6-jb_5SUE,7249

@@ -44,19 +44,19 @@ autogluon/timeseries/trainer/__init__.py,sha256=lxiOT-Gc6BEnr_yWQqra85kEngeM_wtH
  autogluon/timeseries/trainer/abstract_trainer.py,sha256=2nPLskmbOGRzkj6ttX0tHVkj9h2Y72MHaZy7L78MBZQ,59100
  autogluon/timeseries/trainer/auto_trainer.py,sha256=psJFZBwWWPlLjNwAgvO4OUJXsRW1sTN2YS9a4pdoeoE,3344
  autogluon/timeseries/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/timeseries/utils/features.py,sha256=OvBxLIWKR7fPOIlifonVKXUdaWazH_WbdLssJtFCpGs,19261
+ autogluon/timeseries/utils/features.py,sha256=kG1xin3VCjtLwfaDkxlGKaesS7Ah-hIvsUNdwToLxYY,19492
  autogluon/timeseries/utils/forecast.py,sha256=p0WKM9Q0nLAwwmCgYZI1zi9mCOWXWJfllEt2lPRQl4M,1882
  autogluon/timeseries/utils/warning_filters.py,sha256=ngjmfv21zIwTG-7VNZT-NkaSR7ssnoNtUwcXCXANZ4A,2076
  autogluon/timeseries/utils/datetime/__init__.py,sha256=bTMR8jLh1LW55vHjbOr1zvWRMF_PqbvxpS-cUcNIDWI,173
- autogluon/timeseries/utils/datetime/base.py,sha256=MsqIHY14m3QMjSwwtE7Uo1oNwepWUby_nxlWm4DlqKU,848
- autogluon/timeseries/utils/datetime/lags.py,sha256=kcU4liKbHj7KP2ajNU-KLZ8OYSU35EgT4kJjZNSw0Zg,5875
- autogluon/timeseries/utils/datetime/seasonality.py,sha256=kgK_ukw2wCviEB7CZXRVC5HZpBJZu9IsRrvCJ9E_rOE,755
- autogluon/timeseries/utils/datetime/time_features.py,sha256=pROkYyxETQ8rHKfPGhf2paB73C7rWJ2Ui0cCswLqbBg,2562
- autogluon.timeseries-1.1.0b20240411.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
- autogluon.timeseries-1.1.0b20240411.dist-info/METADATA,sha256=BHzU1LhBz9T7B4z230k-Fy7c7MkjQ9UEcgcUezVbTCI,12528
- autogluon.timeseries-1.1.0b20240411.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
- autogluon.timeseries-1.1.0b20240411.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- autogluon.timeseries-1.1.0b20240411.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.timeseries-1.1.0b20240411.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.timeseries-1.1.0b20240411.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
- autogluon.timeseries-1.1.0b20240411.dist-info/RECORD,,
+ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbjy4DJ_YYOGuu9x4,1341
+ autogluon/timeseries/utils/datetime/lags.py,sha256=GoLtvcZ8oKb3QkoBJ9E59LSPLOP7Qjxrr2UmMSZgjyw,5909
+ autogluon/timeseries/utils/datetime/seasonality.py,sha256=h_4w00iEytAz_N_EpCENQ8RCXy7KQITczrYjBgVqWkQ,764
+ autogluon/timeseries/utils/datetime/time_features.py,sha256=PAXbYbQ0z_5GFbkxSNi41zLY_2-U3x0Ynm1m_WhdtGc,2572
+ autogluon.timeseries-1.1.0b20240412.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ autogluon.timeseries-1.1.0b20240412.dist-info/METADATA,sha256=KsqIZCL5sRlmJ90KadBcOSXKeL2XazvcaT1LLw1FF1Y,12530
+ autogluon.timeseries-1.1.0b20240412.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+ autogluon.timeseries-1.1.0b20240412.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ autogluon.timeseries-1.1.0b20240412.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon.timeseries-1.1.0b20240412.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon.timeseries-1.1.0b20240412.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ autogluon.timeseries-1.1.0b20240412.dist-info/RECORD,,