autogluon.timeseries 0.8.3b20231101__tar.gz → 0.8.3b20231103__tar.gz

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (59)
  1. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/PKG-INFO +1 -1
  2. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/learner.py +10 -0
  3. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/metrics/__init__.py +8 -3
  4. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +3 -0
  5. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/local/abstract_local_model.py +4 -1
  6. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/local/statsforecast.py +4 -3
  7. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/predictor.py +15 -23
  8. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/trainer/abstract_trainer.py +20 -1
  9. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/features.py +3 -3
  10. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/version.py +1 -1
  11. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/PKG-INFO +1 -1
  12. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/requires.txt +3 -3
  13. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/setup.cfg +0 -0
  14. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/setup.py +0 -0
  15. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/__init__.py +0 -0
  16. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/configs/__init__.py +0 -0
  17. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/configs/presets_configs.py +0 -0
  18. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/dataset/__init__.py +0 -0
  19. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
  20. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/evaluator.py +0 -0
  21. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/metrics/abstract.py +0 -0
  22. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/metrics/point.py +0 -0
  23. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/metrics/quantile.py +0 -0
  24. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/metrics/utils.py +0 -0
  25. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/__init__.py +0 -0
  26. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
  27. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -0
  28. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
  29. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
  30. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +0 -0
  31. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
  32. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
  33. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
  34. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -0
  35. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
  36. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
  37. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/gluonts/torch/models.py +0 -0
  38. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/local/__init__.py +0 -0
  39. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/local/naive.py +0 -0
  40. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/local/npts.py +0 -0
  41. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
  42. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +0 -0
  43. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/models/presets.py +0 -0
  44. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/splitter.py +0 -0
  45. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/trainer/__init__.py +0 -0
  46. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
  47. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/__init__.py +0 -0
  48. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
  49. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
  50. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
  51. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
  52. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
  53. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/forecast.py +0 -0
  54. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
  55. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/SOURCES.txt +0 -0
  56. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
  57. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
  58. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
  59. {autogluon.timeseries-0.8.3b20231101 → autogluon.timeseries-0.8.3b20231103}/src/autogluon.timeseries.egg-info/zip-safe +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: autogluon.timeseries
- Version: 0.8.3b20231101
+ Version: 0.8.3b20231103
  Summary: AutoML for Image, Text, and Tabular Data
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community

src/autogluon/timeseries/learner.py
@@ -191,6 +191,16 @@ class TimeSeriesLearner(AbstractLearner):
          data = self.feature_generator.transform(data)
          return self.load_trainer().score(data=data, model=model, metric=metric, use_cache=use_cache)

+     def evaluate(
+         self,
+         data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
+         model: Optional[str] = None,
+         metrics: Optional[Union[str, TimeSeriesScorer, List[Union[str, TimeSeriesScorer]]]] = None,
+         use_cache: bool = True,
+     ) -> Dict[str, float]:
+         data = self.feature_generator.transform(data)
+         return self.load_trainer().evaluate(data=data, model=model, metrics=metrics, use_cache=use_cache)
+
      def leaderboard(self, data: Optional[TimeSeriesDataFrame] = None, use_cache: bool = True) -> pd.DataFrame:
          if data is not None:
              data = self.feature_generator.transform(data)

src/autogluon/timeseries/metrics/__init__.py
@@ -1,4 +1,4 @@
- import json
+ from pprint import pformat
  from typing import Type, Union

  from .abstract import TimeSeriesScorer
@@ -29,11 +29,15 @@ AVAILABLE_METRICS = {
      "WAPE": WAPE,
      "SQL": SQL,
      "WQL": WQL,
-     # Exist for compatibility
      "MSE": MSE,
      "MAE": MAE,
  }

+ # For backward compatibility
+ DEPRECATED_METRICS = {
+     "mean_wQuantileLoss": "WQL",
+ }
+

  def check_get_evaluation_metric(
      eval_metric: Union[str, TimeSeriesScorer, Type[TimeSeriesScorer], None] = None
@@ -44,10 +48,11 @@ def check_get_evaluation_metric(
          # e.g., user passed `eval_metric=CustomMetric` instead of `eval_metric=CustomMetric()`
          eval_metric = eval_metric()
      elif isinstance(eval_metric, str):
+         eval_metric = DEPRECATED_METRICS.get(eval_metric, eval_metric)
          if eval_metric.upper() not in AVAILABLE_METRICS:
              raise ValueError(
                  f"Time series metric {eval_metric} not supported. Available metrics are:\n"
-                 f"{json.dumps(list(AVAILABLE_METRICS.keys()), indent=2)}"
+                 f"{pformat(sorted(AVAILABLE_METRICS.keys()))}"
              )
          eval_metric = AVAILABLE_METRICS[eval_metric.upper()]()
      elif eval_metric is None:
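
The new DEPRECATED_METRICS lookup lets legacy metric names resolve silently before validation. Below is a minimal standalone sketch of that name-resolution logic, assuming the dictionaries are shaped as in the hunk above; the resolve_metric_name helper and the placeholder metric values are illustrative, not part of the package.

from pprint import pformat

# Illustrative stand-ins for the module-level dictionaries shown above.
AVAILABLE_METRICS = {"WQL": object(), "WAPE": object(), "MASE": object()}
DEPRECATED_METRICS = {"mean_wQuantileLoss": "WQL"}


def resolve_metric_name(eval_metric: str) -> str:
    """Map a possibly deprecated metric name to its current equivalent (hypothetical helper)."""
    # Deprecated aliases are translated first, so the rest of the validation only sees current names.
    eval_metric = DEPRECATED_METRICS.get(eval_metric, eval_metric)
    if eval_metric.upper() not in AVAILABLE_METRICS:
        raise ValueError(
            f"Time series metric {eval_metric} not supported. Available metrics are:\n"
            f"{pformat(sorted(AVAILABLE_METRICS.keys()))}"
        )
    return eval_metric.upper()


print(resolve_metric_name("mean_wQuantileLoss"))  # prints "WQL"; no deprecation branch needed in the predictor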

src/autogluon/timeseries/models/gluonts/abstract_gluonts.py
@@ -305,6 +305,7 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
          self, time_series_df: Optional[TimeSeriesDataFrame], known_covariates: Optional[TimeSeriesDataFrame] = None
      ) -> Optional[GluonTSDataset]:
          if time_series_df is not None:
+             # TODO: Preprocess real-valued features with StdScaler?
              if self.num_feat_static_cat > 0:
                  feat_static_cat = time_series_df.static_features[self.metadata.static_features_cat]
              else:
@@ -312,6 +313,8 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):

              if self.num_feat_static_real > 0:
                  feat_static_real = time_series_df.static_features[self.metadata.static_features_real]
+                 if feat_static_real.isna().values.any():
+                     feat_static_real = feat_static_real.fillna(feat_static_real.mean())
              else:
                  feat_static_real = None
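
The added imputation amounts to a column-wise mean fill on the real-valued static features. A small standalone pandas sketch of the same operation follows; the frame and column names are made up for illustration.

import numpy as np
import pandas as pd

# Hypothetical static features with missing real-valued entries
feat_static_real = pd.DataFrame({"population": [1200.0, np.nan, 800.0], "area_km2": [3.5, 4.0, np.nan]})

if feat_static_real.isna().values.any():
    # fillna(mean()) replaces each NaN with the mean of its own column
    feat_static_real = feat_static_real.fillna(feat_static_real.mean())

print(feat_static_real)  # NaNs replaced by 1000.0 and 3.75, the respective column means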

src/autogluon/timeseries/models/local/abstract_local_model.py
@@ -168,6 +168,8 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
                  time_series=time_series,
                  local_model_args=self._local_model_args.copy(),
              )
+             if not np.isfinite(result.values).all():
+                 raise RuntimeError("Forecast contains NaN or Inf values.")
              model_failed = False
          except Exception:
              if self.use_fallback_model:
@@ -195,7 +197,8 @@ def seasonal_naive_forecast(
  ) -> pd.DataFrame:
      """Generate seasonal naive forecast, predicting the last observed value from the same period."""
      forecast = {}
-     if len(target) > seasonal_period and seasonal_period > 1:
+     # At least seasonal_period + 2 values are required to compute sigma for seasonal naive
+     if len(target) > seasonal_period + 1 and seasonal_period > 1:
          indices = [len(target) - seasonal_period + k % seasonal_period for k in range(prediction_length)]
          forecast["mean"] = target[indices]
          residuals = target[seasonal_period:] - target[:-seasonal_period]
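
The stricter length check exists because a residual-based sigma needs at least two seasonal residuals: with exactly seasonal_period + 1 observations there is only one residual, and a sample standard deviation is undefined. The sketch below illustrates the idea under that assumption; the function name and the non-seasonal fallback are illustrative, not the package implementation.

import numpy as np


def seasonal_naive_with_sigma(target: np.ndarray, prediction_length: int, seasonal_period: int):
    """Sketch: repeat the last observed season and estimate sigma from seasonal residuals."""
    if len(target) > seasonal_period + 1 and seasonal_period > 1:
        # Repeat the last full season across the forecast horizon
        indices = [len(target) - seasonal_period + k % seasonal_period for k in range(prediction_length)]
        mean = target[indices]
        # len(target) >= seasonal_period + 2 guarantees at least 2 residuals, so ddof=1 is well defined
        residuals = target[seasonal_period:] - target[:-seasonal_period]
        sigma = np.std(residuals, ddof=1)
    else:
        # Series too short for a seasonal forecast: repeat the last value with zero spread
        mean = np.full(prediction_length, target[-1])
        sigma = 0.0
    return mean, sigma


mean, sigma = seasonal_naive_with_sigma(np.arange(26, dtype=float), prediction_length=4, seasonal_period=12)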

src/autogluon/timeseries/models/local/statsforecast.py
@@ -255,9 +255,10 @@ class AutoETSModel(AbstractStatsForecastModel):
          time_series: pd.Series,
          local_model_args: dict,
      ) -> pd.DataFrame:
-         # Disable seasonality if time series too short for chosen season_length or season_length == 1,
-         # otherwise model will crash
-         if len(time_series) < 2 * local_model_args["season_length"] or local_model_args["season_length"] == 1:
+         # Disable seasonality if time series too short for chosen season_length, season_length is too high, or
+         # season_length == 1. Otherwise model will crash
+         season_length = local_model_args["season_length"]
+         if len(time_series) < 2 * season_length or season_length == 1 or season_length > 24:
              # changing last character to "N" disables seasonality, e.g., model="AAA" -> model="AAN"
              local_model_args["model"] = local_model_args["model"][:-1] + "N"
          return super()._predict_with_local_model(time_series=time_series, local_model_args=local_model_args)

src/autogluon/timeseries/predictor.py
@@ -16,7 +16,7 @@ from autogluon.timeseries import __version__ as current_ag_version
  from autogluon.timeseries.configs import TIMESERIES_PRESETS_CONFIGS
  from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
  from autogluon.timeseries.learner import AbstractLearner, TimeSeriesLearner
- from autogluon.timeseries.metrics import TimeSeriesScorer
+ from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
  from autogluon.timeseries.splitter import ExpandingWindowSplitter
  from autogluon.timeseries.trainer import AbstractTimeSeriesTrainer

@@ -168,15 +168,7 @@ class TimeSeriesPredictor:
              if std_freq != str(self.freq):
                  logger.info(f"Frequency '{self.freq}' stored as '{std_freq}'")
              self.freq = std_freq
-         # TODO: Change to DeprecationWarning, make sure it's displayed correctly https://github.com/autogluon/autogluon/issues/3465
-         if isinstance(eval_metric, str) and eval_metric == "mean_wQuantileLoss":
-             # We don't use warnings.warn since DeprecationWarning may be silenced by the Python warning filters
-             logger.warning(
-                 "DeprecationWarning: Evaluation metric 'mean_wQuantileLoss' has been renamed to 'WQL'. "
-                 "Support for the old name will be removed in v1.1.",
-             )
-             eval_metric = "WQL"
-         self.eval_metric = eval_metric
+         self.eval_metric = check_get_evaluation_metric(eval_metric)
          self.eval_metric_seasonal_period = eval_metric_seasonal_period
          if quantile_levels is None:
              quantile_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
@@ -758,11 +750,13 @@ class TimeSeriesPredictor:
          self,
          data: Union[TimeSeriesDataFrame, pd.DataFrame, str],
          model: Optional[str] = None,
-         metric: Union[str, TimeSeriesScorer, None] = None,
+         metrics: Optional[Union[str, TimeSeriesScorer, List[Union[str, TimeSeriesScorer]]]] = None,
          use_cache: bool = True,
-     ):
-         """Evaluate the performance for given dataset, computing the score determined by ``self.eval_metric``
-         on the given data set, and with the same ``prediction_length`` used when training models.
+     ) -> Dict[str, float]:
+         """Evaluate the forecast accuracy for given dataset.
+
+         This method measures the forecast accuracy using the last ``self.prediction_length`` time steps of each time
+         series in ``data`` as a hold-out set.

          Parameters
          ----------
@@ -781,25 +775,23 @@
          model : str, optional
              Name of the model that you would like to evaluate. By default, the best model during training
              (with highest validation score) will be used.
-         metric : str or TimeSeriesScorer, optional
-             Evaluation metric to compute scores with. Defaults to ``self.eval_metric``
+         metrics : str, TimeSeriesScorer or List[Union[str, TimeSeriesScorer]], optional
+             Metric or a list of metrics to compute scores with. Defaults to ``self.eval_metric``. Supports both
+             metric names as strings and custom metrics based on TimeSeriesScorer.
          use_cache : bool, default = True
              If True, will attempt to use the cached predictions. If False, cached predictions will be ignored.
              This argument is ignored if ``cache_predictions`` was set to False when creating the ``TimeSeriesPredictor``.

          Returns
          -------
-         score : float
-             A forecast accuracy score, where higher values indicate better quality. For consistency, error metrics
+         scores_dict : Dict[str, float]
+             Dictionary where keys = metrics, values = performance along each metric. For consistency, error metrics
              will have their signs flipped to obey this convention. For example, negative MAPE values will be reported.
+             To get the ``eval_metric`` score, do ``output[predictor.eval_metric.name]``.
          """
          data = self._check_and_prepare_data_frame(data)
          self._check_data_for_evaluation(data)
-         return self._learner.score(data, model=model, metric=metric, use_cache=use_cache)
-
-     def score(self, data: Union[TimeSeriesDataFrame, pd.DataFrame, str], **kwargs):
-         """See, :meth:`~autogluon.timeseries.TimeSeriesPredictor.evaluate`."""
-         return self.evaluate(data, **kwargs)
+         return self._learner.evaluate(data, model=model, metrics=metrics, use_cache=use_cache)

      @classmethod
      def _load_version_file(cls, path: str) -> str:
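
With score() removed and evaluate() reworked, the predictor now returns a dictionary of scores and accepts either a single metric or a list of metrics. A hedged usage sketch based on the docstring above; the predictor and dataset paths are placeholders, and the printed scores depend on your data.

from autogluon.timeseries import TimeSeriesPredictor

# Placeholder paths for a previously fitted predictor and a hold-out dataset
predictor = TimeSeriesPredictor.load("my_predictor/")

# evaluate() now returns {metric name: score}, with error metrics sign-flipped so that higher is better
scores = predictor.evaluate("test_data.csv", metrics=["WQL", "MASE"])
print(scores)

# Without a metrics argument the predictor's own eval_metric is used; read its score back by name
print(predictor.evaluate("test_data.csv")[predictor.eval_metric.name])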

src/autogluon/timeseries/trainer/abstract_trainer.py
@@ -836,11 +836,30 @@ class AbstractTimeSeriesTrainer(SimpleAbstractTrainer):
          metric: Union[str, TimeSeriesScorer, None] = None,
          use_cache: bool = True,
      ) -> float:
+         eval_metric = self.eval_metric if metric is None else check_get_evaluation_metric(metric)
+         scores_dict = self.evaluate(data=data, model=model, metrics=[eval_metric], use_cache=use_cache)
+         return scores_dict[eval_metric.name]
+
+     def evaluate(
+         self,
+         data: TimeSeriesDataFrame,
+         model: Optional[Union[str, AbstractTimeSeriesModel]] = None,
+         metrics: Optional[Union[str, TimeSeriesScorer, List[Union[str, TimeSeriesScorer]]]] = None,
+         use_cache: bool = True,
+     ) -> Dict[str, float]:
          past_data, known_covariates = data.get_model_inputs_for_scoring(
              prediction_length=self.prediction_length, known_covariates_names=self.metadata.known_covariates_real
          )
          predictions = self.predict(data=past_data, known_covariates=known_covariates, model=model, use_cache=use_cache)
-         return self._score_with_predictions(data=data, predictions=predictions, metric=metric)
+         if not isinstance(metrics, list):  # a single metric is provided
+             metrics = [metrics]
+         scores_dict = {}
+         for metric in metrics:
+             eval_metric = self.eval_metric if metric is None else check_get_evaluation_metric(metric)
+             scores_dict[eval_metric.name] = self._score_with_predictions(
+                 data=data, predictions=predictions, metric=eval_metric
+             )
+         return scores_dict

      def _predict_model(
          self,

src/autogluon/timeseries/utils/features.py
@@ -98,9 +98,9 @@ class TimeSeriesFeatureGenerator:

          unused = []
          for col_name in data.static_features.columns:
-             if static[col_name].dtype == "category":
+             if col_name in static.columns and static[col_name].dtype == "category":
                  static_features_cat.append(col_name)
-             elif static[col_name].dtype == np.float64:
+             elif col_name in static.columns and static[col_name].dtype == np.float64:
                  static_features_real.append(col_name)
              else:
                  unused.append(col_name)
@@ -109,7 +109,7 @@ class TimeSeriesFeatureGenerator:
          logger.info(f"\tcategorical: {static_features_cat}")
          logger.info(f"\tcontinuous (float): {static_features_real}")
          if len(unused) > 0:
-             logger.info(f"\tremoved (neither categorical nor continuous): {unused}")
+             logger.info(f"\tremoved (uninformative columns): {unused}")
          logger.info(
              "To learn how to fix incorrectly inferred types, please see documentation for TimeSeriesPredictor.fit "
          )

src/autogluon/timeseries/version.py
@@ -1,3 +1,3 @@
  """This is the autogluon version file."""
- __version__ = '0.8.3b20231101'
+ __version__ = '0.8.3b20231103'
  __lite__ = False

src/autogluon.timeseries.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: autogluon.timeseries
- Version: 0.8.3b20231101
+ Version: 0.8.3b20231103
  Summary: AutoML for Image, Text, and Tabular Data
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community

src/autogluon.timeseries.egg-info/requires.txt
@@ -13,9 +13,9 @@ mlforecast<0.10.1,>=0.10.0
  utilsforecast<0.0.11,>=0.0.10
  tqdm<5,>=4.38
  ujson<6,>=5
- autogluon.core[raytune]==0.8.3b20231101
- autogluon.common==0.8.3b20231101
- autogluon.tabular[catboost,lightgbm,xgboost]==0.8.3b20231101
+ autogluon.core[raytune]==0.8.3b20231103
+ autogluon.common==0.8.3b20231103
+ autogluon.tabular[catboost,lightgbm,xgboost]==0.8.3b20231103

  [all]