autogluon.timeseries 1.0.1b20240408__tar.gz → 1.1.0b20240410__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (63)
  1. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/PKG-INFO +3 -3
  2. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/setup.py +5 -6
  3. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +2 -1
  4. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +25 -1
  5. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/chronos/model.py +26 -43
  6. autogluon.timeseries-1.1.0b20240410/src/autogluon/timeseries/models/chronos/utils.py +66 -0
  7. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/local/abstract_local_model.py +13 -8
  8. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +1 -0
  9. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/version.py +1 -1
  10. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/PKG-INFO +3 -3
  11. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/SOURCES.txt +1 -0
  12. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/requires.txt +7 -9
  13. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/setup.cfg +0 -0
  14. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/__init__.py +0 -0
  15. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/configs/__init__.py +0 -0
  16. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/configs/presets_configs.py +0 -0
  17. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/dataset/__init__.py +0 -0
  18. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
  19. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/evaluator.py +0 -0
  20. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/learner.py +0 -0
  21. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/metrics/__init__.py +0 -0
  22. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/metrics/abstract.py +0 -0
  23. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/metrics/point.py +0 -0
  24. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/metrics/quantile.py +0 -0
  25. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/metrics/utils.py +0 -0
  26. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/__init__.py +0 -0
  27. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
  28. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
  29. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
  30. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
  31. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/chronos/__init__.py +0 -0
  32. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/chronos/pipeline.py +0 -0
  33. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
  34. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -0
  35. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -0
  36. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
  37. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -0
  38. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
  39. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/gluonts/torch/models.py +0 -0
  40. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/local/__init__.py +0 -0
  41. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/local/naive.py +0 -0
  42. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/local/npts.py +0 -0
  43. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/local/statsforecast.py +0 -0
  44. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
  45. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/models/presets.py +0 -0
  46. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/predictor.py +0 -0
  47. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/splitter.py +0 -0
  48. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/trainer/__init__.py +0 -0
  49. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/trainer/abstract_trainer.py +0 -0
  50. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/trainer/auto_trainer.py +0 -0
  51. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/__init__.py +0 -0
  52. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
  53. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
  54. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
  55. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
  56. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
  57. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/features.py +0 -0
  58. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/forecast.py +0 -0
  59. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
  60. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
  61. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
  62. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
  63. {autogluon.timeseries-1.0.1b20240408 → autogluon.timeseries-1.1.0b20240410}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.0.1b20240408
-Summary: AutoML for Image, Text, and Tabular Data
+Version: 1.1.0b20240410
+Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
 License: Apache-2.0
@@ -14,7 +14,7 @@ Description:
 <div align="center">
 <img src="https://user-images.githubusercontent.com/16392542/77208906-224aa500-6aba-11ea-96bd-e81806074030.png" width="350">
 
-## AutoML for Image, Text, Time Series, and Tabular Data
+## Fast and Accurate ML in 3 Lines of Code
 
 [![Latest Release](https://img.shields.io/github/v/release/autogluon/autogluon)](https://github.com/autogluon/autogluon/releases)
 [![Conda Forge](https://img.shields.io/conda/vn/conda-forge/autogluon.svg)](https://anaconda.org/conda-forge/autogluon)
--- a/setup.py
+++ b/setup.py
@@ -56,17 +56,16 @@ extras_require = {
         "black~=23.0",
     ],
     "chronos-openvino": [  # for faster CPU inference in pretrained models with OpenVINO
-        "optimum-intel[openvino,nncf]>=1.16,<1.17",
-        "optimum[openvino,nncf]>=1.18,<1.19",
+        "optimum-intel[openvino,nncf]>=1.15,<1.17",
+        "optimum[openvino,nncf]>=1.17,<1.19",
     ],
     "chronos-onnx": [  # for faster CPU inference in pretrained models with ONNX
-        "optimum[onnxruntime]>=1.18,<1.19",
+        "optimum[onnxruntime]>=1.17,<1.19",
     ],
 }
 
-extras_require["all"] = list(
-    set.union(*(set(extras_require[extra]) for extra in ["chronos-onnx", "chronos-openvino"]))
-)
+# TODO: add openvino back to "all" after dependency versions are relaxed
+extras_require["all"] = list(set.union(*(set(extras_require[extra]) for extra in ["chronos-onnx"])))
 
 install_requires = ag.get_dependency_version_ranges(install_requires)
 
--- a/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py
+++ b/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py
@@ -373,13 +373,14 @@ class AbstractTimeSeriesModel(AbstractModel):
         val_data: TimeSeriesDataFrame,
         store_val_score: bool = False,
         store_predict_time: bool = False,
+        **predict_kwargs,
     ) -> None:
         """Compute val_score, predict_time and cache out-of-fold (OOF) predictions."""
         past_data, known_covariates = val_data.get_model_inputs_for_scoring(
             prediction_length=self.prediction_length, known_covariates_names=self.metadata.known_covariates
         )
         predict_start_time = time.time()
-        oof_predictions = self.predict(past_data, known_covariates=known_covariates)
+        oof_predictions = self.predict(past_data, known_covariates=known_covariates, **predict_kwargs)
         self._oof_predictions = [oof_predictions]
         if store_predict_time:
             self.predict_time = time.time() - predict_start_time
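
Note: the `**predict_kwargs` added above is forwarded from `score_and_cache_oof` straight to `predict`, which is what lets subclasses further down in this diff (the Chronos and local models) inject per-call options such as a time limit. A minimal, self-contained sketch of that forwarding pattern, using illustrative class names rather than the actual AutoGluon classes:

class BaseModel:
    def predict(self, data, **kwargs):
        # Stand-in for real inference; just show which options arrived.
        print("predict received:", kwargs)

    def score_and_cache_oof(self, val_data, store_val_score=False, store_predict_time=False, **predict_kwargs):
        # Any extra keyword arguments are passed through to predict() unchanged.
        self.predict(val_data, **predict_kwargs)


class TimeLimitedModel(BaseModel):
    def score_and_cache_oof(self, val_data, store_val_score=False, store_predict_time=False, **predict_kwargs):
        # The subclass injects its stored time budget at prediction time.
        super().score_and_cache_oof(val_data, store_val_score, store_predict_time, time_limit=30.0, **predict_kwargs)


TimeLimitedModel().score_and_cache_oof([1, 2, 3])  # predict received: {'time_limit': 30.0}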
--- a/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py
+++ b/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py
@@ -174,6 +174,21 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         """
         return df
 
+    def _add_scale_as_static_feature(self, data: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
+        """Add mean/std of the target column for each series as a static feature."""
+        data = data.copy(deep=False)
+        scale_features = (
+            data[self.target]
+            .groupby(ITEMID, sort=False)
+            .agg(["mean", "std"])
+            .rename(columns={"mean": "__target_mean", "std": "__target_scale"})
+        )
+        if data.static_features is None:
+            data.static_features = scale_features
+        else:
+            data.static_features = pd.concat([data.static_features, scale_features], axis=1)
+        return data
+
     @staticmethod
     def _shorten_all_series(mlforecast_df: pd.DataFrame, max_length: int):
         logger.debug(f"Shortening all series to at most {max_length}")
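
Note: the new `_add_scale_as_static_feature` helper is essentially a per-series groupby aggregation on the target column. A pandas-only sketch of the same idea with toy data (plain DataFrames here, not the actual `TimeSeriesDataFrame` API):

import pandas as pd

# Long-format toy data: one row per (item_id, timestamp) observation.
df = pd.DataFrame(
    {
        "item_id": ["A", "A", "A", "B", "B"],
        "target": [10.0, 12.0, 11.0, 100.0, 110.0],
    }
)

# Per-series mean/std of the target, renamed the same way as in the diff above.
scale_features = (
    df.groupby("item_id", sort=False)["target"]
    .agg(["mean", "std"])
    .rename(columns={"mean": "__target_mean", "std": "__target_scale"})
)
print(scale_features)  # item A: mean 11.0, std 1.0; item B: mean 105.0, std ~7.07

These per-item statistics are then attached as static features, so the tabular model can see each series' overall level and spread.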
@@ -252,6 +267,11 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
         if static_features is not None:
             df = pd.merge(df, static_features, how="left", on=ITEMID, suffixes=(None, "_static_feat"))
 
+        for col in self.metadata.known_covariates_real:
+            # Normalize non-boolean features using mean_abs scaling
+            if not df[col].isin([0, 1]).all():
+                df[f"__scaled_{col}"] = df[col] / df[col].abs().groupby(df[ITEMID]).mean().reindex(df[ITEMID]).values
+
         # We assume that df is sorted by 'unique_id' inside `TimeSeriesPredictor._check_and_prepare_data_frame`
         return df.rename(columns=column_name_mapping)
 
@@ -265,6 +285,8 @@ class AbstractMLForecastModel(AbstractTimeSeriesModel):
     ) -> None:
         from mlforecast import MLForecast
 
+        train_data = self._add_scale_as_static_feature(train_data)
+
         self._check_fit_params()
         fit_start_time = time.time()
         self._train_target_median = train_data[self.target].median()
@@ -463,6 +485,7 @@ class DirectTabularModel(AbstractMLForecastModel):
         known_covariates: Optional[TimeSeriesDataFrame] = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
+        data = self._add_scale_as_static_feature(data)
         original_item_id_order = data.item_ids
         data, known_covariates, forecast_for_short_series = self._remove_short_ts_and_generate_fallback_forecast(
             data=data, known_covariates=known_covariates
@@ -479,7 +502,7 @@ class DirectTabularModel(AbstractMLForecastModel):
         # MLForecast raises exception of target contains NaN. We use inf as placeholder, replace them by NaN afterwards
         data_future[self.target] = float("inf")
         data_extended = pd.concat([data, data_future])
-        mlforecast_df = self._to_mlforecast_df(data_extended, data.static_features)
+        mlforecast_df = self._to_mlforecast_df(data_extended, data_extended.static_features)
         if self._max_ts_length is not None:
             # We appended `prediction_length` time steps to each series, so increase length
             mlforecast_df = self._shorten_all_series(mlforecast_df, self._max_ts_length + self.prediction_length)
@@ -594,6 +617,7 @@ class RecursiveTabularModel(AbstractMLForecastModel):
         known_covariates: Optional[TimeSeriesDataFrame] = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
+        data = self._add_scale_as_static_feature(data)
         original_item_id_order = data.item_ids
         data, known_covariates, forecast_for_short_series = self._remove_short_ts_and_generate_fallback_forecast(
             data=data, known_covariates=known_covariates
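
Note: the covariate normalization added to `_to_mlforecast_df` above divides each real-valued known covariate by its per-item mean absolute value, broadcast back to row level with `reindex`. A small pandas sketch of that groupby/reindex trick with toy column names:

import pandas as pd

df = pd.DataFrame(
    {
        "item_id": ["A", "A", "B", "B"],
        "price": [2.0, 4.0, 50.0, 150.0],
    }
)

# Per-item mean absolute value, expanded back to one value per row via reindex.
mean_abs = df["price"].abs().groupby(df["item_id"]).mean().reindex(df["item_id"]).values
df["__scaled_price"] = df["price"] / mean_abs
print(df)  # item A rows divided by 3.0, item B rows by 100.0

Boolean (0/1) covariates are skipped by the `isin([0, 1])` check so that indicator features are left untouched.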
--- a/src/autogluon/timeseries/models/chronos/model.py
+++ b/src/autogluon/timeseries/models/chronos/model.py
@@ -53,42 +53,6 @@ MODEL_ALIASES = {
 }
 
 
-class ChronosInferenceDataset:
-    """A container for time series datasets that implements the ``torch.utils.data.Dataset`` interface"""
-
-    def __init__(
-        self,
-        target_df: TimeSeriesDataFrame,
-        context_length: int,
-        target_column: str = "target",
-    ):
-        assert context_length > 0
-        self.context_length = context_length
-        self.target_array = target_df[target_column].to_numpy(dtype=np.float32)
-        self.freq = target_df.freq
-
-        # store pointer to start:end of each time series
-        cum_sizes = target_df.num_timesteps_per_item().values.cumsum()
-        self.indptr = np.append(0, cum_sizes).astype(np.int32)
-
-    def __len__(self):
-        return len(self.indptr) - 1  # noqa
-
-    def _get_context(self, a: np.ndarray, pad_value=np.nan):
-        a = a[-self.context_length :]
-        pad_size = self.context_length - len(a)
-        if pad_size > 0:
-            pad = np.full(shape=(pad_size,), fill_value=pad_value)
-            a = np.concatenate((pad, a))
-        return a
-
-    def __getitem__(self, idx) -> np.ndarray:
-        start_idx = self.indptr[idx]
-        end_idx = self.indptr[idx + 1]
-
-        return self._get_context(self.target_array[start_idx:end_idx])
-
-
 class ChronosModel(AbstractTimeSeriesModel):
     """Chronos pretrained time series forecasting models, based on the original
     `ChronosModel <https://github.com/amazon-science/chronos-forecasting>`_ implementation.
@@ -196,6 +160,7 @@ class ChronosModel(AbstractTimeSeriesModel):
         )
 
         self.model_pipeline: Optional[Any] = None  # of type OptimizedChronosPipeline
+        self.time_limit: Optional[float] = None
 
     def save(self, path: str = None, verbose: bool = True) -> str:
         pipeline = self.model_pipeline
@@ -288,14 +253,16 @@
         **kwargs,
     ) -> None:
         self._check_fit_params()
+        self.time_limit = time_limit
 
     def _get_inference_data_loader(
         self,
         data: TimeSeriesDataFrame,
         context_length: int,
         num_workers: int = 0,
+        time_limit: Optional[float] = None,
    ):
-        import torch
+        from .utils import ChronosInferenceDataLoader, ChronosInferenceDataset, timeout_callback
 
        chronos_dataset = ChronosInferenceDataset(
            target_df=data,
@@ -303,11 +270,12 @@
            context_length=context_length,
        )
 
-        return torch.utils.data.DataLoader(
+        return ChronosInferenceDataLoader(
            chronos_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=num_workers,
+            on_batch=timeout_callback(seconds=time_limit),
        )
 
     def _predict(
@@ -333,6 +301,12 @@
         # load model pipeline to device memory
         self.load_model_pipeline(context_length=context_length)
 
+        inference_data_loader = self._get_inference_data_loader(
+            data=data,
+            num_workers=self.data_loader_num_workers,
+            context_length=context_length,
+            time_limit=kwargs.get("time_limit"),
+        )
         self.model_pipeline.model.eval()
         with torch.inference_mode():
             prediction_samples = [
@@ -345,11 +319,7 @@
                 .detach()
                 .cpu()
                 .numpy()
-                for batch in self._get_inference_data_loader(
-                    data=data,
-                    num_workers=self.data_loader_num_workers,
-                    context_length=context_length,
-                )
+                for batch in inference_data_loader
             ]
 
         samples = np.concatenate(prediction_samples, axis=0).swapaxes(1, 2).reshape(-1, self.num_samples)
@@ -367,3 +337,16 @@
 
     def _more_tags(self) -> Dict:
         return {"allow_nan": True}
+
+    def score_and_cache_oof(
+        self,
+        val_data: TimeSeriesDataFrame,
+        store_val_score: bool = False,
+        store_predict_time: bool = False,
+        **predict_kwargs,
+    ) -> None:
+        # All computation happens during inference, so we provide the time_limit at prediction time
+        # TODO: Once custom predict_kwargs is allowed, make sure that `time_limit` is not among the keys
+        super().score_and_cache_oof(
+            val_data, store_val_score, store_predict_time, time_limit=self.time_limit, **predict_kwargs
+        )
--- /dev/null
+++ b/src/autogluon/timeseries/models/chronos/utils.py
@@ -0,0 +1,66 @@
+import time
+from typing import Callable, Optional
+
+import numpy as np
+import torch
+
+from autogluon.core.utils.exceptions import TimeLimitExceeded
+from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame
+
+
+class ChronosInferenceDataset:
+    """A container for time series datasets that implements the ``torch.utils.data.Dataset`` interface"""
+
+    def __init__(
+        self,
+        target_df: TimeSeriesDataFrame,
+        context_length: int,
+        target_column: str = "target",
+    ):
+        assert context_length > 0
+        self.context_length = context_length
+        self.target_array = target_df[target_column].to_numpy(dtype=np.float32)
+        self.freq = target_df.freq
+
+        # store pointer to start:end of each time series
+        cum_sizes = target_df.num_timesteps_per_item().values.cumsum()
+        self.indptr = np.append(0, cum_sizes).astype(np.int32)
+
+    def __len__(self):
+        return len(self.indptr) - 1  # noqa
+
+    def _get_context(self, a: np.ndarray, pad_value=np.nan):
+        a = a[-self.context_length :]
+        pad_size = self.context_length - len(a)
+        if pad_size > 0:
+            pad = np.full(shape=(pad_size,), fill_value=pad_value)
+            a = np.concatenate((pad, a))
+        return a
+
+    def __getitem__(self, idx) -> np.ndarray:
+        start_idx = self.indptr[idx]
+        end_idx = self.indptr[idx + 1]
+
+        return self._get_context(self.target_array[start_idx:end_idx])
+
+
+class ChronosInferenceDataLoader(torch.utils.data.DataLoader):
+    def __init__(self, *args, **kwargs):
+        self.callback: Callable = kwargs.pop("on_batch", lambda: None)
+        super().__init__(*args, **kwargs)
+
+    def __iter__(self):
+        for item in super().__iter__():
+            yield item
+            self.callback()
+
+
+def timeout_callback(seconds: Optional[float]) -> Callable:
+    """Return a callback object that raises an exception if time limit is exceeded."""
+    start_time = time.time()
+
+    def callback() -> None:
+        if seconds is not None and time.time() - start_time > seconds:
+            raise TimeLimitExceeded
+
+    return callback
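
Note: the new `ChronosInferenceDataLoader` and `timeout_callback` work together so that the inference time budget is checked after every batch instead of only once per predict call. A standalone sketch of the same pattern using local stand-in names (not the AutoGluon imports), runnable with just torch installed:

import time

import torch


class CallbackDataLoader(torch.utils.data.DataLoader):
    """DataLoader that invokes a callback after yielding each batch."""

    def __init__(self, *args, on_batch=None, **kwargs):
        self.on_batch = on_batch or (lambda: None)
        super().__init__(*args, **kwargs)

    def __iter__(self):
        for batch in super().__iter__():
            yield batch
            self.on_batch()  # check the time budget after every batch


def make_timeout_callback(seconds):
    start = time.time()

    def callback():
        if seconds is not None and time.time() - start > seconds:
            raise TimeoutError("inference time limit exceeded")

    return callback


dataset = torch.utils.data.TensorDataset(torch.rand(256, 16))
loader = CallbackDataLoader(dataset, batch_size=32, on_batch=make_timeout_callback(seconds=5.0))
for (batch,) in loader:
    _ = batch.mean()  # stand-in for running the model on the batch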
--- a/src/autogluon/timeseries/models/local/abstract_local_model.py
+++ b/src/autogluon/timeseries/models/local/abstract_local_model.py
@@ -144,9 +144,10 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
 
         # timeout ensures that no individual job takes longer than time_limit
         # TODO: a job started late may still exceed time_limit - how to prevent that?
-        timeout = None if self.n_jobs == 1 else self.time_limit
+        time_limit = kwargs.get("time_limit")
+        timeout = None if self.n_jobs == 1 else time_limit
         # end_time ensures that no new jobs are started after time_limit is exceeded
-        end_time = None if self.time_limit is None else time.time() + self.time_limit
+        end_time = None if time_limit is None else time.time() + time_limit
         executor = Parallel(self.n_jobs, timeout=timeout)
 
         try:
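
Note: the rewritten block above keeps two layers of time budgeting: joblib's `timeout` caps each individual prediction job (only effective when `n_jobs != 1`), while `end_time` stops jobs that start after the overall budget is spent. A rough, self-contained sketch of that pattern with a toy task function (not the AutoGluon internals):

import time

from joblib import Parallel, delayed


def predict_one(series_id, end_time=None):
    # Jobs that start after the overall deadline refuse to run at all.
    if end_time is not None and time.time() >= end_time:
        raise TimeoutError("overall time limit exceeded")
    time.sleep(0.01)  # stand-in for fitting/predicting a local model
    return series_id


time_limit = 2.0
timeout = time_limit  # per-job cap enforced by joblib when n_jobs != 1
end_time = time.time() + time_limit  # global deadline checked inside each job
executor = Parallel(n_jobs=2, timeout=timeout)
results = executor(delayed(predict_one)(i, end_time=end_time) for i in range(10))
print(len(results))  # 10 quick jobs finish well within the budget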
@@ -169,19 +170,24 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
         return TimeSeriesDataFrame(predictions_df)
 
     def score_and_cache_oof(
-        self, val_data: TimeSeriesDataFrame, store_val_score: bool = False, store_predict_time: bool = False
+        self,
+        val_data: TimeSeriesDataFrame,
+        store_val_score: bool = False,
+        store_predict_time: bool = False,
+        **predict_kwargs,
     ) -> None:
-        super().score_and_cache_oof(val_data, store_val_score, store_predict_time)
-        # Remove time_limit for future predictions
-        self.time_limit = None
+        # All computation happens during inference, so we provide the time_limit at prediction time
+        super().score_and_cache_oof(
+            val_data, store_val_score, store_predict_time, time_limit=self.time_limit, **predict_kwargs
+        )
 
     def _predict_wrapper(self, time_series: pd.Series, end_time: Optional[float] = None) -> Tuple[pd.DataFrame, bool]:
         if end_time is not None and time.time() >= end_time:
             raise TimeLimitExceeded
 
+        model_failed = False
         if time_series.isna().all():
             result = self._dummy_forecast.copy()
-            model_failed = True
         else:
             try:
                 result = self._predict_with_local_model(
@@ -190,7 +196,6 @@ class AbstractLocalModel(AbstractTimeSeriesModel):
                 )
                 if not np.isfinite(result.values).all():
                     raise RuntimeError("Forecast contains NaN or Inf values.")
-                model_failed = False
             except Exception:
                 if self.use_fallback_model:
                     result = seasonal_naive_forecast(
--- a/src/autogluon/timeseries/models/multi_window/multi_window_model.py
+++ b/src/autogluon/timeseries/models/multi_window/multi_window_model.py
@@ -189,6 +189,7 @@ class MultiWindowBacktestingModel(AbstractTimeSeriesModel):
         val_data: TimeSeriesDataFrame,
         store_val_score: bool = False,
         store_predict_time: bool = False,
+        **predict_kwargs,
     ) -> None:
         # self.val_score, self.predict_time, self._oof_predictions already saved during _fit()
         assert self._oof_predictions is not None
--- a/src/autogluon/timeseries/version.py
+++ b/src/autogluon/timeseries/version.py
@@ -1,3 +1,3 @@
 """This is the autogluon version file."""
-__version__ = '1.0.1b20240408'
+__version__ = '1.1.0b20240410'
 __lite__ = False
--- a/src/autogluon.timeseries.egg-info/PKG-INFO
+++ b/src/autogluon.timeseries.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.0.1b20240408
-Summary: AutoML for Image, Text, and Tabular Data
+Version: 1.1.0b20240410
+Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
 License: Apache-2.0
@@ -14,7 +14,7 @@ Description:
 <div align="center">
 <img src="https://user-images.githubusercontent.com/16392542/77208906-224aa500-6aba-11ea-96bd-e81806074030.png" width="350">
 
-## AutoML for Image, Text, Time Series, and Tabular Data
+## Fast and Accurate ML in 3 Lines of Code
 
 [![Latest Release](https://img.shields.io/github/v/release/autogluon/autogluon)](https://github.com/autogluon/autogluon/releases)
 [![Conda Forge](https://img.shields.io/conda/vn/conda-forge/autogluon.svg)](https://anaconda.org/conda-forge/autogluon)
--- a/src/autogluon.timeseries.egg-info/SOURCES.txt
+++ b/src/autogluon.timeseries.egg-info/SOURCES.txt
@@ -32,6 +32,7 @@ src/autogluon/timeseries/models/autogluon_tabular/utils.py
 src/autogluon/timeseries/models/chronos/__init__.py
 src/autogluon/timeseries/models/chronos/model.py
 src/autogluon/timeseries/models/chronos/pipeline.py
+src/autogluon/timeseries/models/chronos/utils.py
 src/autogluon/timeseries/models/ensemble/__init__.py
 src/autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py
 src/autogluon/timeseries/models/ensemble/greedy_ensemble.py
--- a/src/autogluon.timeseries.egg-info/requires.txt
+++ b/src/autogluon.timeseries.egg-info/requires.txt
@@ -16,21 +16,19 @@ utilsforecast<0.0.11,>=0.0.10
 tqdm<5,>=4.38
 orjson~=3.9
 tensorboard<3,>=2.9
-autogluon.core[raytune]==1.0.1b20240408
-autogluon.common==1.0.1b20240408
-autogluon.tabular[catboost,lightgbm,xgboost]==1.0.1b20240408
+autogluon.core[raytune]==1.1.0b20240410
+autogluon.common==1.1.0b20240410
+autogluon.tabular[catboost,lightgbm,xgboost]==1.1.0b20240410
 
 [all]
-optimum[onnxruntime]<1.19,>=1.18
-optimum-intel[nncf,openvino]<1.17,>=1.16
-optimum[nncf,openvino]<1.19,>=1.18
+optimum[onnxruntime]<1.19,>=1.17
 
 [chronos-onnx]
-optimum[onnxruntime]<1.19,>=1.18
+optimum[onnxruntime]<1.19,>=1.17
 
 [chronos-openvino]
-optimum-intel[nncf,openvino]<1.17,>=1.16
-optimum[nncf,openvino]<1.19,>=1.18
+optimum-intel[nncf,openvino]<1.17,>=1.15
+optimum[nncf,openvino]<1.19,>=1.17
 
 [tests]
 pytest