autogluon.timeseries 1.2.1b20250420__py3-none-any.whl → 1.2.1b20250422__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (20)
  1. autogluon/timeseries/learner.py +4 -0
  2. autogluon/timeseries/metrics/__init__.py +30 -1
  3. autogluon/timeseries/metrics/abstract.py +10 -0
  4. autogluon/timeseries/metrics/point.py +131 -41
  5. autogluon/timeseries/metrics/quantile.py +36 -15
  6. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +7 -0
  7. autogluon/timeseries/models/ensemble/greedy_ensemble.py +8 -1
  8. autogluon/timeseries/models/presets.py +4 -0
  9. autogluon/timeseries/predictor.py +12 -1
  10. autogluon/timeseries/trainer.py +5 -0
  11. autogluon/timeseries/version.py +1 -1
  12. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/METADATA +4 -4
  13. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/RECORD +20 -20
  14. /autogluon.timeseries-1.2.1b20250420-py3.9-nspkg.pth → /autogluon.timeseries-1.2.1b20250422-py3.9-nspkg.pth +0 -0
  15. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/LICENSE +0 -0
  16. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/NOTICE +0 -0
  17. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/WHEEL +0 -0
  18. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/namespace_packages.txt +0 -0
  19. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/top_level.txt +0 -0
  20. {autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/zip-safe +0 -0

autogluon/timeseries/learner.py
@@ -3,6 +3,7 @@ import reprlib
 import time
 from typing import Any, Dict, List, Literal, Optional, Type, Union
 
+import numpy as np
 import pandas as pd
 
 from autogluon.core.learner import AbstractLearner
@@ -30,6 +31,7 @@ class TimeSeriesLearner(AbstractLearner):
         trainer_type: Type[TimeSeriesTrainer] = TimeSeriesTrainer,
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
         prediction_length: int = 1,
         cache_predictions: bool = True,
         ensemble_model_type: Optional[Type] = None,
@@ -38,6 +40,7 @@ class TimeSeriesLearner(AbstractLearner):
        super().__init__(path_context=path_context)
        self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
        self.eval_metric_seasonal_period = eval_metric_seasonal_period
+       self.horizon_weight = horizon_weight
        self.trainer_type = trainer_type
        self.target = target
        self.known_covariates_names = [] if known_covariates_names is None else known_covariates_names
@@ -83,6 +86,7 @@ class TimeSeriesLearner(AbstractLearner):
            prediction_length=self.prediction_length,
            eval_metric=self.eval_metric,
            eval_metric_seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
            target=self.target,
            quantile_levels=self.quantile_levels,
            verbosity=kwargs.get("verbosity", 2),

autogluon/timeseries/metrics/__init__.py
@@ -1,5 +1,8 @@
 from pprint import pformat
-from typing import Type, Union
+from typing import Optional, Type, Union, overload
+
+import numpy as np
+import numpy.typing as npt
 
 from .abstract import TimeSeriesScorer
 from .point import MAE, MAPE, MASE, MSE, RMSE, RMSLE, RMSSE, SMAPE, WAPE, WCD
@@ -75,3 +78,29 @@ def check_get_evaluation_metric(
            f"(received eval_metric = {eval_metric} of type {type(eval_metric)})"
        )
    return scorer
+
+@overload
+def check_get_horizon_weight(horizon_weight: None, prediction_length: int) -> None: ...
+@overload
+def check_get_horizon_weight(horizon_weight: list[float], prediction_length: int) -> np.ndarray: ...
+
+def check_get_horizon_weight(horizon_weight: list[float] | None, prediction_length: int) -> Optional[np.ndarray]:
+    """Convert horizon_weight to a non-negative numpy array that sums up to prediction_length.
+
+    Raises an exception if horizon_weight has an invalid shape or contains invalid values.
+    """
+    if horizon_weight is None:
+        return None
+    horizon_weight_np = np.array(list(horizon_weight), dtype=np.float64)
+    if horizon_weight_np.shape != (prediction_length,):
+        raise ValueError(
+            f"horizon_weight must have length equal to {prediction_length=} (got {len(horizon_weight)=})"
+        )
+    if not (horizon_weight_np >= 0).all():
+        raise ValueError(f"All values in horizon_weight must be >= 0 (got {horizon_weight})")
+    if not horizon_weight_np.sum() > 0:
+        raise ValueError(f"At least some values in horizon_weight must be > 0 (got {horizon_weight})")
+    if not np.isfinite(horizon_weight_np).all():
+        raise ValueError(f"All horizon_weight values must be finite (got {horizon_weight})")
+    horizon_weight_np = horizon_weight_np * prediction_length / horizon_weight_np.sum()
+    return horizon_weight_np
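
For reference, a minimal usage sketch of the new helper (assuming an installed build that includes this change, i.e. 1.2.1b20250422 or later). The values follow directly from the normalization above: weights are rescaled so that they sum to prediction_length.

    from autogluon.timeseries.metrics import check_get_horizon_weight

    # [0, 1, 3] sums to 4, so each weight is scaled by 3 / 4.
    weights = check_get_horizon_weight([0.0, 1.0, 3.0], prediction_length=3)
    print(weights)  # [0.   0.75 2.25]

    # Wrong length, negative entries, all-zero weights, or non-finite values raise ValueError.
    check_get_horizon_weight([1.0, 1.0], prediction_length=3)  # raises ValueError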

autogluon/timeseries/metrics/abstract.py
@@ -69,6 +69,7 @@ class TimeSeriesScorer:
         prediction_length: int = 1,
         target: str = "target",
         seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         seasonal_period = get_seasonality(data.freq) if seasonal_period is None else seasonal_period
@@ -92,6 +93,8 @@ class TimeSeriesScorer:
                 data_future=data_future,
                 predictions=predictions,
                 target=target,
+                prediction_length=prediction_length,
+                horizon_weight=horizon_weight,
                 **kwargs,
             )
         finally:
@@ -105,6 +108,8 @@ class TimeSeriesScorer:
         data_future: TimeSeriesDataFrame,
         predictions: TimeSeriesDataFrame,
         target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         """Internal method that computes the metric for given forecast & actual data.
@@ -121,6 +126,11 @@ class TimeSeriesScorer:
            columns corresponding to each of the quantile levels. Must have the same index as ``data_future``.
        target : str, default = "target"
            Name of the column in ``data_future`` that contains the target time series.
+       prediction_length : int, default = 1
+           Length of the forecast horizon in time steps.
+       horizon_weight : np.ndarray, optional
+           Weight assigned to each time step in the forecast horizon when computing the metric. If provided, this list
+           must contain `prediction_length` non-negative values, with `sum(horizon_weight) = prediction_length`.
 
        Returns
        -------

autogluon/timeseries/metrics/point.py
@@ -38,10 +38,20 @@ class RMSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "root_mean_squared_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return np.sqrt(self._safemean((y_true - y_pred) ** 2))
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return np.sqrt(self._safemean(errors))
 
 
 class MSE(TimeSeriesScorer):
@@ -69,10 +79,20 @@ class MSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_squared_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return self._safemean((y_true - y_pred) ** 2)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return self._safemean(errors)
 
 
 class MAE(TimeSeriesScorer):
@@ -98,10 +118,20 @@ class MAE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return self._safemean((y_true - y_pred).abs())
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return self._safemean(errors)
 
 
 class WAPE(TimeSeriesScorer):
@@ -119,6 +149,7 @@ class WAPE(TimeSeriesScorer):
     - not sensitive to outliers
     - prefers models that accurately estimate the median
 
+    If `horizon_weight` is provided, both the errors and the target time series in the denominator will be re-weighted.
 
     References
     ----------
@@ -129,10 +160,21 @@ class WAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return (y_true - y_pred).abs().sum() / y_true.abs().sum()
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+            y_true = y_true.reshape([-1, prediction_length]) * horizon_weight.reshape([1, prediction_length])
+        return np.nansum(errors) / np.nansum(np.abs(y_true))
 
 
 class SMAPE(TimeSeriesScorer):
@@ -158,10 +200,20 @@ class SMAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "symmetric_mean_absolute_percentage_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return self._safemean(2 * ((y_true - y_pred).abs() / (y_true.abs() + y_pred.abs())))
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = (np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return 2 * self._safemean(errors)
 
 
 class MAPE(TimeSeriesScorer):
@@ -187,10 +239,20 @@ class MAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_percentage_error"
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        return self._safemean((y_true - y_pred).abs() / y_true.abs())
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = (np.abs(y_true - y_pred) / np.abs(y_true)).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return self._safemean(errors)
 
 
 class MASE(TimeSeriesScorer):
@@ -240,16 +302,24 @@ class MASE(TimeSeriesScorer):
         self._past_abs_seasonal_error = None
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
-        num_items = len(self._past_abs_seasonal_error)
-        # Reshape abs errors into [num_items, prediction_length] to normalize per item without groupby
-        abs_errors = np.abs(y_true.to_numpy() - y_pred.to_numpy()).reshape([num_items, -1])
-        return self._safemean(abs_errors / self._past_abs_seasonal_error.values[:, None])
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+
+        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])
 
 
 class RMSSE(TimeSeriesScorer):
@@ -300,16 +370,23 @@ class RMSSE(TimeSeriesScorer):
         self._past_squared_seasonal_error = None
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_squared_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
-        num_items = len(self._past_squared_seasonal_error)
-        # Reshape squared errors into [num_items, prediction_length] to normalize per item without groupby
-        squared_errors = ((y_true.to_numpy() - y_pred.to_numpy()) ** 2.0).reshape([num_items, -1])
-        return np.sqrt(self._safemean(squared_errors / self._past_squared_seasonal_error.values[:, None]))
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
+        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return np.sqrt(self._safemean(errors / self._past_squared_seasonal_error.to_numpy()[:, None]))
 
 
 class RMSLE(TimeSeriesScorer):
@@ -336,12 +413,22 @@
     """
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
+        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
        y_pred = np.clip(y_pred, a_min=0.0, a_max=None)
 
-        return np.sqrt(np.power(np.log1p(y_pred) - np.log1p(y_true), 2).mean())
+        errors = np.power(np.log1p(y_pred) - np.log1p(y_true), 2).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return np.sqrt(self._safemean(errors))
 
     def __call__(
         self,
@@ -350,6 +437,7 @@
         prediction_length: int = 1,
         target: str = "target",
         seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         if (data[target] < 0).any():
@@ -360,6 +448,7 @@
            prediction_length=prediction_length,
            target=target,
            seasonal_period=seasonal_period,
+           horizon_weight=horizon_weight,
            **kwargs,
        )
 
@@ -382,35 +471,36 @@ class WCD(TimeSeriesScorer):
     Parameters
     ----------
     alpha : float, default = 0.5
-        Values > 0.5 correspond put a stronger penalty on underpredictions (when cumulative forecast is below the
+        Values > 0.5 put a stronger penalty on underpredictions (when cumulative forecast is below the
         cumulative actual value). Values < 0.5 put a stronger penalty on overpredictions.
     """
 
     def __init__(self, alpha: float = 0.5):
         assert 0 < alpha < 1, "alpha must be in (0, 1)"
         self.alpha = alpha
-        self.num_items: Optional[int] = None
        warnings.warn(
            f"{self.name} is an experimental metric. Its behavior may change in the future version of AutoGluon."
        )
 
-    def save_past_metrics(
-        self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
-    ) -> None:
-        self.num_items = data_past.num_items
-
-    def _fast_cumsum(self, y: np.ndarray) -> np.ndarray:
+    def _fast_cumsum(self, y: np.ndarray, prediction_length: int) -> np.ndarray:
         """Compute the cumulative sum for each consecutive `prediction_length` items in the array."""
-        assert self.num_items is not None, "Make sure to call `save_past_metrics` before `compute_metric`"
-        y = y.reshape(self.num_items, -1)
+        y = y.reshape(-1, prediction_length)
         return np.nancumsum(y, axis=1).ravel()
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
    ) -> float:
        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        cumsum_true = self._fast_cumsum(y_true.to_numpy())
-        cumsum_pred = self._fast_cumsum(y_pred.to_numpy())
+        cumsum_true = self._fast_cumsum(y_true.to_numpy(), prediction_length=prediction_length)
+        cumsum_pred = self._fast_cumsum(y_pred.to_numpy(), prediction_length=prediction_length)
        diffs = cumsum_pred - cumsum_true
-        error = diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))
-        return 2 * self._safemean(error)
+        errors = (diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))).reshape([-1, prediction_length])
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return 2 * self._safemean(errors)
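
All point metrics above apply the same weighting pattern: per-step errors are reshaped to [num_items, prediction_length] and multiplied by the (already normalized) horizon_weight before averaging. A self-contained numpy sketch that mirrors this pattern (it does not call the library's private compute_metric API):

    from typing import Optional

    import numpy as np

    def weighted_mae(y_true: np.ndarray, y_pred: np.ndarray, prediction_length: int,
                     horizon_weight: Optional[np.ndarray] = None) -> float:
        """MAE with per-step weights applied across the forecast horizon."""
        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
        if horizon_weight is not None:
            errors = errors * horizon_weight.reshape([1, prediction_length])
        return float(np.nanmean(errors))

    # Two items, horizon of 3 steps each; absolute errors are [0, 1, 2] for each item.
    y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    y_pred = np.array([1.0, 1.0, 1.0, 4.0, 4.0, 4.0])
    print(weighted_mae(y_true, y_pred, prediction_length=3))                          # 1.0
    # Uniform weights that sum to prediction_length leave the metric unchanged.
    print(weighted_mae(y_true, y_pred, 3, horizon_weight=np.array([1.0, 1.0, 1.0])))  # 1.0
    # Putting all weight on the last step emphasizes long-horizon errors.
    print(weighted_mae(y_true, y_pred, 3, horizon_weight=np.array([0.0, 0.0, 3.0])))  # 2.0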

autogluon/timeseries/metrics/quantile.py
@@ -25,6 +25,7 @@ class WQL(TimeSeriesScorer):
     - scale-dependent (time series with large absolute value contribute more to the loss)
     - equivalent to WAPE if ``quantile_levels = [0.5]``
 
+    If `horizon_weight` is provided, both the errors and the target time series in the denominator will be re-weighted.
 
     References
     ----------
@@ -34,16 +35,27 @@
     needs_quantile = True
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-        values_true = y_true.values[:, None]  # shape [N, 1]
-        values_pred = q_pred.values  # shape [N, len(quantile_levels)]
+        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
+        q_pred = q_pred.to_numpy()  # shape [N, len(quantile_levels)]
 
-        return 2 * np.mean(
-            np.nansum(np.abs((values_true - values_pred) * ((values_true <= values_pred) - quantile_levels)), axis=0)
-            / np.nansum(np.abs(values_true))
+        errors = (
+            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
+            .mean(axis=1)
+            .reshape([-1, prediction_length])
        )
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+            y_true = y_true.reshape([-1, prediction_length]) * horizon_weight.reshape([1, prediction_length])
+        return 2 * np.nansum(errors) / np.nansum(np.abs(y_true))
 
 
 class SQL(TimeSeriesScorer):
@@ -93,17 +105,26 @@
         self._past_abs_seasonal_error = None
 
     def compute_metric(
-        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
+        self,
+        data_future: TimeSeriesDataFrame,
+        predictions: TimeSeriesDataFrame,
+        target: str = "target",
+        prediction_length: int = 1,
+        horizon_weight: Optional[np.ndarray] = None,
+        **kwargs,
     ) -> float:
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-        q_pred = q_pred.values
-        values_true = y_true.values[:, None]  # shape [N, 1]
-
-        ql = np.abs((q_pred - values_true) * ((values_true <= q_pred) - quantile_levels)).mean(axis=1)
-        num_items = len(self._past_abs_seasonal_error)
-        # Reshape quantile losses values into [num_items, prediction_length] to normalize per item without groupby
-        quantile_losses = ql.reshape([num_items, -1])
-        return 2 * self._safemean(quantile_losses / self._past_abs_seasonal_error.values[:, None])
+        q_pred = q_pred.to_numpy()
+        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
+
+        errors = (
+            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
+            .mean(axis=1)
+            .reshape([-1, prediction_length])
+        )
+        if horizon_weight is not None:
+            errors *= horizon_weight.reshape([1, prediction_length])
+        return 2 * self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])
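
For the normalized metrics (WQL here and WAPE earlier), the weights are applied to both the error term and the |y_true| denominator, as the updated docstrings note. A hedged numpy sketch of that numerator/denominator re-weighting, written with plain absolute errors for brevity (it mirrors the pattern in the diff, not the library code itself):

    from typing import Optional

    import numpy as np

    def weighted_wape(y_true: np.ndarray, y_pred: np.ndarray, prediction_length: int,
                      horizon_weight: Optional[np.ndarray] = None) -> float:
        """WAPE-style score where both the absolute errors and the denominator are re-weighted."""
        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
        y_true = y_true.reshape([-1, prediction_length])
        if horizon_weight is not None:
            w = horizon_weight.reshape([1, prediction_length])
            errors = errors * w
            y_true = y_true * w
        return float(np.nansum(errors) / np.nansum(np.abs(y_true)))

    y_true = np.array([10.0, 10.0, 10.0, 20.0, 20.0, 20.0])
    y_pred = np.array([10.0, 12.0, 16.0, 20.0, 22.0, 26.0])
    print(weighted_wape(y_true, y_pred, prediction_length=3))                          # 16 / 90 ≈ 0.178
    # All weight on the last step reduces to the WAPE of that step: 12 / 30 = 0.4.
    print(weighted_wape(y_true, y_pred, 3, horizon_weight=np.array([0.0, 0.0, 3.0])))  # 0.4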

autogluon/timeseries/models/abstract/abstract_timeseries_model.py
@@ -8,6 +8,7 @@ import time
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
+import numpy as np
 import pandas as pd
 from typing_extensions import Self
 
@@ -59,6 +60,9 @@ class TimeSeriesModelBase(ModelBase, ABC):
    eval_metric_seasonal_period : int, optional
        Seasonal period used to compute some evaluation metrics such as mean absolute scaled error (MASE). Defaults to
        ``None``, in which case the seasonal period is computed based on the data frequency.
+   horizon_weight : np.ndarray, optional
+       Weight assigned to each time step in the forecast horizon when computing the metric. If provided, this list
+       must contain `prediction_length` non-negative values, with `sum(horizon_weight) = prediction_length`.
    hyperparameters : dict, default = None
        Hyperparameters that will be used by the model (can be search spaces instead of fixed values).
        If None, model defaults are used. This is identical to passing an empty dictionary.
@@ -88,6 +92,7 @@ class TimeSeriesModelBase(ModelBase, ABC):
         quantile_levels: Sequence[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
     ):
         self.name = name or re.sub(r"Model$", "", self.__class__.__name__)
 
@@ -104,6 +109,7 @@ class TimeSeriesModelBase(ModelBase, ABC):
 
        self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
        self.eval_metric_seasonal_period = eval_metric_seasonal_period
+       self.horizon_weight = horizon_weight
        self.target: str = target
        self.covariate_metadata = covariate_metadata or CovariateMetadata()
 
@@ -322,6 +328,7 @@ class TimeSeriesModelBase(ModelBase, ABC):
            prediction_length=self.prediction_length,
            target=self.target,
            seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
        )
 
    def score(self, data: TimeSeriesDataFrame, metric: Optional[str] = None) -> float:  # type: ignore

autogluon/timeseries/models/ensemble/greedy_ensemble.py
@@ -28,6 +28,7 @@ class TimeSeriesEnsembleSelection(EnsembleSelection):
         prediction_length: int = 1,
         target: str = "target",
         eval_metric_seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ):
         super().__init__(
@@ -43,6 +44,7 @@ class TimeSeriesEnsembleSelection(EnsembleSelection):
        self.prediction_length = prediction_length
        self.target = target
        self.eval_metric_seasonal_period = eval_metric_seasonal_period
+       self.horizon_weight = horizon_weight
 
    def _fit(
        self,
@@ -90,7 +92,11 @@ class TimeSeriesEnsembleSelection(EnsembleSelection):
            dummy_pred[list(dummy_pred.columns)] = y_pred_proba[window_idx]
            # We use scorer.compute_metric instead of scorer.score to avoid repeated calls to scorer.save_past_metrics
            metric_value = self.scorer_per_window[window_idx].compute_metric(
-                data_future, dummy_pred, target=self.target
+                data_future,
+                dummy_pred,
+                target=self.target,
+                prediction_length=self.prediction_length,
+                horizon_weight=self.horizon_weight,
            )
            total_score += metric.sign * metric_value
        avg_score = total_score / len(self.data_future_per_window)
@@ -123,6 +129,7 @@ class TimeSeriesGreedyEnsemble(AbstractTimeSeriesEnsembleModel):
            prediction_length=self.prediction_length,
            target=self.target,
            eval_metric_seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
        )
        ensemble_selection.fit(
            predictions=list(predictions_per_window.values()),

autogluon/timeseries/models/presets.py
@@ -4,6 +4,8 @@ import re
 from collections import defaultdict
 from typing import Any, Dict, List, Optional, Type, Union
 
+import numpy as np
+
 from autogluon.common import space
 from autogluon.core import constants
 from autogluon.timeseries.metrics import TimeSeriesScorer
@@ -184,6 +186,7 @@ def get_preset_models(
    path: str,
    eval_metric: Union[str, TimeSeriesScorer],
    eval_metric_seasonal_period: Optional[int],
+   horizon_weight: Optional[np.ndarray],
    hyperparameters: Union[str, Dict, None],
    hyperparameter_tune: bool,
    covariate_metadata: CovariateMetadata,
@@ -262,6 +265,7 @@ def get_preset_models(
            eval_metric=eval_metric,
            eval_metric_seasonal_period=eval_metric_seasonal_period,
            covariate_metadata=covariate_metadata,
+           horizon_weight=horizon_weight,
            hyperparameters=model_hps,
            **kwargs,
        )

autogluon/timeseries/predictor.py
@@ -24,7 +24,7 @@ from autogluon.timeseries import __version__ as current_ag_version
 from autogluon.timeseries.configs import TIMESERIES_PRESETS_CONFIGS
 from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
 from autogluon.timeseries.learner import TimeSeriesLearner
-from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
+from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric, check_get_horizon_weight
 from autogluon.timeseries.splitter import ExpandingWindowSplitter
 from autogluon.timeseries.trainer import TimeSeriesTrainer
 from autogluon.timeseries.utils.forecast import make_future_data_frame
@@ -93,6 +93,14 @@ class TimeSeriesPredictor:
    eval_metric_seasonal_period : int, optional
        Seasonal period used to compute some evaluation metrics such as mean absolute scaled error (MASE). Defaults to
        ``None``, in which case the seasonal period is computed based on the data frequency.
+   horizon_weight : List[float], optional
+       Weight assigned to each time step in the forecast horizon when computing the `eval_metric`. If provided, this
+       must be a list with `prediction_length` non-negative values, where at least some values are greater than zero.
+       AutoGluon will automatically normalize the weights so that they sum up to `prediction_length`. By default, all
+       time steps in the forecast horizon have the same weight, which is equivalent to setting `horizon_weight = [1] * prediction_length`.
+
+       This parameter only affects model selection and ensemble construction; it has no effect on the loss function of
+       the individual forecasting models.
    known_covariates_names: List[str], optional
        Names of the covariates that are known in advance for all time steps in the forecast horizon. These are also
        known as dynamic features, exogenous variables, additional regressors or related time series. Examples of such
@@ -144,6 +152,7 @@ class TimeSeriesPredictor:
         freq: Optional[str] = None,
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
+        horizon_weight: list[float] | None = None,
         path: Optional[Union[str, Path]] = None,
         verbosity: int = 2,
         log_to_file: bool = True,
@@ -189,6 +198,7 @@ class TimeSeriesPredictor:
        self.freq = std_freq
        self.eval_metric = check_get_evaluation_metric(eval_metric)
        self.eval_metric_seasonal_period = eval_metric_seasonal_period
+       self.horizon_weight = check_get_horizon_weight(horizon_weight, prediction_length=self.prediction_length)
        if quantile_levels is None:
            quantile_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        self.quantile_levels = sorted(quantile_levels)
@@ -196,6 +206,7 @@ class TimeSeriesPredictor:
            path_context=self.path,
            eval_metric=eval_metric,
            eval_metric_seasonal_period=eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
            target=self.target,
            known_covariates_names=self.known_covariates_names,
            prediction_length=self.prediction_length,
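
At the user level, the new argument is passed to the predictor constructor. A minimal usage sketch, assuming an installed build that includes this change (1.2.1b20250422 or later); the dataset path and column layout are hypothetical, and per the docstring above the weights only affect model selection and ensembling, not the models' own training losses:

    from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

    # Hypothetical long-format dataset with item_id, timestamp and target columns.
    train_data = TimeSeriesDataFrame.from_path("train.csv")

    predictor = TimeSeriesPredictor(
        prediction_length=7,
        eval_metric="MASE",
        # Emphasize the last two steps of the horizon; AutoGluon normalizes the
        # weights so that they sum to prediction_length.
        horizon_weight=[1, 1, 1, 1, 1, 2, 2],
    )
    predictor.fit(train_data)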

autogluon/timeseries/trainer.py
@@ -47,6 +47,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
         prediction_length: int = 1,
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
+        horizon_weight: Optional[np.ndarray] = None,
         save_data: bool = True,
         skip_model_selection: bool = False,
         enable_ensemble: bool = True,
@@ -88,6 +89,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
 
        self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
        self.eval_metric_seasonal_period = eval_metric_seasonal_period
+       self.horizon_weight = horizon_weight
        if val_splitter is None:
            val_splitter = ExpandingWindowSplitter(prediction_length=self.prediction_length)
        assert isinstance(val_splitter, AbstractWindowSplitter), "val_splitter must be of type AbstractWindowSplitter"
@@ -571,6 +573,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
            name=self._get_ensemble_model_name(),
            eval_metric=self.eval_metric,
            eval_metric_seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
            target=self.target,
            prediction_length=self.prediction_length,
            path=self.path,
@@ -793,6 +796,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
            prediction_length=self.prediction_length,
            target=self.target,
            seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
        )
 
    def score(
@@ -1254,6 +1258,7 @@ class TimeSeriesTrainer(AbstractTrainer[AbstractTimeSeriesModel]):
            path=self.path,
            eval_metric=self.eval_metric,
            eval_metric_seasonal_period=self.eval_metric_seasonal_period,
+           horizon_weight=self.horizon_weight,
            prediction_length=self.prediction_length,
            freq=freq,
            hyperparameters=hyperparameters,

autogluon/timeseries/version.py
@@ -1,4 +1,4 @@
 """This is the autogluon version file."""
 
-__version__ = "1.2.1b20250420"
+__version__ = "1.2.1b20250422"
 __lite__ = False

{autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.timeseries
-Version: 1.2.1b20250420
+Version: 1.2.1b20250422
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -55,9 +55,9 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core[raytune]==1.2.1b20250420
-Requires-Dist: autogluon.common==1.2.1b20250420
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250420
+Requires-Dist: autogluon.core[raytune]==1.2.1b20250422
+Requires-Dist: autogluon.common==1.2.1b20250422
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.2.1b20250422
 Provides-Extra: all
 Provides-Extra: chronos-onnx
 Requires-Dist: optimum[onnxruntime]<1.23,>=1.17; extra == "chronos-onnx"

{autogluon.timeseries-1.2.1b20250420.dist-info → autogluon.timeseries-1.2.1b20250422.dist-info}/RECORD
@@ -1,25 +1,25 @@
-autogluon.timeseries-1.2.1b20250420-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon.timeseries-1.2.1b20250422-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/timeseries/__init__.py,sha256=_CrLLc1fkjen7UzWoO0Os8WZoHOgvZbHKy46I8v_4k4,304
 autogluon/timeseries/evaluator.py,sha256=l642tYfTHsl8WVIq_vV6qhgAFVFr9UuZD7gLra3A_Kc,250
-autogluon/timeseries/learner.py,sha256=7dqSHKCIX2osjv9cmWWLwaGvdrPvla0HTnsR75bdenY,14112
-autogluon/timeseries/predictor.py,sha256=eklp1Qils6f4vIex8KhLD6nVsUQwZ6Jt9UKkTsSyErM,85739
+autogluon/timeseries/learner.py,sha256=F3RhHCFc4EB3QmKSaYV9WH7vvoN5T8HoneSdraNJ_2Q,14281
+autogluon/timeseries/predictor.py,sha256=Fvf1C2Eap61DIc6a5aYcbSOOqKdzroU83MgWpqfhM-k,86680
 autogluon/timeseries/regressor.py,sha256=xw5VPrXS-NQ_Ts4ppDjoNV0TdqUYjW4VINUtb_BZdiI,11868
 autogluon/timeseries/splitter.py,sha256=yzPca9p2bWV-_VJAptUyyzQsxu-uixAdpMoGQtDzMD4,3205
-autogluon/timeseries/trainer.py,sha256=LHLaLvzOLjjwFHfKifydp5NOCLLv2nv2BJLerbeNWuU,57700
-autogluon/timeseries/version.py,sha256=JeRMGQ1tQl_wapzgVTNXyANKEqo0z7JdlDeM0AwnHcw,91
+autogluon/timeseries/trainer.py,sha256=R_u7a90PyB9apSMknEGMwpcMRXvepuNnn0NAACw5sSg,57942
+autogluon/timeseries/version.py,sha256=CZZ1almZ8IwUvIs8IcJ7fdkqLQwq9WIQ8u_uKA5X8YU,91
 autogluon/timeseries/configs/__init__.py,sha256=BTtHIPCYeGjqgOcvqb8qPD4VNX-ICKOg6wnkew1cPOE,98
 autogluon/timeseries/configs/presets_configs.py,sha256=cLat8ecLlWrI-SC5KLBDCX2SbVXaucemy2pjxJAtSY0,2543
 autogluon/timeseries/dataset/__init__.py,sha256=UvnhAN5tjgxXTHoZMQDy64YMDj4Xxa68yY7NP4vAw0o,81
 autogluon/timeseries/dataset/ts_dataframe.py,sha256=SodnGhEA2V-hnfYHuAkH8rK4hQlLH8K5Tb6dsGapvPM,47161
-autogluon/timeseries/metrics/__init__.py,sha256=dJCrZ2cHwqhqNctwQjwG-FHgGUmzIFT-D0z72f4RAVM,2104
-autogluon/timeseries/metrics/abstract.py,sha256=CHUZB6xt9oF9yijSOjgGtjLuKo2X0mT6dQDuwg4ZzpU,8192
-autogluon/timeseries/metrics/point.py,sha256=2nlieQcPBCI9hXMT3v0Oe802ykZDuzvEtDpunzt0IVA,15785
-autogluon/timeseries/metrics/quantile.py,sha256=wvFeDMvRf1mFurhvVr_7g13Kg-hKIRoW4y9t2no_e7A,3969
+autogluon/timeseries/metrics/__init__.py,sha256=IWEG1X0NP_WbBwgLO7iC9TAoKZD7v-G6rrzqgWqhx08,3552
+autogluon/timeseries/metrics/abstract.py,sha256=nrnFZpBJo6f7zg5fbqJoLubTZyQdj3_rh-Z8D5qfxAk,8825
+autogluon/timeseries/metrics/point.py,sha256=yHs5ivgINO8WlhkhERVBFyP-hgk2tjBSJsOF7OszYgk,18814
+autogluon/timeseries/metrics/quantile.py,sha256=s2N70vdVgMVJpaFNBCbOfmvYVKZMlSyqfyy75WLpzZY,4552
 autogluon/timeseries/metrics/utils.py,sha256=HuDe1BNe8yJU4f_DKM913nNrUueoRaw6zhxm1-S20s0,910
 autogluon/timeseries/models/__init__.py,sha256=MYD9JJ-wUDE5B6jW6E6LU2eXQ6vflfQBvqQJkdzJa3A,1189
-autogluon/timeseries/models/presets.py,sha256=BdSTW91-flgqhVNuZIvqEf7wUj1iB6BPger4tJaoAZQ,12322
+autogluon/timeseries/models/presets.py,sha256=k5V2GgH5xFEqDrHBhfnLph61ASq_B1elcPLPurvFo0U,12431
 autogluon/timeseries/models/abstract/__init__.py,sha256=wvDsQAZIV0N3AwBeMaGItoQ82trEfnT-nol2AAOIxBg,102
-autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=gGairH3JX5rMEWhSj6VYy6zu7isZ04IaIj4lDXaTc1E,30814
+autogluon/timeseries/models/abstract/abstract_timeseries_model.py,sha256=UeSgldAHam7hI0J8jY4ALJLwZNC3_hRjo8NdK1UesyA,31247
 autogluon/timeseries/models/abstract/model_trial.py,sha256=ENPg_7nsdxIvaNM0o0UShZ3x8jFlRmwRc5m0fGPC0TM,3720
 autogluon/timeseries/models/abstract/tunable.py,sha256=SFl4vjkb6BfFFaRPVdftnnLYlIyCThutLHxiiAlV6tY,7168
 autogluon/timeseries/models/autogluon_tabular/__init__.py,sha256=r9i6jWcyeLHYClkcMSKRVsfrkBUMxpDrTATNTBc_qgQ,136
@@ -35,7 +35,7 @@ autogluon/timeseries/models/chronos/pipeline/chronos_bolt.py,sha256=kNIDesojKB3r
 autogluon/timeseries/models/chronos/pipeline/utils.py,sha256=dtDX5Pyu95bGv7qmqgfUc1iYowWPY84dnGN0uyqyHyQ,13131
 autogluon/timeseries/models/ensemble/__init__.py,sha256=kFr11Gmt7lQJu9Rr8HuIPphQN5l1TsoorfbJm_O3a_s,128
 autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py,sha256=LzL64JASiwkLsuFxGToXJGRItcMxq5_Ig2QP5Zm7SHw,3537
-autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=v5A2xv4d_QynA1GWD7iqmn-VVEFpD88Oiswyp72yBCc,7321
+autogluon/timeseries/models/ensemble/greedy_ensemble.py,sha256=S9ghBwfSUGT0fvHZl5HHcOPmaBnQE2ZQR0lLrC5TXsE,7610
 autogluon/timeseries/models/gluonts/__init__.py,sha256=asC1PTj4j9xMbilvk1IT1julnpeoKbv5ZNuAR6-DFgA,361
 autogluon/timeseries/models/gluonts/abstract_gluonts.py,sha256=35T8rty6sPGiaSFNpiVNmeseo1_qpn664UcWo92W5eI,32906
 autogluon/timeseries/models/gluonts/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -59,11 +59,11 @@ autogluon/timeseries/utils/datetime/base.py,sha256=3NdsH3NDq4cVAOSoy3XpaNixyNlbj
 autogluon/timeseries/utils/datetime/lags.py,sha256=gQDk5_zmsY5DUWDUpSaCKYkQ9nHKKY-LsywJQRAoYSk,5988
 autogluon/timeseries/utils/datetime/seasonality.py,sha256=YK_2k8hvYIMW-sJPnjGWRtCnvIOthwA2hATB3nwVoD4,834
 autogluon/timeseries/utils/datetime/time_features.py,sha256=MjLi3zQ00uWWJtXH9oGX2GJkTbvjdSiuabSa4kcVuxE,2672
-autogluon.timeseries-1.2.1b20250420.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.timeseries-1.2.1b20250420.dist-info/METADATA,sha256=rU1Po4zkmiHUBtZTV9IXmOlGXXrtIftFPtaXqdGVrBU,12687
-autogluon.timeseries-1.2.1b20250420.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.timeseries-1.2.1b20250420.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-autogluon.timeseries-1.2.1b20250420.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.2.1b20250420.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.timeseries-1.2.1b20250420.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.timeseries-1.2.1b20250420.dist-info/RECORD,,
+autogluon.timeseries-1.2.1b20250422.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.timeseries-1.2.1b20250422.dist-info/METADATA,sha256=4UirSdHj-jRgjGXA8AAVHrCVhTECV4Za_K6-yidbEJc,12687
+autogluon.timeseries-1.2.1b20250422.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.timeseries-1.2.1b20250422.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.timeseries-1.2.1b20250422.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.2.1b20250422.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.timeseries-1.2.1b20250422.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.timeseries-1.2.1b20250422.dist-info/RECORD,,