autogluon.timeseries 1.2.1b20250422__py3-none-any.whl → 1.2.1b20250423__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (27)
  1. autogluon/timeseries/dataset/ts_dataframe.py +1 -1
  2. autogluon/timeseries/learner.py +0 -4
  3. autogluon/timeseries/metrics/__init__.py +1 -30
  4. autogluon/timeseries/metrics/abstract.py +0 -10
  5. autogluon/timeseries/metrics/point.py +41 -131
  6. autogluon/timeseries/metrics/quantile.py +15 -36
  7. autogluon/timeseries/models/abstract/__init__.py +2 -2
  8. autogluon/timeseries/models/abstract/abstract_timeseries_model.py +178 -129
  9. autogluon/timeseries/models/chronos/model.py +3 -2
  10. autogluon/timeseries/models/ensemble/__init__.py +3 -2
  11. autogluon/timeseries/models/ensemble/abstract.py +139 -0
  12. autogluon/timeseries/models/ensemble/basic.py +88 -0
  13. autogluon/timeseries/models/ensemble/{greedy_ensemble.py → greedy.py} +67 -61
  14. autogluon/timeseries/models/presets.py +0 -4
  15. autogluon/timeseries/predictor.py +1 -12
  16. autogluon/timeseries/trainer.py +35 -27
  17. autogluon/timeseries/version.py +1 -1
  18. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/METADATA +4 -4
  19. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/RECORD +26 -25
  20. autogluon/timeseries/models/ensemble/abstract_timeseries_ensemble.py +0 -86
  21. /autogluon.timeseries-1.2.1b20250422-py3.9-nspkg.pth → /autogluon.timeseries-1.2.1b20250423-py3.9-nspkg.pth +0 -0
  22. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/LICENSE +0 -0
  23. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/NOTICE +0 -0
  24. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/WHEEL +0 -0
  25. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/namespace_packages.txt +0 -0
  26. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/top_level.txt +0 -0
  27. {autogluon.timeseries-1.2.1b20250422.dist-info → autogluon.timeseries-1.2.1b20250423.dist-info}/zip-safe +0 -0
autogluon/timeseries/dataset/ts_dataframe.py
@@ -490,7 +490,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
             except ValueError:
                 inferred_freq = None
             else:
-                inferred_freq = candidate_freq
+                inferred_freq = candidate_freq.freqstr
             return inferred_freq
 
         freq_for_each_item = index_df.groupby(ITEMID, sort=False).agg(get_freq)[TIMESTAMP]
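
A minimal sketch of what the `.freqstr` change above does, assuming `candidate_freq` is a pandas `DateOffset` (the variable's type is not shown in this hunk): the inferred frequency is now returned as a plain frequency string rather than as the offset object.

    import pandas as pd

    candidate_freq = pd.tseries.frequencies.to_offset("D")
    print(candidate_freq)          # <Day> (DateOffset object)
    print(candidate_freq.freqstr)  # 'D'   (plain string)
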
autogluon/timeseries/learner.py
@@ -3,7 +3,6 @@ import reprlib
 import time
 from typing import Any, Dict, List, Literal, Optional, Type, Union
 
-import numpy as np
 import pandas as pd
 
 from autogluon.core.learner import AbstractLearner
@@ -31,7 +30,6 @@ class TimeSeriesLearner(AbstractLearner):
         trainer_type: Type[TimeSeriesTrainer] = TimeSeriesTrainer,
         eval_metric: Union[str, TimeSeriesScorer, None] = None,
         eval_metric_seasonal_period: Optional[int] = None,
-        horizon_weight: Optional[np.ndarray] = None,
         prediction_length: int = 1,
         cache_predictions: bool = True,
         ensemble_model_type: Optional[Type] = None,
@@ -40,7 +38,6 @@ class TimeSeriesLearner(AbstractLearner):
         super().__init__(path_context=path_context)
         self.eval_metric: TimeSeriesScorer = check_get_evaluation_metric(eval_metric)
         self.eval_metric_seasonal_period = eval_metric_seasonal_period
-        self.horizon_weight = horizon_weight
         self.trainer_type = trainer_type
         self.target = target
         self.known_covariates_names = [] if known_covariates_names is None else known_covariates_names
@@ -86,7 +83,6 @@ class TimeSeriesLearner(AbstractLearner):
             prediction_length=self.prediction_length,
             eval_metric=self.eval_metric,
             eval_metric_seasonal_period=self.eval_metric_seasonal_period,
-            horizon_weight=self.horizon_weight,
             target=self.target,
             quantile_levels=self.quantile_levels,
             verbosity=kwargs.get("verbosity", 2),
autogluon/timeseries/metrics/__init__.py
@@ -1,8 +1,5 @@
 from pprint import pformat
-from typing import Optional, Type, Union, overload
-
-import numpy as np
-import numpy.typing as npt
+from typing import Type, Union
 
 from .abstract import TimeSeriesScorer
 from .point import MAE, MAPE, MASE, MSE, RMSE, RMSLE, RMSSE, SMAPE, WAPE, WCD
@@ -78,29 +75,3 @@ def check_get_evaluation_metric(
             f"(received eval_metric = {eval_metric} of type {type(eval_metric)})"
         )
     return scorer
-
-@overload
-def check_get_horizon_weight(horizon_weight: None, prediction_length: int) -> None: ...
-@overload
-def check_get_horizon_weight(horizon_weight: list[float], prediction_length: int) -> np.ndarray: ...
-
-def check_get_horizon_weight(horizon_weight: list[float] | None, prediction_length: int) -> Optional[np.ndarray]:
-    """Convert horizon_weight to a non-negative numpy array that sums up to prediction_length.
-
-    Raises an exception if horizon_weight has an invalid shape or contains invalid values.
-    """
-    if horizon_weight is None:
-        return None
-    horizon_weight_np = np.array(list(horizon_weight), dtype=np.float64)
-    if horizon_weight_np.shape != (prediction_length,):
-        raise ValueError(
-            f"horizon_weight must have length equal to {prediction_length=} (got {len(horizon_weight)=})"
-        )
-    if not (horizon_weight_np >= 0).all():
-        raise ValueError(f"All values in horizon_weight must be >= 0 (got {horizon_weight})")
-    if not horizon_weight_np.sum() > 0:
-        raise ValueError(f"At least some values in horizon_weight must be > 0 (got {horizon_weight})")
-    if not np.isfinite(horizon_weight_np).all():
-        raise ValueError(f"All horizon_weight values must be finite (got {horizon_weight})")
-    horizon_weight_np = horizon_weight_np * prediction_length / horizon_weight_np.sum()
-    return horizon_weight_np
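
For reference, the removed `check_get_horizon_weight` helper (condensed from the hunk above) validated the weights and rescaled them so that they sum to `prediction_length`:

    import numpy as np

    def check_get_horizon_weight(horizon_weight, prediction_length):
        # Condensed from the removed helper above: validate shape/values, then
        # rescale so that the weights sum to prediction_length.
        if horizon_weight is None:
            return None
        w = np.array(list(horizon_weight), dtype=np.float64)
        if w.shape != (prediction_length,):
            raise ValueError(f"horizon_weight must have length {prediction_length}")
        if not (w >= 0).all() or not w.sum() > 0 or not np.isfinite(w).all():
            raise ValueError("horizon_weight values must be non-negative, finite, and not all zero")
        return w * prediction_length / w.sum()

    print(check_get_horizon_weight([1.0, 1.0, 2.0], prediction_length=3))
    # [0.75 0.75 1.5] (up to formatting); the weights now sum to prediction_length
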
autogluon/timeseries/metrics/abstract.py
@@ -69,7 +69,6 @@ class TimeSeriesScorer:
         prediction_length: int = 1,
         target: str = "target",
         seasonal_period: Optional[int] = None,
-        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         seasonal_period = get_seasonality(data.freq) if seasonal_period is None else seasonal_period
@@ -93,8 +92,6 @@ class TimeSeriesScorer:
                 data_future=data_future,
                 predictions=predictions,
                 target=target,
-                prediction_length=prediction_length,
-                horizon_weight=horizon_weight,
                 **kwargs,
             )
         finally:
@@ -108,8 +105,6 @@ class TimeSeriesScorer:
         data_future: TimeSeriesDataFrame,
         predictions: TimeSeriesDataFrame,
         target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         """Internal method that computes the metric for given forecast & actual data.
@@ -126,11 +121,6 @@ class TimeSeriesScorer:
             columns corresponding to each of the quantile levels. Must have the same index as ``data_future``.
         target : str, default = "target"
             Name of the column in ``data_future`` that contains the target time series.
-        prediction_length : int, default = 1
-            Length of the forecast horizon in time steps.
-        horizon_weight : np.ndarray, optional
-            Weight assigned to each time step in the forecast horizon when computing the metric. If provided, this list
-            must contain `prediction_length` non-negative values, with `sum(horizon_weight) = prediction_length`.
 
         Returns
         -------
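
After this change, subclasses implement `compute_metric` with the shorter signature shown in the point.py hunks below. A minimal sketch of a custom point-forecast scorer under that signature (illustrative only; `MedianAE` is a hypothetical metric, and `_get_point_forecast_score_inputs` and the import paths are taken from the surrounding hunks):

    import numpy as np

    from autogluon.timeseries import TimeSeriesDataFrame
    from autogluon.timeseries.metrics import TimeSeriesScorer

    class MedianAE(TimeSeriesScorer):
        """Hypothetical metric: median absolute error of the point forecast."""

        def compute_metric(
            self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
        ) -> float:
            # Base-class helper (seen in the hunks below) aligns actuals and point forecasts.
            y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
            return float(np.nanmedian((y_true - y_pred).abs()))
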
autogluon/timeseries/metrics/point.py
@@ -38,20 +38,10 @@ class RMSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "root_mean_squared_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return np.sqrt(self._safemean(errors))
+        return np.sqrt(self._safemean((y_true - y_pred) ** 2))
 
 
 class MSE(TimeSeriesScorer):
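
A toy numpy sketch of the two RMSE code paths above (assuming `_safemean` behaves like a NaN-aware mean): with uniform `horizon_weight`, the removed weighted branch reduces to the new, simpler expression.

    import numpy as np

    prediction_length = 3
    y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])  # 2 items x 3 forecast steps
    y_pred = np.array([1.5, 2.0, 2.0, 4.0, 4.0, 7.0])

    # New code path: plain mean of squared errors.
    rmse_new = np.sqrt(np.nanmean((y_true - y_pred) ** 2))

    # Removed code path with uniform weights: reshape to [num_windows, prediction_length].
    horizon_weight = np.ones(prediction_length)
    errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
    rmse_old = np.sqrt(np.nanmean(errors * horizon_weight.reshape([1, prediction_length])))

    assert np.isclose(rmse_new, rmse_old)
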
@@ -79,20 +69,10 @@ class MSE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_squared_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return self._safemean(errors)
+        return self._safemean((y_true - y_pred) ** 2)
 
 
 class MAE(TimeSeriesScorer):
@@ -118,20 +98,10 @@ class MAE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return self._safemean(errors)
+        return self._safemean((y_true - y_pred).abs())
 
 
 class WAPE(TimeSeriesScorer):
@@ -149,7 +119,6 @@ class WAPE(TimeSeriesScorer):
     - not sensitive to outliers
     - prefers models that accurately estimate the median
 
-    If `horizon_weight` is provided, both the errors and the target time series in the denominator will be re-weighted.
 
     References
     ----------
@@ -160,21 +129,10 @@ class WAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-            y_true = y_true.reshape([-1, prediction_length]) * horizon_weight.reshape([1, prediction_length])
-        return np.nansum(errors) / np.nansum(np.abs(y_true))
+        return (y_true - y_pred).abs().sum() / y_true.abs().sum()
 
 
 class SMAPE(TimeSeriesScorer):
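
A toy illustration of the new WAPE expression above: total absolute error divided by total absolute actuals (numbers chosen arbitrarily).

    import pandas as pd

    y_true = pd.Series([10.0, 20.0, 30.0])
    y_pred = pd.Series([12.0, 18.0, 33.0])

    wape = (y_true - y_pred).abs().sum() / y_true.abs().sum()
    print(wape)  # (2 + 2 + 3) / 60 = 0.1166...
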
@@ -200,20 +158,10 @@ class SMAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "symmetric_mean_absolute_percentage_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = (np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return 2 * self._safemean(errors)
+        return self._safemean(2 * ((y_true - y_pred).abs() / (y_true.abs() + y_pred.abs())))
 
 
 class MAPE(TimeSeriesScorer):
@@ -239,20 +187,10 @@ class MAPE(TimeSeriesScorer):
     equivalent_tabular_regression_metric = "mean_absolute_percentage_error"
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = (np.abs(y_true - y_pred) / np.abs(y_true)).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return self._safemean(errors)
+        return self._safemean((y_true - y_pred).abs() / y_true.abs())
 
 
 class MASE(TimeSeriesScorer):
@@ -302,24 +240,16 @@ class MASE(TimeSeriesScorer):
         self._past_abs_seasonal_error = None
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-
-        errors = np.abs(y_true - y_pred).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])
+        num_items = len(self._past_abs_seasonal_error)
+        # Reshape abs errors into [num_items, prediction_length] to normalize per item without groupby
+        abs_errors = np.abs(y_true.to_numpy() - y_pred.to_numpy()).reshape([num_items, -1])
+        return self._safemean(abs_errors / self._past_abs_seasonal_error.values[:, None])
 
 
 class RMSSE(TimeSeriesScorer):
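
A toy sketch of the per-item normalization used in the new MASE/RMSSE code paths above: errors are reshaped to `[num_items, prediction_length]` so each row can be divided by that item's past seasonal error without a groupby.

    import numpy as np

    num_items, prediction_length = 2, 3
    abs_errors = np.array([1.0, 2.0, 3.0, 2.0, 2.0, 2.0]).reshape([num_items, -1])
    past_abs_seasonal_error = np.array([2.0, 4.0])  # one scaling factor per item

    scaled = abs_errors / past_abs_seasonal_error[:, None]  # rows: [0.5, 1, 1.5] and [0.5, 0.5, 0.5]
    print(np.nanmean(scaled))  # 0.75
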
@@ -370,23 +300,16 @@ class RMSSE(TimeSeriesScorer):
         self._past_squared_seasonal_error = None
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
+        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
         if self._past_squared_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
-        y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
-        errors = ((y_true - y_pred) ** 2).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return np.sqrt(self._safemean(errors / self._past_squared_seasonal_error.to_numpy()[:, None]))
+        num_items = len(self._past_squared_seasonal_error)
+        # Reshape squared errors into [num_items, prediction_length] to normalize per item without groupby
+        squared_errors = ((y_true.to_numpy() - y_pred.to_numpy()) ** 2.0).reshape([num_items, -1])
+        return np.sqrt(self._safemean(squared_errors / self._past_squared_seasonal_error.values[:, None]))
 
 
 class RMSLE(TimeSeriesScorer):
@@ -413,22 +336,12 @@ class RMSLE(TimeSeriesScorer):
     """
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        y_true, y_pred = y_true.to_numpy(), y_pred.to_numpy()
         y_pred = np.clip(y_pred, a_min=0.0, a_max=None)
 
-        errors = np.power(np.log1p(y_pred) - np.log1p(y_true), 2).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return np.sqrt(self._safemean(errors))
+        return np.sqrt(np.power(np.log1p(y_pred) - np.log1p(y_true), 2).mean())
 
     def __call__(
         self,
@@ -437,7 +350,6 @@ class RMSLE(TimeSeriesScorer):
         prediction_length: int = 1,
         target: str = "target",
         seasonal_period: Optional[int] = None,
-        horizon_weight: Optional[np.ndarray] = None,
         **kwargs,
     ) -> float:
         if (data[target] < 0).any():
@@ -448,7 +360,6 @@ class RMSLE(TimeSeriesScorer):
             prediction_length=prediction_length,
             target=target,
             seasonal_period=seasonal_period,
-            horizon_weight=horizon_weight,
             **kwargs,
         )
 
@@ -471,36 +382,35 @@ class WCD(TimeSeriesScorer):
     Parameters
     ----------
     alpha : float, default = 0.5
-        Values > 0.5 put a stronger penalty on underpredictions (when cumulative forecast is below the
+        Values > 0.5 correspond put a stronger penalty on underpredictions (when cumulative forecast is below the
         cumulative actual value). Values < 0.5 put a stronger penalty on overpredictions.
     """
 
     def __init__(self, alpha: float = 0.5):
         assert 0 < alpha < 1, "alpha must be in (0, 1)"
         self.alpha = alpha
+        self.num_items: Optional[int] = None
         warnings.warn(
             f"{self.name} is an experimental metric. Its behavior may change in the future version of AutoGluon."
         )
 
-    def _fast_cumsum(self, y: np.ndarray, prediction_length: int) -> np.ndarray:
+    def save_past_metrics(
+        self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
+    ) -> None:
+        self.num_items = data_past.num_items
+
+    def _fast_cumsum(self, y: np.ndarray) -> np.ndarray:
         """Compute the cumulative sum for each consecutive `prediction_length` items in the array."""
-        y = y.reshape(-1, prediction_length)
+        assert self.num_items is not None, "Make sure to call `save_past_metrics` before `compute_metric`"
+        y = y.reshape(self.num_items, -1)
         return np.nancumsum(y, axis=1).ravel()
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, y_pred = self._get_point_forecast_score_inputs(data_future, predictions, target=target)
-        cumsum_true = self._fast_cumsum(y_true.to_numpy(), prediction_length=prediction_length)
-        cumsum_pred = self._fast_cumsum(y_pred.to_numpy(), prediction_length=prediction_length)
+        cumsum_true = self._fast_cumsum(y_true.to_numpy())
+        cumsum_pred = self._fast_cumsum(y_pred.to_numpy())
         diffs = cumsum_pred - cumsum_true
-        errors = (diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))).reshape([-1, prediction_length])
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return 2 * self._safemean(errors)
+        error = diffs * np.where(diffs < 0, -self.alpha, (1 - self.alpha))
+        return 2 * self._safemean(error)
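
A toy illustration of the WCD computation after this change (assuming `_safemean` behaves like a NaN-aware mean): cumulative sums are taken per item, and over- and under-predictions are penalized asymmetrically via `alpha`.

    import numpy as np

    alpha, num_items = 0.5, 1
    y_true = np.array([1.0, 1.0, 1.0])
    y_pred = np.array([0.5, 1.0, 2.0])

    cumsum_true = np.nancumsum(y_true.reshape(num_items, -1), axis=1).ravel()  # [1, 2, 3]
    cumsum_pred = np.nancumsum(y_pred.reshape(num_items, -1), axis=1).ravel()  # [0.5, 1.5, 3.5]
    diffs = cumsum_pred - cumsum_true                                          # [-0.5, -0.5, 0.5]
    error = diffs * np.where(diffs < 0, -alpha, 1 - alpha)                     # [0.25, 0.25, 0.25]
    print(2 * np.nanmean(error))                                               # 0.5
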
autogluon/timeseries/metrics/quantile.py
@@ -25,7 +25,6 @@ class WQL(TimeSeriesScorer):
     - scale-dependent (time series with large absolute value contribute more to the loss)
     - equivalent to WAPE if ``quantile_levels = [0.5]``
 
-    If `horizon_weight` is provided, both the errors and the target time series in the denominator will be re-weighted.
 
     References
     ----------
@@ -35,27 +34,16 @@ class WQL(TimeSeriesScorer):
     needs_quantile = True
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
-        q_pred = q_pred.to_numpy()  # shape [N, len(quantile_levels)]
+        values_true = y_true.values[:, None]  # shape [N, 1]
+        values_pred = q_pred.values  # shape [N, len(quantile_levels)]
 
-        errors = (
-            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
-            .mean(axis=1)
-            .reshape([-1, prediction_length])
+        return 2 * np.mean(
+            np.nansum(np.abs((values_true - values_pred) * ((values_true <= values_pred) - quantile_levels)), axis=0)
+            / np.nansum(np.abs(values_true))
         )
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-            y_true = y_true.reshape([-1, prediction_length]) * horizon_weight.reshape([1, prediction_length])
-        return 2 * np.nansum(errors) / np.nansum(np.abs(y_true))
 
 
 class SQL(TimeSeriesScorer):
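
A toy illustration of the new WQL expression above: per-quantile pinball losses are summed over observations, normalized by the total absolute actuals, and averaged over quantile levels (numbers chosen arbitrarily).

    import numpy as np

    quantile_levels = np.array([0.1, 0.5, 0.9])
    values_true = np.array([[10.0], [20.0]])                 # shape [N, 1]
    values_pred = np.array([[8.0, 10.0, 12.0],
                            [18.0, 21.0, 24.0]])             # shape [N, len(quantile_levels)]

    pinball = np.abs((values_true - values_pred) * ((values_true <= values_pred) - quantile_levels))
    wql = 2 * np.mean(np.nansum(pinball, axis=0) / np.nansum(np.abs(values_true)))
    print(wql)  # ~0.033 for these toy numbers
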
@@ -105,26 +93,17 @@ class SQL(TimeSeriesScorer):
         self._past_abs_seasonal_error = None
 
     def compute_metric(
-        self,
-        data_future: TimeSeriesDataFrame,
-        predictions: TimeSeriesDataFrame,
-        target: str = "target",
-        prediction_length: int = 1,
-        horizon_weight: Optional[np.ndarray] = None,
-        **kwargs,
+        self, data_future: TimeSeriesDataFrame, predictions: TimeSeriesDataFrame, target: str = "target", **kwargs
     ) -> float:
         if self._past_abs_seasonal_error is None:
             raise AssertionError("Call `save_past_metrics` before `compute_metric`")
 
         y_true, q_pred, quantile_levels = self._get_quantile_forecast_score_inputs(data_future, predictions, target)
-        q_pred = q_pred.to_numpy()
-        y_true = y_true.to_numpy()[:, None]  # shape [N, 1]
-
-        errors = (
-            np.abs((q_pred - y_true) * ((y_true <= q_pred) - quantile_levels))
-            .mean(axis=1)
-            .reshape([-1, prediction_length])
-        )
-        if horizon_weight is not None:
-            errors *= horizon_weight.reshape([1, prediction_length])
-        return 2 * self._safemean(errors / self._past_abs_seasonal_error.to_numpy()[:, None])
+        q_pred = q_pred.values
+        values_true = y_true.values[:, None]  # shape [N, 1]
+
+        ql = np.abs((q_pred - values_true) * ((values_true <= q_pred) - quantile_levels)).mean(axis=1)
+        num_items = len(self._past_abs_seasonal_error)
+        # Reshape quantile losses values into [num_items, prediction_length] to normalize per item without groupby
+        quantile_losses = ql.reshape([num_items, -1])
+        return 2 * self._safemean(quantile_losses / self._past_abs_seasonal_error.values[:, None])
autogluon/timeseries/models/abstract/__init__.py
@@ -1,3 +1,3 @@
-from .abstract_timeseries_model import AbstractTimeSeriesModel
+from .abstract_timeseries_model import AbstractTimeSeriesModel, TimeSeriesModelBase
 
-__all__ = ["AbstractTimeSeriesModel"]
+__all__ = ["AbstractTimeSeriesModel", "TimeSeriesModelBase"]
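
After this change, both names are exported from the subpackage; a minimal import check (assuming the installed package layout shown in the file list above):

    from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel, TimeSeriesModelBase
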