autogluon.timeseries 1.4.1b20251115__py3-none-any.whl → 1.5.0b20251221__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- autogluon/timeseries/configs/hyperparameter_presets.py +13 -28
- autogluon/timeseries/configs/predictor_presets.py +23 -39
- autogluon/timeseries/dataset/ts_dataframe.py +32 -34
- autogluon/timeseries/learner.py +67 -33
- autogluon/timeseries/metrics/__init__.py +4 -4
- autogluon/timeseries/metrics/abstract.py +8 -8
- autogluon/timeseries/metrics/point.py +9 -9
- autogluon/timeseries/metrics/quantile.py +4 -4
- autogluon/timeseries/models/__init__.py +2 -1
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -50
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +8 -8
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +30 -26
- autogluon/timeseries/models/autogluon_tabular/per_step.py +13 -11
- autogluon/timeseries/models/autogluon_tabular/transforms.py +2 -2
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +395 -0
- autogluon/timeseries/models/chronos/model.py +30 -25
- autogluon/timeseries/models/chronos/utils.py +5 -5
- autogluon/timeseries/models/ensemble/__init__.py +17 -10
- autogluon/timeseries/models/ensemble/abstract.py +13 -9
- autogluon/timeseries/models/ensemble/array_based/__init__.py +2 -2
- autogluon/timeseries/models/ensemble/array_based/abstract.py +24 -31
- autogluon/timeseries/models/ensemble/array_based/models.py +146 -11
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +2 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +6 -5
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +44 -83
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +21 -55
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +7 -3
- autogluon/timeseries/models/ensemble/weighted/basic.py +26 -13
- autogluon/timeseries/models/ensemble/weighted/greedy.py +21 -144
- autogluon/timeseries/models/gluonts/abstract.py +30 -29
- autogluon/timeseries/models/gluonts/dataset.py +9 -9
- autogluon/timeseries/models/gluonts/models.py +0 -7
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +13 -16
- autogluon/timeseries/models/local/naive.py +2 -2
- autogluon/timeseries/models/local/npts.py +7 -1
- autogluon/timeseries/models/local/statsforecast.py +13 -13
- autogluon/timeseries/models/multi_window/multi_window_model.py +38 -23
- autogluon/timeseries/models/registry.py +3 -4
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +3 -4
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +6 -6
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +4 -9
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +2 -3
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +10 -10
- autogluon/timeseries/models/toto/_internal/dataset.py +2 -2
- autogluon/timeseries/models/toto/_internal/forecaster.py +8 -8
- autogluon/timeseries/models/toto/dataloader.py +4 -4
- autogluon/timeseries/models/toto/hf_pretrained_model.py +97 -16
- autogluon/timeseries/models/toto/model.py +30 -17
- autogluon/timeseries/predictor.py +531 -136
- autogluon/timeseries/regressor.py +18 -23
- autogluon/timeseries/splitter.py +2 -2
- autogluon/timeseries/trainer/ensemble_composer.py +323 -129
- autogluon/timeseries/trainer/model_set_builder.py +9 -9
- autogluon/timeseries/trainer/prediction_cache.py +16 -16
- autogluon/timeseries/trainer/trainer.py +235 -145
- autogluon/timeseries/trainer/utils.py +3 -4
- autogluon/timeseries/transforms/covariate_scaler.py +7 -7
- autogluon/timeseries/transforms/target_scaler.py +8 -8
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +1 -3
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/features.py +22 -9
- autogluon/timeseries/utils/forecast.py +1 -2
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/version.py +1 -1
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/METADATA +23 -21
- autogluon_timeseries-1.5.0b20251221.dist-info/RECORD +103 -0
- autogluon_timeseries-1.4.1b20251115.dist-info/RECORD +0 -96
- /autogluon.timeseries-1.4.1b20251115-py3.9-nspkg.pth → /autogluon.timeseries-1.5.0b20251221-py3.11-nspkg.pth +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/WHEEL +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/licenses/LICENSE +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/licenses/NOTICE +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/namespace_packages.txt +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/top_level.txt +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.5.0b20251221.dist-info}/zip-safe +0 -0
--- a/autogluon/timeseries/trainer/trainer.py
+++ b/autogluon/timeseries/trainer/trainer.py
@@ -5,7 +5,7 @@ import time
 import traceback
 from collections import defaultdict
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any, Literal
 
 import networkx as nx
 import numpy as np
@@ -47,14 +47,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         path: str,
         prediction_length: int = 1,
-        eval_metric:
+        eval_metric: str | TimeSeriesScorer | None = None,
         save_data: bool = True,
         skip_model_selection: bool = False,
         enable_ensemble: bool = True,
         verbosity: int = 2,
-        num_val_windows:
-        val_step_size:
-        refit_every_n_windows:
+        num_val_windows: tuple[int, ...] = (1,),
+        val_step_size: int | None = None,
+        refit_every_n_windows: int | None = 1,
         # TODO: Set cache_predictions=False by default once all models in default presets have a reasonable inference speed
         cache_predictions: bool = True,
         **kwargs,
@@ -88,6 +88,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self.eval_metric = check_get_evaluation_metric(eval_metric, prediction_length=prediction_length)
 
         self.num_val_windows = num_val_windows
+
+        # Validate num_val_windows
+        if len(self.num_val_windows) == 0:
+            raise ValueError("num_val_windows cannot be empty")
+        if not all(isinstance(w, int) and w > 0 for w in self.num_val_windows):
+            raise ValueError(f"num_val_windows must contain only positive integers, got {self.num_val_windows}")
+
         self.val_step_size = val_step_size
         self.refit_every_n_windows = refit_every_n_windows
         self.hpo_results = {}
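The constructor change above turns `num_val_windows` from a single integer into a tuple with one entry per ensemble layer (later in this diff it is passed to `_fit_ensembles` as `num_windows_per_layer`), and the new checks reject empty tuples and non-positive entries. A minimal standalone sketch of the semantics, with an illustrative two-layer value:

# standalone sketch of the new num_val_windows semantics (values are illustrative):
# one entry per ensemble layer, each a positive number of validation windows
num_val_windows = (2, 1)  # 2 windows for the first layer, 1 for the last

if len(num_val_windows) == 0:
    raise ValueError("num_val_windows cannot be empty")
if not all(isinstance(w, int) and w > 0 for w in num_val_windows):
    raise ValueError(f"num_val_windows must contain only positive integers, got {num_val_windows}")

print(sum(num_val_windows))  # 3 validation windows in total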
@@ -111,14 +118,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         path = os.path.join(self.path_data, "train.pkl")
         return load_pkl.load(path=path)
 
-    def load_val_data(self) ->
+    def load_val_data(self) -> TimeSeriesDataFrame | None:
         path = os.path.join(self.path_data, "val.pkl")
         if os.path.exists(path):
             return load_pkl.load(path=path)
         else:
             return None
 
-    def load_data(self) -> tuple[TimeSeriesDataFrame,
+    def load_data(self) -> tuple[TimeSeriesDataFrame, TimeSeriesDataFrame | None]:
         train_data = self.load_train_data()
         val_data = self.load_val_data()
         return train_data, val_data
@@ -141,7 +148,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def _add_model(
         self,
         model: TimeSeriesModelBase,
-        base_models:
+        base_models: list[str] | None = None,
     ):
         """Add a model to the model graph of the trainer. If the model is an ensemble, also add
         information about dependencies to the model graph (list of models specified via ``base_models``).
@@ -173,8 +180,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             for base_model in base_models:
                 self.model_graph.add_edge(base_model, model.name)
 
-    def
-        """Get a dictionary mapping each model to their
+    def _get_model_layers(self) -> dict[str, int]:
+        """Get a dictionary mapping each model to their layer in the model graph"""
 
         # get nodes without a parent
         rootset = set(self.model_graph.nodes)
@@ -187,14 +194,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             for dest_node in paths_to:
                 paths_from[dest_node][source_node] = paths_to[dest_node]
 
-        # determine
-
+        # determine layers
+        layers = {}
         for n in paths_from:
-
+            layers[n] = max(paths_from[n].get(src, 0) for src in rootset)
 
-        return
+        return layers
 
-    def get_models_attribute_dict(self, attribute: str, models:
+    def get_models_attribute_dict(self, attribute: str, models: list[str] | None = None) -> dict[str, Any]:
         """Get an attribute from the `model_graph` for each of the model names
         specified. If `models` is none, the attribute will be returned for all models"""
         results = {}
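`_get_model_layers` assigns each model the largest path distance from any root of the model graph, so base models land in layer 0 and stacked ensembles in higher layers. A minimal networkx sketch of the same idea (model names are illustrative, and this is a simplified reimplementation, not the code above):

import networkx as nx

def get_model_layers(graph: nx.DiGraph) -> dict[str, int]:
    # roots are base models: nodes with no predecessors in the dependency graph
    roots = [node for node in graph.nodes if graph.in_degree(node) == 0]
    layers = {}
    for node in graph.nodes:
        # a model's layer is its largest distance from any root; roots sit at layer 0
        distances = [
            nx.shortest_path_length(graph, source=root, target=node)
            for root in roots
            if nx.has_path(graph, root, node)
        ]
        layers[node] = max(distances, default=0)
    return layers

graph = nx.DiGraph()
graph.add_edges_from([
    ("DeepAR", "WeightedEnsemble"),
    ("TFT", "WeightedEnsemble"),
    ("WeightedEnsemble", "StackedEnsemble"),
])
print(get_model_layers(graph))
# {'DeepAR': 0, 'WeightedEnsemble': 1, 'TFT': 0, 'StackedEnsemble': 2}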
@@ -212,25 +219,25 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         if len(models) == 1:
             return models[0]
         model_performances = self.get_models_attribute_dict(attribute="val_score")
-
-
-            (m, model_performances[m],
+        model_layers = self._get_model_layers()
+        model_name_score_layer_list = [
+            (m, model_performances[m], model_layers.get(m, 0)) for m in models if model_performances[m] is not None
         ]
 
-        if not
+        if not model_name_score_layer_list:
             raise ValueError("No fitted models have validation scores computed.")
 
         # rank models in terms of validation score. if two models have the same validation score,
-        # rank them by their
+        # rank them by their layer in the model graph (lower layer models are preferred).
         return max(
-
-            key=lambda mns: (mns[1], -mns[2]),  # (score, -
+            model_name_score_layer_list,
+            key=lambda mns: (mns[1], -mns[2]),  # (score, -layer)
         )[0]
 
-    def get_model_names(self,
+    def get_model_names(self, layer: int | None = None) -> list[str]:
         """Get model names that are registered in the model graph"""
-        if
-            return list(node for node, l in self.
+        if layer is not None:
+            return list(node for node, l in self._get_model_layers().items() if l == layer)  # noqa: E741
         return list(self.model_graph.nodes)
 
     def get_info(self, include_model_info: bool = False) -> dict[str, Any]:
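The best-model selection above breaks validation-score ties in favor of the model in the lower layer, via `max` over `(score, -layer)` tuples. An illustrative example with made-up scores:

# (name, val_score, layer) triples; higher val_score wins, ties go to the lower layer
candidates = [
    ("DeepAR", -0.21, 0),
    ("WeightedEnsemble", -0.18, 1),
    ("StackedEnsemble", -0.18, 2),
]
best = max(candidates, key=lambda mns: (mns[1], -mns[2]))
print(best[0])  # WeightedEnsemble: tied score with StackedEnsemble, but lower layer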
@@ -262,9 +269,9 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         model: AbstractTimeSeriesModel,
         train_data: TimeSeriesDataFrame,
-        time_limit:
-        val_data:
-        hyperparameter_tune_kwargs:
+        time_limit: float | None = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        hyperparameter_tune_kwargs: str | dict = "auto",
     ):
         default_num_trials = None
         if time_limit is None and (
@@ -280,7 +287,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
             time_limit=time_limit,
             default_num_trials=default_num_trials,
-            val_splitter=self._get_val_splitter(),
+            val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
             refit_every_n_windows=self.refit_every_n_windows,
         )
         total_tuning_time = time.time() - tuning_start_time
@@ -290,11 +297,21 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         # add each of the trained HPO configurations to the trained models
         for model_hpo_name, model_info in hpo_models.items():
             model_path = os.path.join(self.path, model_info["path"])
+
             # Only load model configurations that didn't fail
-            if Path(model_path).exists():
-
-
-
+            if not Path(model_path).exists():
+                continue
+
+            model_hpo = self.load_model(model_hpo_name, path=model_path, model_type=type(model))
+
+            # override validation score to align evaluations on the final ensemble layer's window
+            if isinstance(model_hpo, MultiWindowBacktestingModel):
+                model_hpo.val_score = float(
+                    np.mean([info["val_score"] for info in model_hpo.info_per_val_window[-self.num_val_windows[-1] :]])
+                )
+
+            self._add_model(model_hpo)
+            model_names_trained.append(model_hpo.name)
 
         logger.info(f"\tTrained {len(model_names_trained)} models while tuning {model.name}.")
 
@@ -315,8 +332,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         train_data: TimeSeriesDataFrame,
         model: AbstractTimeSeriesModel,
-        val_data:
-        time_limit:
+        val_data: TimeSeriesDataFrame | None = None,
+        time_limit: float | None = None,
     ) -> list[str]:
         """Fit and save the given model on given training and validation data and save the trained model.
 
@@ -335,10 +352,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         model.fit(
             train_data=train_data,
-            val_data=val_data,
+            val_data=None if isinstance(model, MultiWindowBacktestingModel) else val_data,
             time_limit=time_limit,
             verbosity=self.verbosity,
-            val_splitter=self._get_val_splitter(),
+            val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
             refit_every_n_windows=self.refit_every_n_windows,
         )
 
@@ -347,11 +364,19 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         if time_limit is not None:
             time_limit = time_limit - (fit_end_time - fit_start_time)
-        if val_data is not None
+        if val_data is not None:
             model.score_and_cache_oof(
                 val_data, store_val_score=True, store_predict_time=True, time_limit=time_limit
             )
 
+        # by default, MultiWindowBacktestingModel computes validation score on all windows. However,
+        # when doing multi-layer stacking, the trainer only scores on the windows of the last layer.
+        # we override the val_score to align scores.
+        if isinstance(model, MultiWindowBacktestingModel):
+            model.val_score = float(
+                np.mean([info["val_score"] for info in model.info_per_val_window[-self.num_val_windows[-1] :]])
+            )
+
         log_scores_and_times(
             val_score=model.val_score,
             fit_time=model.fit_time,
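Per the comment in the hunk above, a `MultiWindowBacktestingModel` scores across all of its backtest windows, while multi-layer stacking only evaluates on the final layer's windows; the override keeps the two comparable. A worked example of the averaging, with made-up scores:

import numpy as np

# hypothetical per-window results in MultiWindowBacktestingModel.info_per_val_window
info_per_val_window = [{"val_score": -0.30}, {"val_score": -0.25}, {"val_score": -0.20}]
num_val_windows = (2, 1)  # windows per layer; the last layer uses a single window

# same expression as the diff: average only over the final layer's window(s)
val_score = float(np.mean([info["val_score"] for info in info_per_val_window[-num_val_windows[-1]:]]))
print(val_score)  # -0.2 (instead of the all-window mean, -0.25)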
@@ -376,13 +401,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def fit(
         self,
         train_data: TimeSeriesDataFrame,
-        hyperparameters:
-        val_data:
-        ensemble_hyperparameters:
-        hyperparameter_tune_kwargs:
-        excluded_model_types:
-        time_limit:
-        random_seed:
+        hyperparameters: str | dict[Any, dict],
+        val_data: TimeSeriesDataFrame | None = None,
+        ensemble_hyperparameters: dict | list[dict] | None = None,
+        hyperparameter_tune_kwargs: str | dict | None = None,
+        excluded_model_types: list[str] | None = None,
+        time_limit: float | None = None,
+        random_seed: int | None = None,
     ):
         """Fit a set of timeseries models specified by the `hyperparameters`
         dictionary that maps model names to their specified hyperparameters.
@@ -415,12 +440,22 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         # Handle ensemble hyperparameters
         if ensemble_hyperparameters is None:
-            ensemble_hyperparameters = {"GreedyEnsemble": {}}
-
+            ensemble_hyperparameters = [{"GreedyEnsemble": {}}]
+        if isinstance(ensemble_hyperparameters, dict):
+            ensemble_hyperparameters = [ensemble_hyperparameters]
+        validate_ensemble_hyperparameters(ensemble_hyperparameters)
 
         time_start = time.time()
         hyperparameters = copy.deepcopy(hyperparameters)
 
+        if val_data is not None:
+            if self.num_val_windows[-1] != 1:
+                raise ValueError(
+                    f"When val_data is provided, the last element of num_val_windows must be 1, "
+                    f"got {self.num_val_windows[-1]}"
+                )
+        multi_window = self._get_val_splitter(use_val_data=val_data is not None).num_val_windows > 0
+
         if self.save_data and not self.is_data_saved:
             self.save_train_data(train_data)
             if val_data is not None:
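With this change, `ensemble_hyperparameters` accepts either a single dict (one ensemble layer, the previous behavior) or a list of dicts (one per stacking layer); a bare dict is wrapped into a one-element list before validation. A sketch of the normalization; `GreedyEnsemble` appears in this diff, while the second layer's key is hypothetical:

# both forms normalize to a list with one config dict per ensemble layer
single_layer = {"GreedyEnsemble": {}}
multi_layer = [{"GreedyEnsemble": {}}, {"PerItemGreedyEnsemble": {}}]  # second key is hypothetical

for config in (single_layer, multi_layer):
    if isinstance(config, dict):  # same wrapping as TimeSeriesTrainer.fit
        config = [config]
    print(f"{len(config)} ensemble layer(s): {config}")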
@@ -431,7 +466,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             hyperparameters=hyperparameters,
             hyperparameter_tune=hyperparameter_tune_kwargs is not None,  # TODO: remove hyperparameter_tune
             freq=train_data.freq,
-            multi_window=
+            multi_window=multi_window,
             excluded_model_types=excluded_model_types,
         )
 
@@ -460,7 +495,6 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
                 time_reserved_for_ensemble = min(
                     self.max_ensemble_time_limit, time_left / (num_base_models - i + 1)
                 )
-                logger.debug(f"Reserving {time_reserved_for_ensemble:.1f}s for ensemble")
             else:
                 time_reserved_for_ensemble = 0.0
             time_left_for_model = (time_left - time_reserved_for_ensemble) / (num_base_models - i)
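The removed debug log aside, the budgeting formula above reserves a slice of the remaining time for the ensemble before splitting the rest across the base models still to fit. A worked example with illustrative numbers:

# illustrative budget: 600s left, 3 base models still to fit (i=0), 300s ensemble cap
time_left, num_base_models, i, max_ensemble_time_limit = 600.0, 3, 0, 300.0

time_reserved_for_ensemble = min(max_ensemble_time_limit, time_left / (num_base_models - i + 1))
time_left_for_model = (time_left - time_reserved_for_ensemble) / (num_base_models - i)
print(time_reserved_for_ensemble, time_left_for_model)  # 150.0 150.0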
@@ -500,13 +534,16 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
                 train_data, model=model, val_data=val_data, time_limit=time_left_for_model
             )
 
-
-
-
-
-
-
-
+        if self.enable_ensemble and ensemble_hyperparameters:
+            model_names = self.get_model_names(layer=0)
+            ensemble_names = self._fit_ensembles(
+                data_per_window=self._get_validation_windows(train_data, val_data),
+                predictions_per_window=self._get_base_model_predictions(model_names),
+                time_limit=None if time_limit is None else time_limit - (time.time() - time_start),
+                ensemble_hyperparameters=ensemble_hyperparameters,
+                num_windows_per_layer=self.num_val_windows,
+            )
+            model_names_trained.extend(ensemble_names)
 
         logger.info(f"Training complete. Models trained: {model_names_trained}")
         logger.info(f"Total runtime: {time.time() - time_start:.2f} s")
@@ -523,19 +560,25 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def _fit_ensembles(
         self,
         *,
-
-
-        time_limit:
-        ensemble_hyperparameters: dict,
+        data_per_window: list[TimeSeriesDataFrame],
+        predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+        time_limit: float | None,
+        ensemble_hyperparameters: list[dict],
+        num_windows_per_layer: tuple[int, ...],
     ) -> list[str]:
-
-
-
-
-
-
-
-
+        ensemble_composer = EnsembleComposer(
+            path=self.path,
+            prediction_length=self.prediction_length,
+            eval_metric=self.eval_metric,
+            target=self.target,
+            ensemble_hyperparameters=ensemble_hyperparameters,
+            num_windows_per_layer=num_windows_per_layer,
+            quantile_levels=self.quantile_levels,
+            model_graph=self.model_graph,
+        ).fit(
+            data_per_window=data_per_window,
+            predictions_per_window=predictions_per_window,
+            time_limit=time_limit,
         )
 
         ensembles_trained = []
@@ -544,48 +587,34 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             self.save_model(model=model)
             ensembles_trained.append(model.name)
 
-        return ensembles_trained
+        return ensembles_trained
 
-    def
-
-
-
-
-            prediction_length=self.prediction_length,
-            num_val_windows=self.num_val_windows,
-            val_step_size=self.val_step_size,
-        )
-        return val_splitter
+    def _get_validation_windows(self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None):
+        train_splitter = self._get_val_splitter(use_val_data=val_data is not None)
+        return [val_fold for _, val_fold in train_splitter.split(train_data)] + (
+            [] if val_data is None else [val_data]
+        )
 
-    def
-
-        return
-            path=self.path,
+    def _get_val_splitter(self, use_val_data: bool = False) -> AbstractWindowSplitter:
+        num_windows_from_train = sum(self.num_val_windows[:-1]) if use_val_data else sum(self.num_val_windows)
+        return ExpandingWindowSplitter(
             prediction_length=self.prediction_length,
-
-
-            quantile_levels=self.quantile_levels,
-            model_graph=self.model_graph,
-            ensemble_hyperparameters=ensemble_hyperparameters,
-            window_splitter=self._get_val_splitter(),
+            num_val_windows=num_windows_from_train,
+            val_step_size=self.val_step_size,
         )
 
-    def
-
-
-
-
-
-        if val_data is None:
-            return [val_fold for _, val_fold in self._get_val_splitter().split(train_data)]
-        else:
-            return [val_data]
+    def _get_base_model_predictions(self, model_names: list[str]) -> dict[str, list[TimeSeriesDataFrame]]:
+        """Get base model predictions for ensemble training / inference."""
+        predictions_per_window = {}
+        for model_name in model_names:
+            predictions_per_window[model_name] = self._get_model_oof_predictions(model_name)
+        return predictions_per_window
 
     def leaderboard(
         self,
-        data:
+        data: TimeSeriesDataFrame | None = None,
         extra_info: bool = False,
-        extra_metrics:
+        extra_metrics: list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> pd.DataFrame:
         logger.debug("Generating leaderboard for all models trained")
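`_get_val_splitter` now counts how many windows must be carved out of `train_data`: all of `num_val_windows` when no `val_data` is given, and all but the last layer's window when a user-provided `val_data` serves as the final window. A standalone sketch of the bookkeeping, assuming `num_val_windows=(2, 1)`:

num_val_windows = (2, 1)  # illustrative: two windows for layer 0, one for the last layer

def windows_from_train(use_val_data: bool) -> int:
    # same expression as _get_val_splitter: a user-provided val_data replaces
    # the final layer's window, so it is not cut from train_data
    return sum(num_val_windows[:-1]) if use_val_data else sum(num_val_windows)

print(windows_from_train(use_val_data=False))  # 3
print(windows_from_train(use_val_data=True))   # 2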
@@ -675,7 +704,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         return df[explicit_column_order]
 
     def persist(
-        self, model_names:
+        self, model_names: Literal["all", "best"] | list[str] = "all", with_ancestors: bool = False
     ) -> list[str]:
         if model_names == "all":
             model_names = self.get_model_names()
@@ -700,7 +729,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return model_names
 
-    def unpersist(self, model_names:
+    def unpersist(self, model_names: Literal["all"] | list[str] = "all") -> list[str]:
         if model_names == "all":
             model_names = list(self.models.keys())
         if not isinstance(model_names, list):
@@ -712,9 +741,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             unpersisted_models.append(model)
         return unpersisted_models
 
-    def _get_model_for_prediction(
-        self, model: Optional[Union[str, TimeSeriesModelBase]] = None, verbose: bool = True
-    ) -> str:
+    def _get_model_for_prediction(self, model: str | TimeSeriesModelBase | None = None, verbose: bool = True) -> str:
         """Given an optional identifier or model object, return the name of the model with which to predict.
 
         If the model is not provided, this method will default to the best model according to the validation score.
@@ -740,10 +767,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def predict(
         self,
         data: TimeSeriesDataFrame,
-        known_covariates:
-        model:
+        known_covariates: TimeSeriesDataFrame | None = None,
+        model: str | TimeSeriesModelBase | None = None,
         use_cache: bool = True,
-        random_seed:
+        random_seed: int | None = None,
     ) -> TimeSeriesDataFrame:
         model_name = self._get_model_for_prediction(model)
         model_pred_dict, _ = self.get_model_pred_dict(
@@ -758,7 +785,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             raise ValueError(f"Model {model_name} failed to predict. Please check the model's logs.")
         return predictions
 
-    def _get_eval_metric(self, metric:
+    def _get_eval_metric(self, metric: str | TimeSeriesScorer | None) -> TimeSeriesScorer:
         if metric is None:
             return self.eval_metric
         else:
@@ -773,7 +800,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         data: TimeSeriesDataFrame,
         predictions: TimeSeriesDataFrame,
-        metric:
+        metric: str | TimeSeriesScorer | None = None,
     ) -> float:
         """Compute the score measuring how well the predictions align with the data."""
         return self._get_eval_metric(metric).score(
@@ -785,8 +812,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def score(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metric:
+        model: str | TimeSeriesModelBase | None = None,
+        metric: str | TimeSeriesScorer | None = None,
         use_cache: bool = True,
     ) -> float:
         eval_metric = self._get_eval_metric(metric)
@@ -796,8 +823,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def evaluate(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metrics:
+        model: str | TimeSeriesModelBase | None = None,
+        metrics: str | TimeSeriesScorer | list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> dict[str, float]:
         past_data, known_covariates = data.get_model_inputs_for_scoring(
@@ -818,13 +845,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         data: TimeSeriesDataFrame,
         features: list[str],
-        model:
-        metric:
-        time_limit:
+        model: str | TimeSeriesModelBase | None = None,
+        metric: str | TimeSeriesScorer | None = None,
+        time_limit: float | None = None,
         method: Literal["naive", "permutation"] = "permutation",
         subsample_size: int = 50,
-        num_iterations:
-        random_seed:
+        num_iterations: int | None = None,
+        random_seed: int | None = None,
         relative_scores: bool = False,
         include_confidence_band: bool = True,
         confidence_level: float = 0.99,
@@ -841,9 +868,6 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         # start timer and cap subsample size if it's greater than the number of items in the provided data set
         time_start = time.time()
         if subsample_size > data.num_items:
-            logger.info(
-                f"Subsample_size {subsample_size} is larger than the number of items in the data and will be ignored"
-            )
             subsample_size = data.num_items
 
         # set default number of iterations and cap iterations if the number of items in the data is smaller
@@ -923,7 +947,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return importance_df
 
-    def _model_uses_feature(self, model:
+    def _model_uses_feature(self, model: str | TimeSeriesModelBase, feature: str) -> bool:
         """Check if the given model uses the given feature."""
         models_with_ancestors = set(self.get_minimum_model_set(model))
 
@@ -936,6 +960,72 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return False
 
+    def backtest_predictions(
+        self,
+        data: TimeSeriesDataFrame | None,
+        model_names: list[str],
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
+        use_cache: bool = True,
+    ) -> dict[str, list[TimeSeriesDataFrame]]:
+        if data is None:
+            assert num_val_windows is None, "num_val_windows must be None when data is None"
+            assert val_step_size is None, "val_step_size must be None when data is None"
+            return {model_name: self._get_model_oof_predictions(model_name) for model_name in model_names}
+
+        if val_step_size is None:
+            val_step_size = self.prediction_length
+        if num_val_windows is None:
+            num_val_windows = 1
+
+        splitter = ExpandingWindowSplitter(
+            prediction_length=self.prediction_length,
+            num_val_windows=num_val_windows,
+            val_step_size=val_step_size,
+        )
+
+        result: dict[str, list[TimeSeriesDataFrame]] = {model_name: [] for model_name in model_names}
+        for past_data, full_data in splitter.split(data):
+            known_covariates = full_data.slice_by_timestep(-self.prediction_length, None)[
+                self.covariate_metadata.known_covariates
+            ]
+            pred_dict, _ = self.get_model_pred_dict(
+                model_names=model_names,
+                data=past_data,
+                known_covariates=known_covariates,
+                use_cache=use_cache,
+            )
+            for model_name in model_names:
+                result[model_name].append(pred_dict[model_name])  # type: ignore
+
+        return result
+
+    def backtest_targets(
+        self,
+        data: TimeSeriesDataFrame | None,
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
+    ) -> list[TimeSeriesDataFrame]:
+        if data is None:
+            assert num_val_windows is None, "num_val_windows must be None when data is None"
+            assert val_step_size is None, "val_step_size must be None when data is None"
+            train_data = self.load_train_data()
+            val_data = self.load_val_data()
+            return self._get_validation_windows(train_data=train_data, val_data=val_data)
+
+        if val_step_size is None:
+            val_step_size = self.prediction_length
+        if num_val_windows is None:
+            num_val_windows = 1
+
+        splitter = ExpandingWindowSplitter(
+            prediction_length=self.prediction_length,
+            num_val_windows=num_val_windows,
+            val_step_size=val_step_size,
+        )
+
+        return [val_fold for _, val_fold in splitter.split(data)]
+
     def _add_ci_to_feature_importance(
         self, importance_df: pd.DataFrame, confidence_level: float = 0.99
     ) -> pd.DataFrame:
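The new `backtest_predictions` / `backtest_targets` helpers return per-window prediction lists and the matching target frames; with `data=None` they fall back to the cached out-of-fold predictions and the stored validation windows. A hedged usage sketch, assuming an already-fitted trainer (import path per the file list above; the helper function is hypothetical):

from autogluon.timeseries import TimeSeriesDataFrame
from autogluon.timeseries.trainer.trainer import TimeSeriesTrainer

def paired_backtests(
    trainer: TimeSeriesTrainer, data: TimeSeriesDataFrame, model_name: str, num_windows: int
) -> list[tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]]:
    """Pair each backtest window's predictions with the targets for that window."""
    predictions = trainer.backtest_predictions(data=data, model_names=[model_name], num_val_windows=num_windows)
    targets = trainer.backtest_targets(data=data, num_val_windows=num_windows)
    return list(zip(predictions[model_name], targets))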
@@ -965,10 +1055,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def _predict_model(
         self,
-        model:
+        model: str | TimeSeriesModelBase,
         data: TimeSeriesDataFrame,
-        model_pred_dict: dict[str,
-        known_covariates:
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+        known_covariates: TimeSeriesDataFrame | None = None,
     ) -> TimeSeriesDataFrame:
         """Generate predictions using the given model.
 
@@ -981,10 +1071,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def _get_inputs_to_model(
         self,
-        model:
+        model: str | TimeSeriesModelBase,
         data: TimeSeriesDataFrame,
-        model_pred_dict: dict[str,
-    ) ->
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+    ) -> TimeSeriesDataFrame | dict[str, TimeSeriesDataFrame | None]:
         """Get the first argument that should be passed to model.predict.
 
         This method assumes that model_pred_dict contains the predictions of all base models, if model is an ensemble.
@@ -1002,11 +1092,11 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         model_names: list[str],
         data: TimeSeriesDataFrame,
-        known_covariates:
+        known_covariates: TimeSeriesDataFrame | None = None,
         raise_exception_if_failed: bool = True,
         use_cache: bool = True,
-        random_seed:
-    ) -> tuple[dict[str,
+        random_seed: int | None = None,
+    ) -> tuple[dict[str, TimeSeriesDataFrame | None], dict[str, float]]:
         """Return a dictionary with predictions of all models for the given dataset.
 
         Parameters
@@ -1038,8 +1128,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         for model_name in model_names:
             model_set.update(self.get_minimum_model_set(model_name))
         if len(model_set) > 1:
-
-            model_set = sorted(model_set, key=
+            model_to_layer = self._get_model_layers()
+            model_set = sorted(model_set, key=model_to_layer.get)  # type: ignore
         logger.debug(f"Prediction order: {model_set}")
 
         failed_models = []
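When several models (and their ancestors) must predict in one call, the hunk above orders them by layer so that every ensemble runs only after its base models have produced predictions. A toy example of the sort (names are illustrative):

# base models (layer 0) predict before the ensemble (layer 1)
model_to_layer = {"DeepAR": 0, "TFT": 0, "WeightedEnsemble": 1}
model_set = ["WeightedEnsemble", "DeepAR", "TFT"]
print(sorted(model_set, key=model_to_layer.get))  # ['DeepAR', 'TFT', 'WeightedEnsemble']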
@@ -1089,7 +1179,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         return dict(pred_time_dict_total)
 
     def _merge_refit_full_data(
-        self, train_data: TimeSeriesDataFrame, val_data:
+        self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None
     ) -> TimeSeriesDataFrame:
         if val_data is None:
             return train_data
@@ -1099,9 +1189,9 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def refit_single_full(
         self,
-        train_data:
-        val_data:
-        models:
+        train_data: TimeSeriesDataFrame | None = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        models: list[str] | None = None,
     ) -> list[str]:
         train_data = train_data or self.load_train_data()
         val_data = val_data or self.load_val_data()
@@ -1110,12 +1200,12 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         if models is None:
             models = self.get_model_names()
 
-
-
+        model_to_layer = self._get_model_layers()
+        models_sorted_by_layer = sorted(models, key=model_to_layer.get)  # type: ignore
 
         model_refit_map = {}
         models_trained_full = []
-        for model in
+        for model in models_sorted_by_layer:
             model = self.load_model(model)
             model_name = model.name
             if model._get_tags()["can_refit_full"]:
@@ -1180,11 +1270,11 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def get_trainable_base_models(
        self,
-        hyperparameters:
+        hyperparameters: str | dict[str, Any],
         *,
         multi_window: bool = False,
-        freq:
-        excluded_model_types:
+        freq: str | None = None,
+        excluded_model_types: list[str] | None = None,
         hyperparameter_tune: bool = False,
     ) -> list[AbstractTimeSeriesModel]:
         return TrainableModelSetBuilder(