autogluon.timeseries 1.4.1b20251115__py3-none-any.whl → 1.4.1b20251218__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
This version of autogluon.timeseries has been flagged as potentially problematic.
- autogluon/timeseries/configs/hyperparameter_presets.py +7 -21
- autogluon/timeseries/configs/predictor_presets.py +23 -39
- autogluon/timeseries/dataset/ts_dataframe.py +32 -34
- autogluon/timeseries/learner.py +67 -33
- autogluon/timeseries/metrics/__init__.py +4 -4
- autogluon/timeseries/metrics/abstract.py +8 -8
- autogluon/timeseries/metrics/point.py +9 -9
- autogluon/timeseries/metrics/quantile.py +4 -4
- autogluon/timeseries/models/__init__.py +2 -1
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +52 -39
- autogluon/timeseries/models/abstract/model_trial.py +2 -1
- autogluon/timeseries/models/abstract/tunable.py +8 -8
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +30 -26
- autogluon/timeseries/models/autogluon_tabular/per_step.py +12 -10
- autogluon/timeseries/models/autogluon_tabular/transforms.py +2 -2
- autogluon/timeseries/models/chronos/__init__.py +2 -1
- autogluon/timeseries/models/chronos/chronos2.py +395 -0
- autogluon/timeseries/models/chronos/model.py +29 -24
- autogluon/timeseries/models/chronos/utils.py +5 -5
- autogluon/timeseries/models/ensemble/__init__.py +17 -10
- autogluon/timeseries/models/ensemble/abstract.py +13 -9
- autogluon/timeseries/models/ensemble/array_based/__init__.py +2 -2
- autogluon/timeseries/models/ensemble/array_based/abstract.py +24 -31
- autogluon/timeseries/models/ensemble/array_based/models.py +146 -11
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +2 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +6 -5
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +186 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +44 -83
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +21 -55
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +172 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +7 -3
- autogluon/timeseries/models/ensemble/weighted/basic.py +26 -13
- autogluon/timeseries/models/ensemble/weighted/greedy.py +20 -145
- autogluon/timeseries/models/gluonts/abstract.py +30 -29
- autogluon/timeseries/models/gluonts/dataset.py +9 -9
- autogluon/timeseries/models/gluonts/models.py +0 -7
- autogluon/timeseries/models/local/__init__.py +0 -7
- autogluon/timeseries/models/local/abstract_local_model.py +13 -16
- autogluon/timeseries/models/local/naive.py +2 -2
- autogluon/timeseries/models/local/npts.py +7 -1
- autogluon/timeseries/models/local/statsforecast.py +12 -12
- autogluon/timeseries/models/multi_window/multi_window_model.py +38 -23
- autogluon/timeseries/models/registry.py +3 -4
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +3 -4
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +6 -6
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +4 -9
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +2 -3
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +10 -10
- autogluon/timeseries/models/toto/_internal/dataset.py +2 -2
- autogluon/timeseries/models/toto/_internal/forecaster.py +8 -8
- autogluon/timeseries/models/toto/dataloader.py +4 -4
- autogluon/timeseries/models/toto/hf_pretrained_model.py +97 -16
- autogluon/timeseries/models/toto/model.py +30 -17
- autogluon/timeseries/predictor.py +517 -129
- autogluon/timeseries/regressor.py +18 -23
- autogluon/timeseries/splitter.py +2 -2
- autogluon/timeseries/trainer/ensemble_composer.py +323 -129
- autogluon/timeseries/trainer/model_set_builder.py +9 -9
- autogluon/timeseries/trainer/prediction_cache.py +16 -16
- autogluon/timeseries/trainer/trainer.py +235 -144
- autogluon/timeseries/trainer/utils.py +3 -4
- autogluon/timeseries/transforms/covariate_scaler.py +7 -7
- autogluon/timeseries/transforms/target_scaler.py +8 -8
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/lags.py +1 -3
- autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- autogluon/timeseries/utils/features.py +22 -9
- autogluon/timeseries/utils/forecast.py +1 -2
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/version.py +1 -1
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/METADATA +23 -21
- autogluon_timeseries-1.4.1b20251218.dist-info/RECORD +103 -0
- autogluon_timeseries-1.4.1b20251115.dist-info/RECORD +0 -96
- /autogluon.timeseries-1.4.1b20251115-py3.9-nspkg.pth → /autogluon.timeseries-1.4.1b20251218-py3.11-nspkg.pth +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/WHEEL +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/licenses/LICENSE +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/licenses/NOTICE +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/namespace_packages.txt +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/top_level.txt +0 -0
- {autogluon_timeseries-1.4.1b20251115.dist-info → autogluon_timeseries-1.4.1b20251218.dist-info}/zip-safe +0 -0
autogluon/timeseries/trainer/trainer.py (+235 -144, excerpt):

Note: the diff viewer truncates the removed side of changed lines, so several `-` lines below appear cut off or empty; the `+` lines carry the full new content.

```diff
@@ -5,7 +5,7 @@ import time
 import traceback
 from collections import defaultdict
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any, Literal
 
 import networkx as nx
 import numpy as np
@@ -47,14 +47,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         path: str,
         prediction_length: int = 1,
-        eval_metric:
+        eval_metric: str | TimeSeriesScorer | None = None,
         save_data: bool = True,
         skip_model_selection: bool = False,
         enable_ensemble: bool = True,
         verbosity: int = 2,
-        num_val_windows:
-        val_step_size:
-        refit_every_n_windows:
+        num_val_windows: tuple[int, ...] = (1,),
+        val_step_size: int | None = None,
+        refit_every_n_windows: int | None = 1,
         # TODO: Set cache_predictions=False by default once all models in default presets have a reasonable inference speed
         cache_predictions: bool = True,
         **kwargs,
@@ -88,6 +88,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self.eval_metric = check_get_evaluation_metric(eval_metric, prediction_length=prediction_length)
 
         self.num_val_windows = num_val_windows
+
+        # Validate num_val_windows
+        if len(self.num_val_windows) == 0:
+            raise ValueError("num_val_windows cannot be empty")
+        if not all(isinstance(w, int) and w > 0 for w in self.num_val_windows):
+            raise ValueError(f"num_val_windows must contain only positive integers, got {self.num_val_windows}")
+
         self.val_step_size = val_step_size
         self.refit_every_n_windows = refit_every_n_windows
         self.hpo_results = {}
```
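The constructor's `num_val_windows` is now a tuple with default `(1,)`, one entry per ensemble layer, and is validated eagerly. A minimal, runnable sketch of the added checks; the standalone `validate` helper is hypothetical and only mirrors the logic in `__init__` above:

```python
# Hypothetical helper mirroring the validation added in TimeSeriesTrainer.__init__.
def validate(num_val_windows: tuple[int, ...]) -> None:
    if len(num_val_windows) == 0:
        raise ValueError("num_val_windows cannot be empty")
    if not all(isinstance(w, int) and w > 0 for w in num_val_windows):
        raise ValueError(f"num_val_windows must contain only positive integers, got {num_val_windows}")

validate((2, 1))  # e.g. two windows for base models, one for the final layer
try:
    validate((0,))
except ValueError as e:
    print(e)  # num_val_windows must contain only positive integers, got (0,)
```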
```diff
@@ -111,14 +118,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         path = os.path.join(self.path_data, "train.pkl")
         return load_pkl.load(path=path)
 
-    def load_val_data(self) ->
+    def load_val_data(self) -> TimeSeriesDataFrame | None:
         path = os.path.join(self.path_data, "val.pkl")
         if os.path.exists(path):
             return load_pkl.load(path=path)
         else:
             return None
 
-    def load_data(self) -> tuple[TimeSeriesDataFrame,
+    def load_data(self) -> tuple[TimeSeriesDataFrame, TimeSeriesDataFrame | None]:
         train_data = self.load_train_data()
         val_data = self.load_val_data()
         return train_data, val_data
@@ -141,7 +148,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def _add_model(
         self,
         model: TimeSeriesModelBase,
-        base_models:
+        base_models: list[str] | None = None,
     ):
         """Add a model to the model graph of the trainer. If the model is an ensemble, also add
         information about dependencies to the model graph (list of models specified via ``base_models``).
@@ -173,8 +180,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             for base_model in base_models:
                 self.model_graph.add_edge(base_model, model.name)
 
-    def
-        """Get a dictionary mapping each model to their
+    def _get_model_layers(self) -> dict[str, int]:
+        """Get a dictionary mapping each model to their layer in the model graph"""
 
         # get nodes without a parent
         rootset = set(self.model_graph.nodes)
@@ -187,14 +194,14 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             for dest_node in paths_to:
                 paths_from[dest_node][source_node] = paths_to[dest_node]
 
-        # determine
-
+        # determine layers
+        layers = {}
         for n in paths_from:
-
+            layers[n] = max(paths_from[n].get(src, 0) for src in rootset)
 
-        return
+        return layers
 
-    def get_models_attribute_dict(self, attribute: str, models:
+    def get_models_attribute_dict(self, attribute: str, models: list[str] | None = None) -> dict[str, Any]:
         """Get an attribute from the `model_graph` for each of the model names
         specified. If `models` is none, the attribute will be returned for all models"""
         results = {}
@@ -212,25 +219,25 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         if len(models) == 1:
             return models[0]
         model_performances = self.get_models_attribute_dict(attribute="val_score")
-
-
-            (m, model_performances[m],
+        model_layers = self._get_model_layers()
+        model_name_score_layer_list = [
+            (m, model_performances[m], model_layers.get(m, 0)) for m in models if model_performances[m] is not None
         ]
 
-        if not
+        if not model_name_score_layer_list:
             raise ValueError("No fitted models have validation scores computed.")
 
         # rank models in terms of validation score. if two models have the same validation score,
-        # rank them by their
+        # rank them by their layer in the model graph (lower layer models are preferred).
         return max(
-
-            key=lambda mns: (mns[1], -mns[2]),  # (score, -
+            model_name_score_layer_list,
+            key=lambda mns: (mns[1], -mns[2]),  # (score, -layer)
         )[0]
 
-    def get_model_names(self,
+    def get_model_names(self, layer: int | None = None) -> list[str]:
         """Get model names that are registered in the model graph"""
-        if
-            return list(node for node, l in self.
+        if layer is not None:
+            return list(node for node, l in self._get_model_layers().items() if l == layer)  # noqa: E741
         return list(self.model_graph.nodes)
 
     def get_info(self, include_model_info: bool = False) -> dict[str, Any]:
```
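Best-model selection now breaks validation-score ties by graph layer, preferring models in earlier layers. A runnable illustration of the `(score, -layer)` key with fabricated candidates (scores follow AutoGluon's higher-is-better convention):

```python
# Fabricated candidates: (name, val_score, layer). max over (score, -layer)
# picks the best score and, on ties, the model in the lower layer.
candidates = [
    ("DeepAR", -0.21, 0),
    ("WeightedEnsemble", -0.18, 1),
    ("StackedEnsemble", -0.18, 2),
]
best = max(candidates, key=lambda mns: (mns[1], -mns[2]))[0]
print(best)  # WeightedEnsemble: tied on score with StackedEnsemble, but in a lower layer
```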
```diff
@@ -262,9 +269,9 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         model: AbstractTimeSeriesModel,
         train_data: TimeSeriesDataFrame,
-        time_limit:
-        val_data:
-        hyperparameter_tune_kwargs:
+        time_limit: float | None = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        hyperparameter_tune_kwargs: str | dict = "auto",
     ):
         default_num_trials = None
         if time_limit is None and (
@@ -280,7 +287,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
             time_limit=time_limit,
             default_num_trials=default_num_trials,
-            val_splitter=self._get_val_splitter(),
+            val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
             refit_every_n_windows=self.refit_every_n_windows,
         )
         total_tuning_time = time.time() - tuning_start_time
@@ -290,11 +297,21 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         # add each of the trained HPO configurations to the trained models
         for model_hpo_name, model_info in hpo_models.items():
             model_path = os.path.join(self.path, model_info["path"])
+
             # Only load model configurations that didn't fail
-            if Path(model_path).exists():
-
-
-
+            if not Path(model_path).exists():
+                continue
+
+            model_hpo = self.load_model(model_hpo_name, path=model_path, model_type=type(model))
+
+            # override validation score to align evaluations on the final ensemble layer's window
+            if isinstance(model_hpo, MultiWindowBacktestingModel):
+                model_hpo.val_score = float(
+                    np.mean([info["val_score"] for info in model_hpo.info_per_val_window[-self.num_val_windows[-1] :]])
+                )
+
+            self._add_model(model_hpo)
+            model_names_trained.append(model_hpo.name)
 
         logger.info(f"\tTrained {len(model_names_trained)} models while tuning {model.name}.")
 
@@ -315,8 +332,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         train_data: TimeSeriesDataFrame,
         model: AbstractTimeSeriesModel,
-        val_data:
-        time_limit:
+        val_data: TimeSeriesDataFrame | None = None,
+        time_limit: float | None = None,
     ) -> list[str]:
         """Fit and save the given model on given training and validation data and save the trained model.
 
@@ -335,10 +352,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         model.fit(
             train_data=train_data,
-            val_data=val_data,
+            val_data=None if isinstance(model, MultiWindowBacktestingModel) else val_data,
             time_limit=time_limit,
             verbosity=self.verbosity,
-            val_splitter=self._get_val_splitter(),
+            val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
             refit_every_n_windows=self.refit_every_n_windows,
         )
 
@@ -347,11 +364,19 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         if time_limit is not None:
             time_limit = time_limit - (fit_end_time - fit_start_time)
-        if val_data is not None
+        if val_data is not None:
             model.score_and_cache_oof(
                 val_data, store_val_score=True, store_predict_time=True, time_limit=time_limit
             )
 
+        # by default, MultiWindowBacktestingModel computes validation score on all windows. However,
+        # when doing multi-layer stacking, the trainer only scores on the windows of the last layer.
+        # we override the val_score to align scores.
+        if isinstance(model, MultiWindowBacktestingModel):
+            model.val_score = float(
+                np.mean([info["val_score"] for info in model.info_per_val_window[-self.num_val_windows[-1] :]])
+            )
+
         log_scores_and_times(
             val_score=model.val_score,
             fit_time=model.fit_time,
```
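After fitting, a `MultiWindowBacktestingModel`'s `val_score` is recomputed over only the last layer's windows, so every model is compared on the same windows. A runnable illustration with fabricated per-window scores:

```python
import numpy as np

# Fabricated scores: with num_val_windows = (2, 1), only the final layer's
# single window contributes to the overridden val_score.
num_val_windows = (2, 1)
info_per_val_window = [{"val_score": -0.30}, {"val_score": -0.25}, {"val_score": -0.20}]
val_score = float(np.mean([info["val_score"] for info in info_per_val_window[-num_val_windows[-1] :]]))
print(val_score)  # -0.2, the mean over the last window only
```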
```diff
@@ -376,13 +401,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def fit(
         self,
         train_data: TimeSeriesDataFrame,
-        hyperparameters:
-        val_data:
-        ensemble_hyperparameters:
-        hyperparameter_tune_kwargs:
-        excluded_model_types:
-        time_limit:
-        random_seed:
+        hyperparameters: str | dict[Any, dict],
+        val_data: TimeSeriesDataFrame | None = None,
+        ensemble_hyperparameters: dict | list[dict] | None = None,
+        hyperparameter_tune_kwargs: str | dict | None = None,
+        excluded_model_types: list[str] | None = None,
+        time_limit: float | None = None,
+        random_seed: int | None = None,
     ):
         """Fit a set of timeseries models specified by the `hyperparameters`
         dictionary that maps model names to their specified hyperparameters.
@@ -415,12 +440,22 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         # Handle ensemble hyperparameters
         if ensemble_hyperparameters is None:
-            ensemble_hyperparameters = {"GreedyEnsemble": {}}
-
+            ensemble_hyperparameters = [{"GreedyEnsemble": {}}]
+        if isinstance(ensemble_hyperparameters, dict):
+            ensemble_hyperparameters = [ensemble_hyperparameters]
+        validate_ensemble_hyperparameters(ensemble_hyperparameters)
 
         time_start = time.time()
         hyperparameters = copy.deepcopy(hyperparameters)
 
+        if val_data is not None:
+            if self.num_val_windows[-1] != 1:
+                raise ValueError(
+                    f"When val_data is provided, the last element of num_val_windows must be 1, "
+                    f"got {self.num_val_windows[-1]}"
+                )
+        multi_window = self._get_val_splitter(use_val_data=val_data is not None).num_val_windows > 0
+
         if self.save_data and not self.is_data_saved:
             self.save_train_data(train_data)
             if val_data is not None:
```
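`ensemble_hyperparameters` may now be a list of dicts, one per stacking layer; a single dict or `None` is normalized first. A standalone sketch of that normalization (the package additionally runs `validate_ensemble_hyperparameters`, omitted here):

```python
# Hypothetical standalone version of the normalization in fit(); only the
# shape handling is shown.
def normalize(ensemble_hyperparameters):
    if ensemble_hyperparameters is None:
        ensemble_hyperparameters = [{"GreedyEnsemble": {}}]
    if isinstance(ensemble_hyperparameters, dict):
        ensemble_hyperparameters = [ensemble_hyperparameters]
    return ensemble_hyperparameters

print(normalize(None))                    # [{'GreedyEnsemble': {}}] - default, one layer
print(normalize({"GreedyEnsemble": {}}))  # wrapped into a single-layer list
print(normalize([{"A": {}}, {"B": {}}]))  # two stacking layers, passed through
```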
```diff
@@ -431,7 +466,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             hyperparameters=hyperparameters,
             hyperparameter_tune=hyperparameter_tune_kwargs is not None, # TODO: remove hyperparameter_tune
             freq=train_data.freq,
-            multi_window=
+            multi_window=multi_window,
             excluded_model_types=excluded_model_types,
         )
 
@@ -500,13 +535,16 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
                     train_data, model=model, val_data=val_data, time_limit=time_left_for_model
                 )
 
-
-
-
-
-
-
-
+        if self.enable_ensemble and ensemble_hyperparameters:
+            model_names = self.get_model_names(layer=0)
+            ensemble_names = self._fit_ensembles(
+                data_per_window=self._get_validation_windows(train_data, val_data),
+                predictions_per_window=self._get_base_model_predictions(model_names),
+                time_limit=None if time_limit is None else time_limit - (time.time() - time_start),
+                ensemble_hyperparameters=ensemble_hyperparameters,
+                num_windows_per_layer=self.num_val_windows,
+            )
+            model_names_trained.extend(ensemble_names)
 
         logger.info(f"Training complete. Models trained: {model_names_trained}")
         logger.info(f"Total runtime: {time.time() - time_start:.2f} s")
@@ -523,19 +561,25 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def _fit_ensembles(
         self,
         *,
-
-
-        time_limit:
-        ensemble_hyperparameters: dict,
+        data_per_window: list[TimeSeriesDataFrame],
+        predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
+        time_limit: float | None,
+        ensemble_hyperparameters: list[dict],
+        num_windows_per_layer: tuple[int, ...],
     ) -> list[str]:
-
-
-
-
-
-
-
-
+        ensemble_composer = EnsembleComposer(
+            path=self.path,
+            prediction_length=self.prediction_length,
+            eval_metric=self.eval_metric,
+            target=self.target,
+            ensemble_hyperparameters=ensemble_hyperparameters,
+            num_windows_per_layer=num_windows_per_layer,
+            quantile_levels=self.quantile_levels,
+            model_graph=self.model_graph,
+        ).fit(
+            data_per_window=data_per_window,
+            predictions_per_window=predictions_per_window,
+            time_limit=time_limit,
         )
 
         ensembles_trained = []
@@ -544,48 +588,34 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             self.save_model(model=model)
             ensembles_trained.append(model.name)
 
-        return ensembles_trained
+        return ensembles_trained
 
-    def
-
-
-
-
-            prediction_length=self.prediction_length,
-            num_val_windows=self.num_val_windows,
-            val_step_size=self.val_step_size,
-        )
-        return val_splitter
+    def _get_validation_windows(self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None):
+        train_splitter = self._get_val_splitter(use_val_data=val_data is not None)
+        return [val_fold for _, val_fold in train_splitter.split(train_data)] + (
+            [] if val_data is None else [val_data]
+        )
 
-    def
-
-        return
-            path=self.path,
+    def _get_val_splitter(self, use_val_data: bool = False) -> AbstractWindowSplitter:
+        num_windows_from_train = sum(self.num_val_windows[:-1]) if use_val_data else sum(self.num_val_windows)
+        return ExpandingWindowSplitter(
             prediction_length=self.prediction_length,
-
-
-            quantile_levels=self.quantile_levels,
-            model_graph=self.model_graph,
-            ensemble_hyperparameters=ensemble_hyperparameters,
-            window_splitter=self._get_val_splitter(),
+            num_val_windows=num_windows_from_train,
+            val_step_size=self.val_step_size,
         )
 
-    def
-
-
-
-
-
-        if val_data is None:
-            return [val_fold for _, val_fold in self._get_val_splitter().split(train_data)]
-        else:
-            return [val_data]
+    def _get_base_model_predictions(self, model_names: list[str]) -> dict[str, list[TimeSeriesDataFrame]]:
+        """Get base model predictions for ensemble training / inference."""
+        predictions_per_window = {}
+        for model_name in model_names:
+            predictions_per_window[model_name] = self._get_model_oof_predictions(model_name)
+        return predictions_per_window
 
     def leaderboard(
         self,
-        data:
+        data: TimeSeriesDataFrame | None = None,
         extra_info: bool = False,
-        extra_metrics:
+        extra_metrics: list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> pd.DataFrame:
         logger.debug("Generating leaderboard for all models trained")
```
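The rewritten `_get_val_splitter` budgets how many validation windows are cut from `train_data`: when the user supplies `val_data`, it serves as the final layer's single window, so only the earlier layers' windows come from `train_data`. A runnable sketch of that arithmetic:

```python
# Illustrative only: mirrors the num_windows_from_train expression in
# _get_val_splitter above.
def num_windows_from_train(num_val_windows: tuple[int, ...], use_val_data: bool) -> int:
    return sum(num_val_windows[:-1]) if use_val_data else sum(num_val_windows)

print(num_windows_from_train((2, 1), use_val_data=False))  # 3: all windows from train_data
print(num_windows_from_train((2, 1), use_val_data=True))   # 2: val_data covers the last layer
```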
```diff
@@ -675,7 +705,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         return df[explicit_column_order]
 
     def persist(
-        self, model_names:
+        self, model_names: Literal["all", "best"] | list[str] = "all", with_ancestors: bool = False
     ) -> list[str]:
         if model_names == "all":
             model_names = self.get_model_names()
@@ -700,7 +730,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return model_names
 
-    def unpersist(self, model_names:
+    def unpersist(self, model_names: Literal["all"] | list[str] = "all") -> list[str]:
         if model_names == "all":
             model_names = list(self.models.keys())
         if not isinstance(model_names, list):
@@ -712,9 +742,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             unpersisted_models.append(model)
         return unpersisted_models
 
-    def _get_model_for_prediction(
-        self, model: Optional[Union[str, TimeSeriesModelBase]] = None, verbose: bool = True
-    ) -> str:
+    def _get_model_for_prediction(self, model: str | TimeSeriesModelBase | None = None, verbose: bool = True) -> str:
         """Given an optional identifier or model object, return the name of the model with which to predict.
 
         If the model is not provided, this method will default to the best model according to the validation score.
@@ -740,10 +768,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def predict(
         self,
         data: TimeSeriesDataFrame,
-        known_covariates:
-        model:
+        known_covariates: TimeSeriesDataFrame | None = None,
+        model: str | TimeSeriesModelBase | None = None,
         use_cache: bool = True,
-        random_seed:
+        random_seed: int | None = None,
     ) -> TimeSeriesDataFrame:
         model_name = self._get_model_for_prediction(model)
         model_pred_dict, _ = self.get_model_pred_dict(
@@ -758,7 +786,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
             raise ValueError(f"Model {model_name} failed to predict. Please check the model's logs.")
         return predictions
 
-    def _get_eval_metric(self, metric:
+    def _get_eval_metric(self, metric: str | TimeSeriesScorer | None) -> TimeSeriesScorer:
         if metric is None:
             return self.eval_metric
         else:
@@ -773,7 +801,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         data: TimeSeriesDataFrame,
         predictions: TimeSeriesDataFrame,
-        metric:
+        metric: str | TimeSeriesScorer | None = None,
     ) -> float:
         """Compute the score measuring how well the predictions align with the data."""
         return self._get_eval_metric(metric).score(
@@ -785,8 +813,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def score(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metric:
+        model: str | TimeSeriesModelBase | None = None,
+        metric: str | TimeSeriesScorer | None = None,
         use_cache: bool = True,
     ) -> float:
         eval_metric = self._get_eval_metric(metric)
@@ -796,8 +824,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
     def evaluate(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metrics:
+        model: str | TimeSeriesModelBase | None = None,
+        metrics: str | TimeSeriesScorer | list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> dict[str, float]:
         past_data, known_covariates = data.get_model_inputs_for_scoring(
@@ -818,13 +846,13 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         data: TimeSeriesDataFrame,
         features: list[str],
-        model:
-        metric:
-        time_limit:
+        model: str | TimeSeriesModelBase | None = None,
+        metric: str | TimeSeriesScorer | None = None,
+        time_limit: float | None = None,
         method: Literal["naive", "permutation"] = "permutation",
         subsample_size: int = 50,
-        num_iterations:
-        random_seed:
+        num_iterations: int | None = None,
+        random_seed: int | None = None,
         relative_scores: bool = False,
         include_confidence_band: bool = True,
         confidence_level: float = 0.99,
@@ -841,9 +869,6 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         # start timer and cap subsample size if it's greater than the number of items in the provided data set
         time_start = time.time()
         if subsample_size > data.num_items:
-            logger.info(
-                f"Subsample_size {subsample_size} is larger than the number of items in the data and will be ignored"
-            )
             subsample_size = data.num_items
 
         # set default number of iterations and cap iterations if the number of items in the data is smaller
@@ -923,7 +948,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return importance_df
 
-    def _model_uses_feature(self, model:
+    def _model_uses_feature(self, model: str | TimeSeriesModelBase, feature: str) -> bool:
         """Check if the given model uses the given feature."""
         models_with_ancestors = set(self.get_minimum_model_set(model))
 
```
```diff
@@ -936,6 +961,72 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
         return False
 
+    def backtest_predictions(
+        self,
+        data: TimeSeriesDataFrame | None,
+        model_names: list[str],
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
+        use_cache: bool = True,
+    ) -> dict[str, list[TimeSeriesDataFrame]]:
+        if data is None:
+            assert num_val_windows is None, "num_val_windows must be None when data is None"
+            assert val_step_size is None, "val_step_size must be None when data is None"
+            return {model_name: self._get_model_oof_predictions(model_name) for model_name in model_names}
+
+        if val_step_size is None:
+            val_step_size = self.prediction_length
+        if num_val_windows is None:
+            num_val_windows = 1
+
+        splitter = ExpandingWindowSplitter(
+            prediction_length=self.prediction_length,
+            num_val_windows=num_val_windows,
+            val_step_size=val_step_size,
+        )
+
+        result: dict[str, list[TimeSeriesDataFrame]] = {model_name: [] for model_name in model_names}
+        for past_data, full_data in splitter.split(data):
+            known_covariates = full_data.slice_by_timestep(-self.prediction_length, None)[
+                self.covariate_metadata.known_covariates
+            ]
+            pred_dict, _ = self.get_model_pred_dict(
+                model_names=model_names,
+                data=past_data,
+                known_covariates=known_covariates,
+                use_cache=use_cache,
+            )
+            for model_name in model_names:
+                result[model_name].append(pred_dict[model_name])  # type: ignore
+
+        return result
+
+    def backtest_targets(
+        self,
+        data: TimeSeriesDataFrame | None,
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
+    ) -> list[TimeSeriesDataFrame]:
+        if data is None:
+            assert num_val_windows is None, "num_val_windows must be None when data is None"
+            assert val_step_size is None, "val_step_size must be None when data is None"
+            train_data = self.load_train_data()
+            val_data = self.load_val_data()
+            return self._get_validation_windows(train_data=train_data, val_data=val_data)
+
+        if val_step_size is None:
+            val_step_size = self.prediction_length
+        if num_val_windows is None:
+            num_val_windows = 1
+
+        splitter = ExpandingWindowSplitter(
+            prediction_length=self.prediction_length,
+            num_val_windows=num_val_windows,
+            val_step_size=val_step_size,
+        )
+
+        return [val_fold for _, val_fold in splitter.split(data)]
+
     def _add_ci_to_feature_importance(
         self, importance_df: pd.DataFrame, confidence_level: float = 0.99
     ) -> pd.DataFrame:
```
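The new `backtest_predictions` / `backtest_targets` helpers evaluate models over multiple held-out windows produced by `ExpandingWindowSplitter` (or fall back to the cached out-of-fold windows when `data=None`). A runnable sketch of the underlying splitting on a toy dataset; the splitter's import path is assumed from this package's layout:

```python
import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame
from autogluon.timeseries.splitter import ExpandingWindowSplitter  # assumed path

# Toy dataset: one item with 12 daily observations.
df = pd.DataFrame({
    "item_id": ["A"] * 12,
    "timestamp": pd.date_range("2024-01-01", periods=12, freq="D"),
    "target": range(12),
})
data = TimeSeriesDataFrame.from_data_frame(df, id_column="item_id", timestamp_column="timestamp")

# The same splitting that backtest_targets(data, num_val_windows=2) performs internally.
splitter = ExpandingWindowSplitter(prediction_length=2, num_val_windows=2, val_step_size=2)
targets = [val_fold for _, val_fold in splitter.split(data)]
print(len(targets))  # 2 backtest windows
```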
```diff
@@ -965,10 +1056,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def _predict_model(
         self,
-        model:
+        model: str | TimeSeriesModelBase,
         data: TimeSeriesDataFrame,
-        model_pred_dict: dict[str,
-        known_covariates:
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+        known_covariates: TimeSeriesDataFrame | None = None,
     ) -> TimeSeriesDataFrame:
         """Generate predictions using the given model.
 
@@ -981,10 +1072,10 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def _get_inputs_to_model(
         self,
-        model:
+        model: str | TimeSeriesModelBase,
         data: TimeSeriesDataFrame,
-        model_pred_dict: dict[str,
-    ) ->
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+    ) -> TimeSeriesDataFrame | dict[str, TimeSeriesDataFrame | None]:
         """Get the first argument that should be passed to model.predict.
 
         This method assumes that model_pred_dict contains the predictions of all base models, if model is an ensemble.
@@ -1002,11 +1093,11 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         self,
         model_names: list[str],
         data: TimeSeriesDataFrame,
-        known_covariates:
+        known_covariates: TimeSeriesDataFrame | None = None,
         raise_exception_if_failed: bool = True,
         use_cache: bool = True,
-        random_seed:
-    ) -> tuple[dict[str,
+        random_seed: int | None = None,
+    ) -> tuple[dict[str, TimeSeriesDataFrame | None], dict[str, float]]:
         """Return a dictionary with predictions of all models for the given dataset.
 
         Parameters
@@ -1038,8 +1129,8 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         for model_name in model_names:
             model_set.update(self.get_minimum_model_set(model_name))
         if len(model_set) > 1:
-
-            model_set = sorted(model_set, key=
+            model_to_layer = self._get_model_layers()
+            model_set = sorted(model_set, key=model_to_layer.get)  # type: ignore
         logger.debug(f"Prediction order: {model_set}")
 
         failed_models = []
@@ -1089,7 +1180,7 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         return dict(pred_time_dict_total)
 
     def _merge_refit_full_data(
-        self, train_data: TimeSeriesDataFrame, val_data:
+        self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None
     ) -> TimeSeriesDataFrame:
         if val_data is None:
             return train_data
@@ -1099,9 +1190,9 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def refit_single_full(
         self,
-        train_data:
-        val_data:
-        models:
+        train_data: TimeSeriesDataFrame | None = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        models: list[str] | None = None,
     ) -> list[str]:
         train_data = train_data or self.load_train_data()
         val_data = val_data or self.load_val_data()
@@ -1110,12 +1201,12 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
         if models is None:
             models = self.get_model_names()
 
-
-
+        model_to_layer = self._get_model_layers()
+        models_sorted_by_layer = sorted(models, key=model_to_layer.get)  # type: ignore
 
         model_refit_map = {}
         models_trained_full = []
-        for model in
+        for model in models_sorted_by_layer:
             model = self.load_model(model)
             model_name = model.name
             if model._get_tags()["can_refit_full"]:
@@ -1180,11 +1271,11 @@ class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
 
     def get_trainable_base_models(
         self,
-        hyperparameters:
+        hyperparameters: str | dict[str, Any],
         *,
         multi_window: bool = False,
-        freq:
-        excluded_model_types:
+        freq: str | None = None,
+        excluded_model_types: list[str] | None = None,
         hyperparameter_tune: bool = False,
     ) -> list[AbstractTimeSeriesModel]:
         return TrainableModelSetBuilder(
```
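Both prediction and `refit_single_full` now walk models in layer order, so base models are handled before the ensembles stacked on them. A fabricated illustration of the sort:

```python
# Fabricated layer map; sorted() with key=model_to_layer.get puts base models
# (layer 0) before higher-layer ensembles, keeping input order within a layer.
model_to_layer = {"DeepAR": 0, "ETS": 0, "WeightedEnsemble": 1}
models = ["WeightedEnsemble", "ETS", "DeepAR"]
print(sorted(models, key=model_to_layer.get))  # ['ETS', 'DeepAR', 'WeightedEnsemble']
```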