autogluon.timeseries 1.0.1b20240304__py3-none-any.whl → 1.4.1b20251210__py3-none-any.whl
- autogluon/timeseries/configs/__init__.py +3 -2
- autogluon/timeseries/configs/hyperparameter_presets.py +62 -0
- autogluon/timeseries/configs/predictor_presets.py +84 -0
- autogluon/timeseries/dataset/ts_dataframe.py +339 -186
- autogluon/timeseries/learner.py +192 -60
- autogluon/timeseries/metrics/__init__.py +55 -11
- autogluon/timeseries/metrics/abstract.py +96 -25
- autogluon/timeseries/metrics/point.py +186 -39
- autogluon/timeseries/metrics/quantile.py +47 -20
- autogluon/timeseries/metrics/utils.py +6 -6
- autogluon/timeseries/models/__init__.py +13 -7
- autogluon/timeseries/models/abstract/__init__.py +2 -2
- autogluon/timeseries/models/abstract/abstract_timeseries_model.py +533 -273
- autogluon/timeseries/models/abstract/model_trial.py +10 -10
- autogluon/timeseries/models/abstract/tunable.py +189 -0
- autogluon/timeseries/models/autogluon_tabular/__init__.py +2 -0
- autogluon/timeseries/models/autogluon_tabular/mlforecast.py +369 -215
- autogluon/timeseries/models/autogluon_tabular/per_step.py +513 -0
- autogluon/timeseries/models/autogluon_tabular/transforms.py +67 -0
- autogluon/timeseries/models/autogluon_tabular/utils.py +3 -51
- autogluon/timeseries/models/chronos/__init__.py +4 -0
- autogluon/timeseries/models/chronos/chronos2.py +361 -0
- autogluon/timeseries/models/chronos/model.py +738 -0
- autogluon/timeseries/models/chronos/utils.py +369 -0
- autogluon/timeseries/models/ensemble/__init__.py +35 -2
- autogluon/timeseries/models/ensemble/{abstract_timeseries_ensemble.py → abstract.py} +50 -26
- autogluon/timeseries/models/ensemble/array_based/__init__.py +3 -0
- autogluon/timeseries/models/ensemble/array_based/abstract.py +236 -0
- autogluon/timeseries/models/ensemble/array_based/models.py +73 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +12 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +88 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +167 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +94 -0
- autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +107 -0
- autogluon/timeseries/models/ensemble/ensemble_selection.py +167 -0
- autogluon/timeseries/models/ensemble/per_item_greedy.py +162 -0
- autogluon/timeseries/models/ensemble/weighted/__init__.py +8 -0
- autogluon/timeseries/models/ensemble/weighted/abstract.py +40 -0
- autogluon/timeseries/models/ensemble/weighted/basic.py +78 -0
- autogluon/timeseries/models/ensemble/weighted/greedy.py +57 -0
- autogluon/timeseries/models/gluonts/__init__.py +3 -1
- autogluon/timeseries/models/gluonts/abstract.py +583 -0
- autogluon/timeseries/models/gluonts/dataset.py +109 -0
- autogluon/timeseries/models/gluonts/{torch/models.py → models.py} +185 -44
- autogluon/timeseries/models/local/__init__.py +1 -10
- autogluon/timeseries/models/local/abstract_local_model.py +150 -97
- autogluon/timeseries/models/local/naive.py +31 -23
- autogluon/timeseries/models/local/npts.py +6 -2
- autogluon/timeseries/models/local/statsforecast.py +99 -112
- autogluon/timeseries/models/multi_window/multi_window_model.py +99 -40
- autogluon/timeseries/models/registry.py +64 -0
- autogluon/timeseries/models/toto/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/__init__.py +9 -0
- autogluon/timeseries/models/toto/_internal/backbone/__init__.py +3 -0
- autogluon/timeseries/models/toto/_internal/backbone/attention.py +196 -0
- autogluon/timeseries/models/toto/_internal/backbone/backbone.py +262 -0
- autogluon/timeseries/models/toto/_internal/backbone/distribution.py +70 -0
- autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +136 -0
- autogluon/timeseries/models/toto/_internal/backbone/rope.py +89 -0
- autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- autogluon/timeseries/models/toto/_internal/backbone/scaler.py +305 -0
- autogluon/timeseries/models/toto/_internal/backbone/transformer.py +333 -0
- autogluon/timeseries/models/toto/_internal/dataset.py +165 -0
- autogluon/timeseries/models/toto/_internal/forecaster.py +423 -0
- autogluon/timeseries/models/toto/dataloader.py +108 -0
- autogluon/timeseries/models/toto/hf_pretrained_model.py +118 -0
- autogluon/timeseries/models/toto/model.py +236 -0
- autogluon/timeseries/predictor.py +826 -305
- autogluon/timeseries/regressor.py +253 -0
- autogluon/timeseries/splitter.py +10 -31
- autogluon/timeseries/trainer/__init__.py +2 -3
- autogluon/timeseries/trainer/ensemble_composer.py +439 -0
- autogluon/timeseries/trainer/model_set_builder.py +256 -0
- autogluon/timeseries/trainer/prediction_cache.py +149 -0
- autogluon/timeseries/trainer/trainer.py +1298 -0
- autogluon/timeseries/trainer/utils.py +17 -0
- autogluon/timeseries/transforms/__init__.py +2 -0
- autogluon/timeseries/transforms/covariate_scaler.py +164 -0
- autogluon/timeseries/transforms/target_scaler.py +149 -0
- autogluon/timeseries/utils/constants.py +10 -0
- autogluon/timeseries/utils/datetime/base.py +38 -20
- autogluon/timeseries/utils/datetime/lags.py +18 -16
- autogluon/timeseries/utils/datetime/seasonality.py +14 -14
- autogluon/timeseries/utils/datetime/time_features.py +17 -14
- autogluon/timeseries/utils/features.py +317 -53
- autogluon/timeseries/utils/forecast.py +31 -17
- autogluon/timeseries/utils/timer.py +173 -0
- autogluon/timeseries/utils/warning_filters.py +44 -6
- autogluon/timeseries/version.py +2 -1
- autogluon.timeseries-1.4.1b20251210-py3.11-nspkg.pth +1 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/METADATA +71 -47
- autogluon_timeseries-1.4.1b20251210.dist-info/RECORD +103 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/WHEEL +1 -1
- autogluon/timeseries/configs/presets_configs.py +0 -11
- autogluon/timeseries/evaluator.py +0 -6
- autogluon/timeseries/models/ensemble/greedy_ensemble.py +0 -170
- autogluon/timeseries/models/gluonts/abstract_gluonts.py +0 -550
- autogluon/timeseries/models/gluonts/torch/__init__.py +0 -0
- autogluon/timeseries/models/presets.py +0 -325
- autogluon/timeseries/trainer/abstract_trainer.py +0 -1144
- autogluon/timeseries/trainer/auto_trainer.py +0 -74
- autogluon.timeseries-1.0.1b20240304-py3.8-nspkg.pth +0 -1
- autogluon.timeseries-1.0.1b20240304.dist-info/RECORD +0 -58
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/LICENSE +0 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info/licenses}/NOTICE +0 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/namespace_packages.txt +0 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/top_level.txt +0 -0
- {autogluon.timeseries-1.0.1b20240304.dist-info → autogluon_timeseries-1.4.1b20251210.dist-info}/zip-safe +0 -0

autogluon/timeseries/trainer/trainer.py (new file)
@@ -0,0 +1,1298 @@
import copy
import logging
import os
import time
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Literal

import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm

from autogluon.common.utils.utils import seed_everything
from autogluon.core.trainer.abstract_trainer import AbstractTrainer
from autogluon.core.utils.exceptions import TimeLimitExceeded
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
from autogluon.timeseries import TimeSeriesDataFrame
from autogluon.timeseries.metrics import TimeSeriesScorer, check_get_evaluation_metric
from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel, TimeSeriesModelBase
from autogluon.timeseries.models.ensemble import AbstractTimeSeriesEnsembleModel
from autogluon.timeseries.models.multi_window import MultiWindowBacktestingModel
from autogluon.timeseries.splitter import AbstractWindowSplitter, ExpandingWindowSplitter
from autogluon.timeseries.trainer.ensemble_composer import EnsembleComposer, validate_ensemble_hyperparameters
from autogluon.timeseries.utils.features import (
    ConstantReplacementFeatureImportanceTransform,
    CovariateMetadata,
    PermutationFeatureImportanceTransform,
)
from autogluon.timeseries.utils.warning_filters import disable_tqdm

from .model_set_builder import TrainableModelSetBuilder, contains_searchspace
from .prediction_cache import PredictionCache, get_prediction_cache
from .utils import log_scores_and_times

logger = logging.getLogger("autogluon.timeseries.trainer")


class TimeSeriesTrainer(AbstractTrainer[TimeSeriesModelBase]):
    max_rel_importance_score: float = 1e5
    eps_abs_importance_score: float = 1e-5
    max_ensemble_time_limit: float = 600.0

    def __init__(
        self,
        path: str,
        prediction_length: int = 1,
        eval_metric: str | TimeSeriesScorer | None = None,
        save_data: bool = True,
        skip_model_selection: bool = False,
        enable_ensemble: bool = True,
        verbosity: int = 2,
        num_val_windows: tuple[int, ...] = (1,),
        val_step_size: int | None = None,
        refit_every_n_windows: int | None = 1,
        # TODO: Set cache_predictions=False by default once all models in default presets have a reasonable inference speed
        cache_predictions: bool = True,
        **kwargs,
    ):
        super().__init__(
            path=path,
            low_memory=True,
            save_data=save_data,
        )

        self.prediction_length = prediction_length
        self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
        self.target = kwargs.get("target", "target")
        self.covariate_metadata = kwargs.get("covariate_metadata", CovariateMetadata())
        self.is_data_saved = False
        self.skip_model_selection = skip_model_selection
        # Ensemble cannot be fit if val_scores are not computed
        self.enable_ensemble = enable_ensemble and not skip_model_selection
        if kwargs.get("ensemble_model_type") is not None:
            logger.warning(
                "Using a custom `ensemble_model_type` is no longer supported. Use the `ensemble_hyperparameters` "
                "argument to `fit` instead."
            )

        self.verbosity = verbosity

        #: dict of normal model -> FULL model. FULL models are produced by
        #: self.refit_single_full() and self.refit_full().
        self.model_refit_map = {}

        self.eval_metric = check_get_evaluation_metric(eval_metric, prediction_length=prediction_length)

        self.num_val_windows = num_val_windows

        # Validate num_val_windows
        if len(self.num_val_windows) == 0:
            raise ValueError("num_val_windows cannot be empty")
        if not all(isinstance(w, int) and w > 0 for w in self.num_val_windows):
            raise ValueError(f"num_val_windows must contain only positive integers, got {self.num_val_windows}")

        self.val_step_size = val_step_size
        self.refit_every_n_windows = refit_every_n_windows
        self.hpo_results = {}

        self.prediction_cache: PredictionCache = get_prediction_cache(cache_predictions, self.path)
        self.prediction_cache.clear()
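
    # Illustrative sketch (editor's annotation, not from the package source):
    # assuming the surrounding AutoGluon internals, a trainer with two
    # validation layers -- 3 backtest windows for base models and 1 window for
    # the final ensemble layer -- could be set up roughly like this:
    #
    #   trainer = TimeSeriesTrainer(
    #       path="AutogluonModels/run1",  # hypothetical output directory
    #       prediction_length=24,
    #       eval_metric="MASE",
    #       num_val_windows=(3, 1),
    #   )
    #
    # `num_val_windows` holds one entry per layer; as the validation above
    # shows, every entry must be a positive integer, and the last entry governs
    # the windows on which final validation scores are computed.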

    @property
    def path_pkl(self) -> str:
        return os.path.join(self.path, self.trainer_file_name)

    def save_train_data(self, data: TimeSeriesDataFrame, verbose: bool = True) -> None:
        path = os.path.join(self.path_data, "train.pkl")
        save_pkl.save(path=path, object=data, verbose=verbose)

    def save_val_data(self, data: TimeSeriesDataFrame, verbose: bool = True) -> None:
        path = os.path.join(self.path_data, "val.pkl")
        save_pkl.save(path=path, object=data, verbose=verbose)

    def load_train_data(self) -> TimeSeriesDataFrame:
        path = os.path.join(self.path_data, "train.pkl")
        return load_pkl.load(path=path)

    def load_val_data(self) -> TimeSeriesDataFrame | None:
        path = os.path.join(self.path_data, "val.pkl")
        if os.path.exists(path):
            return load_pkl.load(path=path)
        else:
            return None

    def load_data(self) -> tuple[TimeSeriesDataFrame, TimeSeriesDataFrame | None]:
        train_data = self.load_train_data()
        val_data = self.load_val_data()
        return train_data, val_data

    def save(self) -> None:
        # pickle the trainer without the persisted models, then save each model separately
        models = self.models
        self.models = {}

        save_pkl.save(path=self.path_pkl, object=self)
        for model in models.values():
            model.save()

        self.models = models

    def _get_model_oof_predictions(self, model_name: str) -> list[TimeSeriesDataFrame]:
        model_path = os.path.join(self.path, self.get_model_attribute(model=model_name, attribute="path"))
        model_type = self.get_model_attribute(model=model_name, attribute="type")
        return model_type.load_oof_predictions(path=model_path)

    def _add_model(
        self,
        model: TimeSeriesModelBase,
        base_models: list[str] | None = None,
    ):
        """Add a model to the model graph of the trainer. If the model is an ensemble, also add
        information about dependencies to the model graph (list of models specified via ``base_models``).

        Parameters
        ----------
        model
            The model to be added to the model graph.
        base_models
            If the model is an ensemble, the list of base model names that are included in the ensemble.
            Expected only when ``model`` is an ``AbstractTimeSeriesEnsembleModel``.

        Raises
        ------
        AssertionError
            If ``base_models`` are provided and ``model`` is not an ``AbstractTimeSeriesEnsembleModel``.
        """
        node_attrs = dict(
            path=os.path.relpath(model.path, self.path).split(os.sep),
            type=type(model),
            fit_time=model.fit_time,
            predict_time=model.predict_time,
            val_score=model.val_score,
        )
        self.model_graph.add_node(model.name, **node_attrs)

        if base_models:
            assert isinstance(model, AbstractTimeSeriesEnsembleModel)
            for base_model in base_models:
                self.model_graph.add_edge(base_model, model.name)

    def _get_model_layers(self) -> dict[str, int]:
        """Get a dictionary mapping each model to its layer in the model graph."""

        # get nodes without a parent
        rootset = set(self.model_graph.nodes)
        for e in self.model_graph.edges():
            rootset.discard(e[1])

        # get shortest paths
        paths_from = defaultdict(dict)
        for source_node, paths_to in nx.shortest_path_length(self.model_graph):
            for dest_node in paths_to:
                paths_from[dest_node][source_node] = paths_to[dest_node]

        # determine layers
        layers = {}
        for n in paths_from:
            layers[n] = max(paths_from[n].get(src, 0) for src in rootset)

        return layers

    def get_models_attribute_dict(self, attribute: str, models: list[str] | None = None) -> dict[str, Any]:
        """Get an attribute from the `model_graph` for each of the model names
        specified. If `models` is None, the attribute will be returned for all models."""
        results = {}
        if models is None:
            models = self.get_model_names()
        for model in models:
            results[model] = self.model_graph.nodes[model][attribute]
        return results

    def get_model_best(self) -> str:
        """Return the name of the best model by model performance on the validation set."""
        models = self.get_model_names()
        if not models:
            raise ValueError("Trainer has no fit models that can predict.")
        if len(models) == 1:
            return models[0]
        model_performances = self.get_models_attribute_dict(attribute="val_score")
        model_layers = self._get_model_layers()
        model_name_score_layer_list = [
            (m, model_performances[m], model_layers.get(m, 0)) for m in models if model_performances[m] is not None
        ]

        if not model_name_score_layer_list:
            raise ValueError("No fitted models have validation scores computed.")

        # rank models in terms of validation score. if two models have the same validation score,
        # rank them by their layer in the model graph (lower layer models are preferred).
        return max(
            model_name_score_layer_list,
            key=lambda mns: (mns[1], -mns[2]),  # (score, -layer)
        )[0]

    def get_model_names(self, layer: int | None = None) -> list[str]:
        """Get model names that are registered in the model graph."""
        if layer is not None:
            return list(node for node, l in self._get_model_layers().items() if l == layer)  # noqa: E741
        return list(self.model_graph.nodes)

    def get_info(self, include_model_info: bool = False) -> dict[str, Any]:
        num_models_trained = len(self.get_model_names())
        if self.model_best is not None:
            best_model = self.model_best
        else:
            try:
                best_model = self.get_model_best()
            except ValueError:
                best_model = None
        if best_model is not None:
            best_model_score_val = self.get_model_attribute(model=best_model, attribute="val_score")
        else:
            best_model_score_val = None

        info = {
            "best_model": best_model,
            "best_model_score_val": best_model_score_val,
            "num_models_trained": num_models_trained,
        }

        if include_model_info:
            info["model_info"] = self.get_models_info()

        return info
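
    # Illustrative sketch (editor's annotation, not from the package source):
    # `_get_model_layers` assigns each node the largest shortest-path distance
    # from any root (a node with no incoming edge). For two base models feeding
    # one ensemble:
    #
    #   import networkx as nx
    #   g = nx.DiGraph([("ModelA", "Ensemble"), ("ModelB", "Ensemble")])
    #   # roots = {ModelA, ModelB}; layers: ModelA -> 0, ModelB -> 0, Ensemble -> 1
    #
    # Base models therefore sit at layer 0 and ensembles one layer above their
    # deepest input, which is why `get_model_best` breaks validation-score ties
    # in favor of lower layers.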

    def tune_model_hyperparameters(
        self,
        model: AbstractTimeSeriesModel,
        train_data: TimeSeriesDataFrame,
        time_limit: float | None = None,
        val_data: TimeSeriesDataFrame | None = None,
        hyperparameter_tune_kwargs: str | dict = "auto",
    ):
        default_num_trials = None
        if time_limit is None and (
            "num_samples" not in hyperparameter_tune_kwargs or isinstance(hyperparameter_tune_kwargs, str)
        ):
            default_num_trials = 10

        tuning_start_time = time.time()
        with disable_tqdm():
            hpo_models, _ = model.hyperparameter_tune(
                train_data=train_data,
                val_data=val_data,
                hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
                time_limit=time_limit,
                default_num_trials=default_num_trials,
                val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
                refit_every_n_windows=self.refit_every_n_windows,
            )
        total_tuning_time = time.time() - tuning_start_time

        self.hpo_results[model.name] = hpo_models
        model_names_trained = []
        # add each of the trained HPO configurations to the trained models
        for model_hpo_name, model_info in hpo_models.items():
            model_path = os.path.join(self.path, model_info["path"])

            # Only load model configurations that didn't fail
            if not Path(model_path).exists():
                continue

            model_hpo = self.load_model(model_hpo_name, path=model_path, model_type=type(model))

            # override validation score to align evaluations on the final ensemble layer's windows
            if isinstance(model_hpo, MultiWindowBacktestingModel):
                model_hpo.val_score = float(
                    np.mean([info["val_score"] for info in model_hpo.info_per_val_window[-self.num_val_windows[-1] :]])
                )

            self._add_model(model_hpo)
            model_names_trained.append(model_hpo.name)

        logger.info(f"\tTrained {len(model_names_trained)} models while tuning {model.name}.")

        if len(model_names_trained) > 0:
            trained_model_results = [hpo_models[model_name] for model_name in model_names_trained]
            best_model_result = max(trained_model_results, key=lambda x: x["val_score"])

            logger.info(
                f"\t{best_model_result['val_score']:<7.4f}".ljust(15)
                + f"= Validation score ({self.eval_metric.name_with_sign})"
            )
            logger.info(f"\t{total_tuning_time:<7.2f} s".ljust(15) + "= Total tuning time")
            logger.debug(f"\tBest hyperparameter configuration: {best_model_result['hyperparameters']}")

        return model_names_trained

    def _train_and_save(
        self,
        train_data: TimeSeriesDataFrame,
        model: AbstractTimeSeriesModel,
        val_data: TimeSeriesDataFrame | None = None,
        time_limit: float | None = None,
    ) -> list[str]:
        """Fit the given model on the given training and validation data and save the trained model.

        Returns
        -------
        model_names_trained
            The list of model names that were successfully trained.
        """
        fit_start_time = time.time()
        model_names_trained = []
        try:
            if time_limit is not None:
                if time_limit <= 0:
                    logger.info(f"\tSkipping {model.name} due to lack of time remaining.")
                    return model_names_trained

            model.fit(
                train_data=train_data,
                val_data=None if isinstance(model, MultiWindowBacktestingModel) else val_data,
                time_limit=time_limit,
                verbosity=self.verbosity,
                val_splitter=self._get_val_splitter(use_val_data=val_data is not None),
                refit_every_n_windows=self.refit_every_n_windows,
            )

            fit_end_time = time.time()
            model.fit_time = model.fit_time or (fit_end_time - fit_start_time)

            if time_limit is not None:
                time_limit = time_limit - (fit_end_time - fit_start_time)
            if val_data is not None:
                model.score_and_cache_oof(
                    val_data, store_val_score=True, store_predict_time=True, time_limit=time_limit
                )

            # by default, MultiWindowBacktestingModel computes validation score on all windows. However,
            # when doing multilayer stacking, the trainer only scores on the windows of the last layer.
            # we override the val_score to align scores.
            if isinstance(model, MultiWindowBacktestingModel):
                model.val_score = float(
                    np.mean([info["val_score"] for info in model.info_per_val_window[-self.num_val_windows[-1] :]])
                )

            log_scores_and_times(
                val_score=model.val_score,
                fit_time=model.fit_time,
                predict_time=model.predict_time,
                eval_metric_name=self.eval_metric.name_with_sign,
            )

            self.save_model(model=model)
        except TimeLimitExceeded:
            logger.error(f"\tTime limit exceeded... Skipping {model.name}.")
        except (Exception, MemoryError):
            logger.error(f"\tWarning: Exception caused {model.name} to fail during training... Skipping this model.")
            logger.error(traceback.format_exc())
        else:
            self._add_model(model=model)  # noqa: F821
            model_names_trained.append(model.name)  # noqa: F821
        finally:
            del model

        return model_names_trained

    def fit(
        self,
        train_data: TimeSeriesDataFrame,
        hyperparameters: str | dict[Any, dict],
        val_data: TimeSeriesDataFrame | None = None,
        ensemble_hyperparameters: dict | list[dict] | None = None,
        hyperparameter_tune_kwargs: str | dict | None = None,
        excluded_model_types: list[str] | None = None,
        time_limit: float | None = None,
        random_seed: int | None = None,
    ):
        """Fit a set of time series models specified by the `hyperparameters`
        dictionary that maps model names to their specified hyperparameters.

        Parameters
        ----------
        train_data
            Training data for fitting time series models.
        hyperparameters
            A dictionary mapping selected model names, model classes or model factories to hyperparameter
            settings. Model names should be present in `trainer.presets.DEFAULT_MODEL_NAMES`. Optionally,
            the user may provide one of "default", "light" and "very_light" to specify presets.
        val_data
            Optional validation data set to report validation scores on.
        ensemble_hyperparameters
            A dictionary mapping ensemble names to their specified hyperparameters. Ensemble names
            should be defined in the models.ensemble namespace. Defaults to `{"GreedyEnsemble": {}}`,
            which only fits a greedy weighted ensemble with default hyperparameters. Providing an
            empty dictionary disables ensemble training.
        hyperparameter_tune_kwargs
            Args for hyperparameter tuning.
        excluded_model_types
            Names of models that should not be trained, even if listed in `hyperparameters`.
        time_limit
            Time limit for training.
        random_seed
            Random seed that will be set for each model during training.
        """
        logger.info(f"\nStarting training. Start time is {time.strftime('%Y-%m-%d %H:%M:%S')}")

        # Handle ensemble hyperparameters
        if ensemble_hyperparameters is None:
            ensemble_hyperparameters = [{"GreedyEnsemble": {}}]
        if isinstance(ensemble_hyperparameters, dict):
            ensemble_hyperparameters = [ensemble_hyperparameters]
        validate_ensemble_hyperparameters(ensemble_hyperparameters)

        time_start = time.time()
        hyperparameters = copy.deepcopy(hyperparameters)

        if val_data is not None:
            if self.num_val_windows[-1] != 1:
                raise ValueError(
                    f"When val_data is provided, the last element of num_val_windows must be 1, "
                    f"got {self.num_val_windows[-1]}"
                )
        multi_window = self._get_val_splitter(use_val_data=val_data is not None).num_val_windows > 0

        if self.save_data and not self.is_data_saved:
            self.save_train_data(train_data)
            if val_data is not None:
                self.save_val_data(val_data)
            self.is_data_saved = True

        models = self.get_trainable_base_models(
            hyperparameters=hyperparameters,
            hyperparameter_tune=hyperparameter_tune_kwargs is not None,  # TODO: remove hyperparameter_tune
            freq=train_data.freq,
            multi_window=multi_window,
            excluded_model_types=excluded_model_types,
        )

        logger.info(f"Models that will be trained: {list(m.name for m in models)}")

        if self.skip_model_selection:
            if len(models) > 1:
                raise ValueError(
                    "When `skip_model_selection=True`, only a single model must be provided via `hyperparameters` "
                    f"but {len(models)} models were given"
                )
            if contains_searchspace(models[0].get_hyperparameters()):
                raise ValueError(
                    "When `skip_model_selection=True`, model configuration should contain no search spaces."
                )

        num_base_models = len(models)
        model_names_trained = []
        for i, model in enumerate(models):
            if time_limit is None:
                time_left = None
                time_left_for_model = None
            else:
                time_left = time_limit - (time.time() - time_start)
                if num_base_models > 1 and self.enable_ensemble:
                    time_reserved_for_ensemble = min(
                        self.max_ensemble_time_limit, time_left / (num_base_models - i + 1)
                    )
                    logger.debug(f"Reserving {time_reserved_for_ensemble:.1f}s for ensemble")
                else:
                    time_reserved_for_ensemble = 0.0
                time_left_for_model = (time_left - time_reserved_for_ensemble) / (num_base_models - i)
                if time_left <= 0:
                    logger.info(f"Stopping training due to lack of time remaining. Time left: {time_left:.1f} seconds")
                    break

            if random_seed is not None:
                seed_everything(random_seed + i)

            if contains_searchspace(model.get_hyperparameters()):
                fit_log_message = f"Hyperparameter tuning model {model.name}. "
                if time_left is not None:
                    fit_log_message += (
                        f"Tuning model for up to {time_left_for_model:.1f}s of the {time_left:.1f}s remaining."
                    )
                logger.info(fit_log_message)
                with tqdm.external_write_mode():
                    assert hyperparameter_tune_kwargs is not None, (
                        "`hyperparameter_tune_kwargs` must be provided if hyperparameters contain a search space"
                    )
                    model_names_trained += self.tune_model_hyperparameters(
                        model,
                        time_limit=time_left_for_model,
                        train_data=train_data,
                        val_data=val_data,
                        hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
                    )
            else:
                fit_log_message = f"Training timeseries model {model.name}. "
                if time_left is not None:
                    fit_log_message += (
                        f"Training for up to {time_left_for_model:.1f}s of the {time_left:.1f}s of remaining time."
                    )
                logger.info(fit_log_message)
                model_names_trained += self._train_and_save(
                    train_data, model=model, val_data=val_data, time_limit=time_left_for_model
                )

        if self.enable_ensemble and ensemble_hyperparameters:
            model_names = self.get_model_names(layer=0)
            ensemble_names = self._fit_ensembles(
                data_per_window=self._get_validation_windows(train_data, val_data),
                predictions_per_window=self._get_base_model_predictions(model_names),
                time_limit=None if time_limit is None else time_limit - (time.time() - time_start),
                ensemble_hyperparameters=ensemble_hyperparameters,
                num_windows_per_layer=self.num_val_windows,
            )
            model_names_trained.extend(ensemble_names)

        logger.info(f"Training complete. Models trained: {model_names_trained}")
        logger.info(f"Total runtime: {time.time() - time_start:.2f} s")
        try:
            best_model = self.get_model_best()
            logger.info(f"Best model: {best_model}")
            if not self.skip_model_selection:
                logger.info(f"Best model score: {self.get_model_attribute(best_model, 'val_score'):.4f}")
        except ValueError as e:
            logger.error(str(e))

        return model_names_trained
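
    # Worked example (editor's annotation, not from the package source):
    # with time_limit=1000s, 4 base models and the ensemble enabled, the first
    # model (i=0) in the loop above gets
    #
    #   time_reserved_for_ensemble = min(600.0, 1000 / (4 - 0 + 1)) = 200.0
    #   time_left_for_model        = (1000 - 200.0) / (4 - 0)       = 200.0
    #
    # Each later model divides whatever time actually remains, so a slow early
    # model shrinks the budget of every model that follows it.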

    def _fit_ensembles(
        self,
        *,
        data_per_window: list[TimeSeriesDataFrame],
        predictions_per_window: dict[str, list[TimeSeriesDataFrame]],
        time_limit: float | None,
        ensemble_hyperparameters: list[dict],
        num_windows_per_layer: tuple[int, ...],
    ) -> list[str]:
        ensemble_composer = EnsembleComposer(
            path=self.path,
            prediction_length=self.prediction_length,
            eval_metric=self.eval_metric,
            target=self.target,
            ensemble_hyperparameters=ensemble_hyperparameters,
            num_windows_per_layer=num_windows_per_layer,
            quantile_levels=self.quantile_levels,
            model_graph=self.model_graph,
        ).fit(
            data_per_window=data_per_window,
            predictions_per_window=predictions_per_window,
            time_limit=time_limit,
        )

        ensembles_trained = []
        for _, model, base_models in ensemble_composer.iter_ensembles():
            self._add_model(model=model, base_models=base_models)
            self.save_model(model=model)
            ensembles_trained.append(model.name)

        return ensembles_trained

    def _get_validation_windows(self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None):
        train_splitter = self._get_val_splitter(use_val_data=val_data is not None)
        return [val_fold for _, val_fold in train_splitter.split(train_data)] + (
            [] if val_data is None else [val_data]
        )

    def _get_val_splitter(self, use_val_data: bool = False) -> AbstractWindowSplitter:
        num_windows_from_train = sum(self.num_val_windows[:-1]) if use_val_data else sum(self.num_val_windows)
        return ExpandingWindowSplitter(
            prediction_length=self.prediction_length,
            num_val_windows=num_windows_from_train,
            val_step_size=self.val_step_size,
        )

    def _get_base_model_predictions(self, model_names: list[str]) -> dict[str, list[TimeSeriesDataFrame]]:
        """Get base model predictions for ensemble training / inference."""
        predictions_per_window = {}
        for model_name in model_names:
            predictions_per_window[model_name] = self._get_model_oof_predictions(model_name)
        return predictions_per_window
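
    # Illustrative sketch (editor's annotation, not from the package source):
    # `_get_val_splitter` carves validation windows out of the tail of
    # train_data. With num_val_windows=(3, 1) and an external val_data passed
    # to `fit`, only sum((3,)) = 3 windows come from train_data and val_data
    # itself acts as the final layer's window:
    #
    #   splitter = trainer._get_val_splitter(use_val_data=True)
    #   folds = [val_fold for _, val_fold in splitter.split(train_data)]
    #   windows = folds + [val_data]  # what _get_validation_windows returns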

    def leaderboard(
        self,
        data: TimeSeriesDataFrame | None = None,
        extra_info: bool = False,
        extra_metrics: list[str | TimeSeriesScorer] | None = None,
        use_cache: bool = True,
    ) -> pd.DataFrame:
        logger.debug("Generating leaderboard for all models trained")

        model_names = self.get_model_names()
        if len(model_names) == 0:
            logger.warning("Warning: No models were trained during fit. Resulting leaderboard will be empty.")

        model_info = {}
        for ix, model_name in enumerate(model_names):
            model_info[model_name] = {
                "model": model_name,
                "fit_order": ix + 1,
                "score_val": self.get_model_attribute(model_name, "val_score"),
                "fit_time_marginal": self.get_model_attribute(model_name, "fit_time"),
                "pred_time_val": self.get_model_attribute(model_name, "predict_time"),
            }
            if extra_info:
                model = self.load_model(model_name=model_name)
                if isinstance(model, MultiWindowBacktestingModel):
                    model = model.most_recent_model
                    assert model is not None
                model_info[model_name]["hyperparameters"] = model.get_hyperparameters()

        if extra_metrics is None:
            extra_metrics = []

        if data is not None:
            past_data, known_covariates = data.get_model_inputs_for_scoring(
                prediction_length=self.prediction_length,
                known_covariates_names=self.covariate_metadata.known_covariates,
            )
            logger.info(
                "Additional data provided, testing on additional data. Resulting leaderboard "
                "will be sorted according to test score (`score_test`)."
            )
            model_predictions, pred_time_dict = self.get_model_pred_dict(
                model_names=model_names,
                data=past_data,
                known_covariates=known_covariates,
                raise_exception_if_failed=False,
                use_cache=use_cache,
            )

            for model_name in model_names:
                model_preds = model_predictions[model_name]
                if model_preds is None:
                    # Model failed at prediction time
                    model_info[model_name]["score_test"] = float("nan")
                    model_info[model_name]["pred_time_test"] = float("nan")
                else:
                    model_info[model_name]["score_test"] = self._score_with_predictions(data, model_preds)
                    model_info[model_name]["pred_time_test"] = pred_time_dict[model_name]

                for metric in extra_metrics:
                    if model_preds is None:
                        model_info[model_name][str(metric)] = float("nan")
                    else:
                        model_info[model_name][str(metric)] = self._score_with_predictions(
                            data, model_preds, metric=metric
                        )

        explicit_column_order = [
            "model",
            "score_test",
            "score_val",
            "pred_time_test",
            "pred_time_val",
            "fit_time_marginal",
            "fit_order",
        ]
        if extra_info:
            explicit_column_order += ["hyperparameters"]

        if data is None:
            explicit_column_order.remove("score_test")
            explicit_column_order.remove("pred_time_test")
            sort_column = "score_val"
        else:
            sort_column = "score_test"
            explicit_column_order += [str(metric) for metric in extra_metrics]

        df = pd.DataFrame(model_info.values(), columns=explicit_column_order)
        df.sort_values(by=[sort_column, "model"], ascending=[False, False], inplace=True)
        df.reset_index(drop=True, inplace=True)

        return df[explicit_column_order]

    def persist(
        self, model_names: Literal["all", "best"] | list[str] = "all", with_ancestors: bool = False
    ) -> list[str]:
        if model_names == "all":
            model_names = self.get_model_names()
        elif model_names == "best":
            model_names = [self.get_model_best()]
        if not isinstance(model_names, list):
            raise ValueError(f"model_names must be a list of model names. Invalid value: {model_names}")

        if with_ancestors:
            models_with_ancestors = set()
            for model_name in model_names:
                models_with_ancestors = models_with_ancestors.union(self.get_minimum_model_set(model_name))
            model_names = list(models_with_ancestors)

        model_names_already_persisted = [model_name for model_name in model_names if model_name in self.models]
        model_names = [model_name for model_name in model_names if model_name not in model_names_already_persisted]

        for model_name in model_names:
            model = self.load_model(model_name)
            model.persist()
            self.models[model.name] = model

        return model_names

    def unpersist(self, model_names: Literal["all"] | list[str] = "all") -> list[str]:
        if model_names == "all":
            model_names = list(self.models.keys())
        if not isinstance(model_names, list):
            raise ValueError(f"model_names must be a list of model names. Invalid value: {model_names}")
        unpersisted_models = []
        for model in model_names:
            if model in self.models:
                self.models.pop(model)
                unpersisted_models.append(model)
        return unpersisted_models

    def _get_model_for_prediction(self, model: str | TimeSeriesModelBase | None = None, verbose: bool = True) -> str:
        """Given an optional identifier or model object, return the name of the model with which to predict.

        If the model is not provided, this method will default to the best model according to the validation score.
        """
        if model is None:
            if self.model_best is None:
                best_model_name: str = self.get_model_best()
                self.model_best = best_model_name
            if verbose:
                logger.info(
                    f"Model not specified in predict, will default to the model with the "
                    f"best validation score: {self.model_best}",
                )
            return self.model_best
        else:
            if isinstance(model, TimeSeriesModelBase):
                return model.name
            else:
                if model not in self.get_model_names():
                    raise KeyError(f"Model '{model}' not found. Available models: {self.get_model_names()}")
                return model

    def predict(
        self,
        data: TimeSeriesDataFrame,
        known_covariates: TimeSeriesDataFrame | None = None,
        model: str | TimeSeriesModelBase | None = None,
        use_cache: bool = True,
        random_seed: int | None = None,
    ) -> TimeSeriesDataFrame:
        model_name = self._get_model_for_prediction(model)
        model_pred_dict, _ = self.get_model_pred_dict(
            model_names=[model_name],
            data=data,
            known_covariates=known_covariates,
            use_cache=use_cache,
            random_seed=random_seed,
        )
        predictions = model_pred_dict[model_name]
        if predictions is None:
            raise ValueError(f"Model {model_name} failed to predict. Please check the model's logs.")
        return predictions

    def _get_eval_metric(self, metric: str | TimeSeriesScorer | None) -> TimeSeriesScorer:
        if metric is None:
            return self.eval_metric
        else:
            return check_get_evaluation_metric(
                metric,
                prediction_length=self.prediction_length,
                seasonal_period=self.eval_metric.seasonal_period,
                horizon_weight=self.eval_metric.horizon_weight,
            )

    def _score_with_predictions(
        self,
        data: TimeSeriesDataFrame,
        predictions: TimeSeriesDataFrame,
        metric: str | TimeSeriesScorer | None = None,
    ) -> float:
        """Compute the score measuring how well the predictions align with the data."""
        return self._get_eval_metric(metric).score(
            data=data,
            predictions=predictions,
            target=self.target,
        )

    def score(
        self,
        data: TimeSeriesDataFrame,
        model: str | TimeSeriesModelBase | None = None,
        metric: str | TimeSeriesScorer | None = None,
        use_cache: bool = True,
    ) -> float:
        eval_metric = self._get_eval_metric(metric)
        scores_dict = self.evaluate(data=data, model=model, metrics=[eval_metric], use_cache=use_cache)
        return scores_dict[eval_metric.name]
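
    # Editor's note (not from the package source): scores flow through the
    # trainer in "higher is better" form -- `get_model_best` takes a max over
    # val_score and `leaderboard` sorts descending -- so error metrics such as
    # MASE are reported sign-flipped, which is what `name_with_sign` signals in
    # the training logs. A hypothetical call:
    #
    #   scores = trainer.evaluate(data=test_data, metrics=["MASE"])
    #   # e.g. {"MASE": -0.8}; values closer to 0 are better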

    def evaluate(
        self,
        data: TimeSeriesDataFrame,
        model: str | TimeSeriesModelBase | None = None,
        metrics: str | TimeSeriesScorer | list[str | TimeSeriesScorer] | None = None,
        use_cache: bool = True,
    ) -> dict[str, float]:
        past_data, known_covariates = data.get_model_inputs_for_scoring(
            prediction_length=self.prediction_length, known_covariates_names=self.covariate_metadata.known_covariates
        )
        predictions = self.predict(data=past_data, known_covariates=known_covariates, model=model, use_cache=use_cache)

        metrics_ = [metrics] if not isinstance(metrics, list) else metrics
        scores_dict = {}
        for metric in metrics_:
            eval_metric = self._get_eval_metric(metric)
            scores_dict[eval_metric.name] = self._score_with_predictions(
                data=data, predictions=predictions, metric=eval_metric
            )
        return scores_dict

    def get_feature_importance(
        self,
        data: TimeSeriesDataFrame,
        features: list[str],
        model: str | TimeSeriesModelBase | None = None,
        metric: str | TimeSeriesScorer | None = None,
        time_limit: float | None = None,
        method: Literal["naive", "permutation"] = "permutation",
        subsample_size: int = 50,
        num_iterations: int | None = None,
        random_seed: int | None = None,
        relative_scores: bool = False,
        include_confidence_band: bool = True,
        confidence_level: float = 0.99,
    ) -> pd.DataFrame:
        assert method in ["naive", "permutation"], f"Invalid feature importance method {method}."
        eval_metric = self._get_eval_metric(metric)

        logger.info("Computing feature importance")

        # seed everything if random_seed is provided
        if random_seed is not None:
            seed_everything(random_seed)

        # start timer and cap subsample size if it's greater than the number of items in the provided data set
        time_start = time.time()
        if subsample_size > data.num_items:
            logger.info(
                f"subsample_size {subsample_size} is larger than the number of items in the data and will be ignored"
            )
            subsample_size = data.num_items

        # set default number of iterations and cap iterations if the number of items in the data is smaller
        # than the subsample size for the naive method
        num_iterations = num_iterations or (5 if method == "permutation" else 1)
        if method == "naive" and data.num_items <= subsample_size:
            num_iterations = 1

        # initialize the importance transform
        importance_transform_type = {
            "permutation": PermutationFeatureImportanceTransform,
            "naive": ConstantReplacementFeatureImportanceTransform,
        }.get(method)
        assert importance_transform_type is not None, (
            f"Invalid feature importance method {method}. Valid methods are 'permutation' and 'naive'."
        )

        importance_transform = importance_transform_type(
            covariate_metadata=self.covariate_metadata,
            prediction_length=self.prediction_length,
            random_seed=random_seed,
        )

        # if model is not provided, use the best model according to the validation score
        model = self._get_model_for_prediction(model, verbose=False)

        # persist trainer to speed up repeated inference
        persisted_models = self.persist(model_names=[model], with_ancestors=True)

        importance_samples = defaultdict(list)
        for n in range(num_iterations):
            if subsample_size < data.num_items:
                item_ids_sampled = data.item_ids.to_series().sample(subsample_size)  # noqa
                data_sample: TimeSeriesDataFrame = data.query("item_id in @item_ids_sampled")
            else:
                data_sample = data

            base_score = self.evaluate(data=data_sample, model=model, metrics=eval_metric, use_cache=False)[
                eval_metric.name
            ]

            for feature in features:
                # skip features that are not used by the model
                if not self._model_uses_feature(model, feature):
                    continue
                else:
                    data_sample_replaced = importance_transform.transform(data_sample, feature_name=feature)
                    score = self.evaluate(
                        data=data_sample_replaced, model=model, metrics=eval_metric, use_cache=False
                    )[eval_metric.name]

                    importance = base_score - score
                    if relative_scores:
                        importance /= np.abs(base_score - self.eps_abs_importance_score)
                        importance = min(self.max_rel_importance_score, importance)

                    importance_samples[feature].append(importance)

            if time_limit is not None and time.time() - time_start > time_limit:
                logger.info(f"Time limit reached, stopping feature importance computation after {n} iterations")
                break

        self.unpersist(model_names=persisted_models)

        importance_df = (
            (
                pd.DataFrame(importance_samples)
                .agg(["mean", "std", "count"])
                .T.rename(columns={"mean": "importance", "std": "stdev", "count": "n"})
            )
            if len(importance_samples) > 0
            else pd.DataFrame(columns=["importance", "stdev", "n"])
        )

        if include_confidence_band:
            importance_df = self._add_ci_to_feature_importance(importance_df, confidence_level=confidence_level)

        return importance_df

    def _model_uses_feature(self, model: str | TimeSeriesModelBase, feature: str) -> bool:
        """Check if the given model uses the given feature."""
        models_with_ancestors = set(self.get_minimum_model_set(model))

        if feature in self.covariate_metadata.static_features:
            return any(self.load_model(m).supports_static_features for m in models_with_ancestors)
        elif feature in self.covariate_metadata.known_covariates:
            return any(self.load_model(m).supports_known_covariates for m in models_with_ancestors)
        elif feature in self.covariate_metadata.past_covariates:
            return any(self.load_model(m).supports_past_covariates for m in models_with_ancestors)

        return False
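
    # Worked example (editor's annotation, not from the package source):
    # feature importance above is "drop in score after corrupting a feature",
    # importance = base_score - score, with scores in higher-is-better form.
    # A hypothetical run with base_score = -0.80 that degrades to score = -0.95
    # after permuting a covariate yields importance = 0.15: the model relies on
    # that covariate. Values near zero mean the feature can be shuffled (or
    # replaced with a constant, for method="naive") without hurting forecasts.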

    def backtest_predictions(
        self,
        data: TimeSeriesDataFrame | None,
        model_names: list[str],
        num_val_windows: int | None = None,
        val_step_size: int | None = None,
        use_cache: bool = True,
    ) -> dict[str, list[TimeSeriesDataFrame]]:
        if data is None:
            assert num_val_windows is None, "num_val_windows must be None when data is None"
            assert val_step_size is None, "val_step_size must be None when data is None"
            return {model_name: self._get_model_oof_predictions(model_name) for model_name in model_names}

        if val_step_size is None:
            val_step_size = self.prediction_length
        if num_val_windows is None:
            num_val_windows = 1

        splitter = ExpandingWindowSplitter(
            prediction_length=self.prediction_length,
            num_val_windows=num_val_windows,
            val_step_size=val_step_size,
        )

        result: dict[str, list[TimeSeriesDataFrame]] = {model_name: [] for model_name in model_names}
        for past_data, full_data in splitter.split(data):
            known_covariates = full_data.slice_by_timestep(-self.prediction_length, None)[
                self.covariate_metadata.known_covariates
            ]
            pred_dict, _ = self.get_model_pred_dict(
                model_names=model_names,
                data=past_data,
                known_covariates=known_covariates,
                use_cache=use_cache,
            )
            for model_name in model_names:
                result[model_name].append(pred_dict[model_name])  # type: ignore

        return result

    def backtest_targets(
        self,
        data: TimeSeriesDataFrame | None,
        num_val_windows: int | None = None,
        val_step_size: int | None = None,
    ) -> list[TimeSeriesDataFrame]:
        if data is None:
            assert num_val_windows is None, "num_val_windows must be None when data is None"
            assert val_step_size is None, "val_step_size must be None when data is None"
            train_data = self.load_train_data()
            val_data = self.load_val_data()
            return self._get_validation_windows(train_data=train_data, val_data=val_data)

        if val_step_size is None:
            val_step_size = self.prediction_length
        if num_val_windows is None:
            num_val_windows = 1

        splitter = ExpandingWindowSplitter(
            prediction_length=self.prediction_length,
            num_val_windows=num_val_windows,
            val_step_size=val_step_size,
        )

        return [val_fold for _, val_fold in splitter.split(data)]
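
    # Illustrative sketch (editor's annotation, not from the package source):
    # `backtest_predictions` and `backtest_targets` use the same splitter
    # configuration, so window k of the predictions lines up with window k of
    # the targets and per-window scores can be computed by zipping them.
    # "DeepAR" below is a placeholder model name:
    #
    #   preds = trainer.backtest_predictions(data, model_names=["DeepAR"], num_val_windows=2)
    #   targets = trainer.backtest_targets(data, num_val_windows=2)
    #   scores = [
    #       trainer._score_with_predictions(target, pred)
    #       for target, pred in zip(targets, preds["DeepAR"])
    #   ]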

    def _add_ci_to_feature_importance(
        self, importance_df: pd.DataFrame, confidence_level: float = 0.99
    ) -> pd.DataFrame:
        """Add confidence intervals to the feature importance."""
        import scipy.stats

        if confidence_level <= 0.5 or confidence_level >= 1.0:
            raise ValueError("confidence_level must lie between 0.5 and 1.0")
        ci_str = "{:.0f}".format(confidence_level * 100)

        alpha = 1 - confidence_level
        importance_df[f"p{ci_str}_low"] = np.nan
        importance_df[f"p{ci_str}_high"] = np.nan

        for i in importance_df.index:
            r = importance_df.loc[i]
            importance, stdev, n = r["importance"], r["stdev"], r["n"]
            if np.isnan(importance) or np.isnan(stdev) or np.isnan(n) or n <= 1:
                continue

            t_crit = scipy.stats.t.ppf(1 - alpha / 2, df=n - 1)

            importance_df.loc[i, f"p{ci_str}_low"] = importance - t_crit * stdev / np.sqrt(n)
            importance_df.loc[i, f"p{ci_str}_high"] = importance + t_crit * stdev / np.sqrt(n)

        return importance_df
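
    # Worked example (editor's annotation, not from the package source):
    # the bounds above are a standard two-sided Student-t interval on the mean,
    #   mean +/- t_{1 - alpha/2, n-1} * stdev / sqrt(n).
    # For n=5 importance samples with mean 0.15, stdev 0.04 and
    # confidence_level=0.99 (alpha=0.01), t_{0.995, 4} ~= 4.604, giving roughly
    #   0.15 +/- 4.604 * 0.04 / sqrt(5) ~= (0.068, 0.232).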
+    def _predict_model(
+        self,
+        model: str | TimeSeriesModelBase,
+        data: TimeSeriesDataFrame,
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+        known_covariates: TimeSeriesDataFrame | None = None,
+    ) -> TimeSeriesDataFrame:
+        """Generate predictions using the given model.
+
+        If model is an ensemble, this method assumes that model_pred_dict contains the predictions of all of its base models.
+        """
+        if isinstance(model, str):
+            model = self.load_model(model)
+        model_inputs = self._get_inputs_to_model(model=model, data=data, model_pred_dict=model_pred_dict)
+        return model.predict(model_inputs, known_covariates=known_covariates)
+
+    def _get_inputs_to_model(
+        self,
+        model: str | TimeSeriesModelBase,
+        data: TimeSeriesDataFrame,
+        model_pred_dict: dict[str, TimeSeriesDataFrame | None],
+    ) -> TimeSeriesDataFrame | dict[str, TimeSeriesDataFrame | None]:
+        """Get the first argument that should be passed to model.predict.
+
+        If model is an ensemble, this method assumes that model_pred_dict contains the predictions of all of its base models.
+        """
+        model_set = self.get_minimum_model_set(model, include_self=False)
+        if model_set:
+            for m in model_set:
+                if m not in model_pred_dict:
+                    raise AssertionError(f"Prediction for base model {m} not found in model_pred_dict")
+            return {m: model_pred_dict[m] for m in model_set}
+        else:
+            return data
+
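In other words, a leaf model is fed the raw TimeSeriesDataFrame, while an ensemble is fed a mapping from each of its base models to that model's forecast. A toy illustration of the dispatch; the model names and dependency map below are hypothetical:

    model_pred_dict = {"DeepAR": "<DeepAR forecast>", "ETS": "<ETS forecast>"}
    base_models_of = {"WeightedEnsemble": ["DeepAR", "ETS"]}  # hypothetical closure

    def inputs_for(model_name, data):
        base_models = base_models_of.get(model_name, [])
        if base_models:
            return {m: model_pred_dict[m] for m in base_models}  # ensemble input
        return data  # leaf-model input

    print(inputs_for("DeepAR", "<raw data>"))            # '<raw data>'
    print(inputs_for("WeightedEnsemble", "<raw data>"))  # dict of base forecasts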
+    def get_model_pred_dict(
+        self,
+        model_names: list[str],
+        data: TimeSeriesDataFrame,
+        known_covariates: TimeSeriesDataFrame | None = None,
+        raise_exception_if_failed: bool = True,
+        use_cache: bool = True,
+        random_seed: int | None = None,
+    ) -> tuple[dict[str, TimeSeriesDataFrame | None], dict[str, float]]:
+        """Return a dictionary with the predictions of the requested models for the given dataset,
+        together with the total prediction time for each model (including the prediction time of its base models).
+
+        Parameters
+        ----------
+        model_names
+            Names of the models for which predictions should be produced.
+        data
+            Time series data to forecast with.
+        known_covariates
+            Future values of the known covariates.
+        raise_exception_if_failed
+            If True, the method will raise an exception if any model crashes during prediction.
+            If False, errors will be logged and predictions for failed models will be None.
+        use_cache
+            If False, will ignore the cache even if it's available.
+        random_seed
+            If provided, the global random seed is fixed before each model's prediction.
+        """
+        if use_cache:
+            model_pred_dict, pred_time_dict_marginal = self.prediction_cache.get(
+                data=data, known_covariates=known_covariates
+            )
+        else:
+            model_pred_dict = {}
+            pred_time_dict_marginal: dict[str, Any] = {}
+
+        model_set = set()
+        for model_name in model_names:
+            model_set.update(self.get_minimum_model_set(model_name))
+        if len(model_set) > 1:
+            model_to_layer = self._get_model_layers()
+            model_set = sorted(model_set, key=model_to_layer.get)  # type: ignore
+            logger.debug(f"Prediction order: {model_set}")
+
+        failed_models = []
+        for model_name in model_set:
+            if model_name not in model_pred_dict:
+                if random_seed is not None:
+                    seed_everything(random_seed)
+                try:
+                    predict_start_time = time.time()
+                    model_pred_dict[model_name] = self._predict_model(
+                        model=model_name,
+                        data=data,
+                        known_covariates=known_covariates,
+                        model_pred_dict=model_pred_dict,
+                    )
+                    pred_time_dict_marginal[model_name] = time.time() - predict_start_time
+                except Exception:
+                    failed_models.append(model_name)
+                    logger.error(f"Model {model_name} failed to predict with the following exception:")
+                    logger.error(traceback.format_exc())
+                    model_pred_dict[model_name] = None
+                    pred_time_dict_marginal[model_name] = None
+
+        if len(failed_models) > 0 and raise_exception_if_failed:
+            raise RuntimeError(f"Following models failed to predict: {failed_models}")
+
+        if use_cache:
+            self.prediction_cache.put(
+                data=data,
+                known_covariates=known_covariates,
+                model_pred_dict=model_pred_dict,
+                pred_time_dict=pred_time_dict_marginal,
+            )
+        pred_time_dict_total = self._get_total_pred_time_from_marginal(pred_time_dict_marginal)
+
+        final_model_pred_dict = {model_name: model_pred_dict[model_name] for model_name in model_names}
+        final_pred_time_dict_total = {model_name: pred_time_dict_total[model_name] for model_name in model_names}
+
+        return final_model_pred_dict, final_pred_time_dict_total
+
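A sketch of how a caller might use this method; the trainer instance, datasets and model names here are hypothetical:

    # `trainer`, `past_data` and `future_covariates` are assumed to exist.
    pred_dict, pred_time_total = trainer.get_model_pred_dict(
        model_names=["DeepAR", "WeightedEnsemble"],
        data=past_data,
        known_covariates=future_covariates,
        raise_exception_if_failed=False,
    )
    # pred_dict maps model name -> forecast (None if the model failed);
    # pred_time_total maps model name -> time including its base models.
    for name, forecast in pred_dict.items():
        if forecast is None:
            print(f"{name} failed to predict; see the logged traceback")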
+    def _get_total_pred_time_from_marginal(self, pred_time_dict_marginal: dict[str, float]) -> dict[str, float]:
+        pred_time_dict_total = defaultdict(float)
+        for model_name in pred_time_dict_marginal.keys():
+            for base_model in self.get_minimum_model_set(model_name):
+                if pred_time_dict_marginal[base_model] is not None:
+                    pred_time_dict_total[model_name] += pred_time_dict_marginal[base_model]
+        return dict(pred_time_dict_total)
+
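For example, with marginal prediction times DeepAR: 2.0 s, ETS: 0.5 s and WeightedEnsemble: 0.1 s, the ensemble's total is the sum over its minimum model set. A self-contained re-run of the same aggregation with a hypothetical dependency map:

    from collections import defaultdict

    marginal = {"DeepAR": 2.0, "ETS": 0.5, "WeightedEnsemble": 0.1}
    minimum_model_set = {  # hypothetical closure, including the model itself
        "DeepAR": ["DeepAR"],
        "ETS": ["ETS"],
        "WeightedEnsemble": ["DeepAR", "ETS", "WeightedEnsemble"],
    }

    total = defaultdict(float)
    for name in marginal:
        for base in minimum_model_set[name]:
            total[name] += marginal[base]
    print(dict(total))  # {'DeepAR': 2.0, 'ETS': 0.5, 'WeightedEnsemble': 2.6}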
+    def _merge_refit_full_data(
+        self, train_data: TimeSeriesDataFrame, val_data: TimeSeriesDataFrame | None
+    ) -> TimeSeriesDataFrame:
+        if val_data is None:
+            return train_data
+        else:
+            # TODO: Implement merging of arbitrary tuning_data with train_data
+            raise NotImplementedError("refit_full is not supported if custom val_data is provided.")
+
+    def refit_single_full(
+        self,
+        train_data: TimeSeriesDataFrame | None = None,
+        val_data: TimeSeriesDataFrame | None = None,
+        models: list[str] | None = None,
+    ) -> list[str]:
+        # Compare to None explicitly: `train_data or ...` would call bool() on a
+        # DataFrame, which raises ValueError.
+        train_data = train_data if train_data is not None else self.load_train_data()
+        val_data = val_data if val_data is not None else self.load_val_data()
+        refit_full_data = self._merge_refit_full_data(train_data, val_data)
+
+        if models is None:
+            models = self.get_model_names()
+
+        model_to_layer = self._get_model_layers()
+        models_sorted_by_layer = sorted(models, key=model_to_layer.get)  # type: ignore
+
+        model_refit_map = {}
+        models_trained_full = []
+        for model in models_sorted_by_layer:
+            model = self.load_model(model)
+            model_name = model.name
+            if model._get_tags()["can_refit_full"]:
+                model_full = model.convert_to_refit_full_template()
+                assert isinstance(model_full, AbstractTimeSeriesModel)
+                logger.info(f"Fitting model: {model_full.name}")
+                models_trained = self._train_and_save(
+                    train_data=refit_full_data,
+                    val_data=None,
+                    model=model_full,
+                )
+            else:
+                model_full = model.convert_to_refit_full_via_copy()
+                logger.info(f"Fitting model: {model_full.name} | Skipping fit via cloning parent ...")
+                models_trained = [model_full.name]
+                if isinstance(model_full, AbstractTimeSeriesEnsembleModel):
+                    model_full.remap_base_models(model_refit_map)
+                    self._add_model(model_full, base_models=model_full.model_names)
+                else:
+                    self._add_model(model_full)
+                self.save_model(model_full)
+
+            if len(models_trained) == 1:
+                model_refit_map[model_name] = models_trained[0]
+            models_trained_full += models_trained
+
+        self.model_refit_map.update(model_refit_map)
+        self.save()
+        return models_trained_full
+
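Because models are refit in layer order, every base model is processed before any ensemble that depends on it, so model_refit_map is already complete when an ensemble's base models are remapped. A toy view of what the remapping amounts to (names hypothetical):

    model_refit_map = {"DeepAR": "DeepAR_FULL", "ETS": "ETS_FULL"}
    ensemble_base_models = ["DeepAR", "ETS"]
    # remap_base_models effectively performs this substitution on the clone:
    ensemble_base_models = [model_refit_map.get(m, m) for m in ensemble_base_models]
    print(ensemble_base_models)  # ['DeepAR_FULL', 'ETS_FULL']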
+    def refit_full(self, model: str = "all") -> dict[str, str]:
+        time_start = time.time()
+        existing_models = self.get_model_names()
+        if model == "all":
+            model_names = existing_models
+        elif model == "best":
+            model_names = self.get_minimum_model_set(self.get_model_best())
+        else:
+            model_names = self.get_minimum_model_set(model)
+
+        valid_model_set = []
+        for name in model_names:
+            if name in self.model_refit_map and self.model_refit_map[name] in existing_models:
+                logger.info(
+                    f"Model '{name}' already has a refit _FULL model: "
+                    f"'{self.model_refit_map[name]}', skipping refit..."
+                )
+            elif name in self.model_refit_map.values():
+                logger.debug(f"Model '{name}' is a refit _FULL model, skipping refit...")
+            else:
+                valid_model_set.append(name)
+
+        if valid_model_set:
+            models_trained_full = self.refit_single_full(models=valid_model_set)
+        else:
+            models_trained_full = []
+
+        self.save()
+        logger.info(f"Refit complete. Models trained: {models_trained_full}")
+        logger.info(f"Total runtime: {time.time() - time_start:.2f} s")
+        return copy.deepcopy(self.model_refit_map)
+
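At the user level, refitting is typically triggered through the predictor rather than the trainer. A hedged sketch, assuming the usual TimeSeriesPredictor entry point and an existing train_data TimeSeriesDataFrame:

    from autogluon.timeseries import TimeSeriesPredictor

    predictor = TimeSeriesPredictor(prediction_length=24).fit(train_data)
    # Retrains models on train + validation data; returns a mapping such as
    # {"DeepAR": "DeepAR_FULL", ...} from original to refit model names.
    refit_map = predictor.refit_full(model="all")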
+    def get_trainable_base_models(
+        self,
+        hyperparameters: str | dict[str, Any],
+        *,
+        multi_window: bool = False,
+        freq: str | None = None,
+        excluded_model_types: list[str] | None = None,
+        hyperparameter_tune: bool = False,
+    ) -> list[AbstractTimeSeriesModel]:
+        return TrainableModelSetBuilder(
+            freq=freq,
+            prediction_length=self.prediction_length,
+            path=self.path,
+            eval_metric=self.eval_metric,
+            quantile_levels=self.quantile_levels,
+            target=self.target,
+            covariate_metadata=self.covariate_metadata,
+            multi_window=multi_window and not self.skip_model_selection,
+        ).get_model_set(
+            hyperparameters=hyperparameters,
+            hyperparameter_tune=hyperparameter_tune,
+            excluded_model_types=excluded_model_types,
+            banned_model_names=self._get_banned_model_names(),
+        )
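The hyperparameters argument follows the usual AutoGluon convention: either a preset name or a dict mapping each model name to a single config (or a list of configs, yielding one model per config). A sketch of the dict form; the specific hyperparameter values below are illustrative assumptions, not recommended settings:

    hyperparameters = {
        "ETS": {},                    # train with default hyperparameters
        "DeepAR": {"max_epochs": 5},  # override a single hyperparameter
        "Theta": [                    # two configs -> two separate Theta models
            {"decomposition_type": "multiplicative"},
            {},
        ],
    }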