oracle-ads 2.10.0__py3-none-any.whl → 2.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ads/aqua/__init__.py +12 -0
- ads/aqua/base.py +324 -0
- ads/aqua/cli.py +19 -0
- ads/aqua/config/deployment_config_defaults.json +9 -0
- ads/aqua/config/resource_limit_names.json +7 -0
- ads/aqua/constants.py +45 -0
- ads/aqua/data.py +40 -0
- ads/aqua/decorator.py +101 -0
- ads/aqua/deployment.py +643 -0
- ads/aqua/dummy_data/icon.txt +1 -0
- ads/aqua/dummy_data/oci_model_deployments.json +56 -0
- ads/aqua/dummy_data/oci_models.json +1 -0
- ads/aqua/dummy_data/readme.md +26 -0
- ads/aqua/evaluation.py +1751 -0
- ads/aqua/exception.py +82 -0
- ads/aqua/extension/__init__.py +40 -0
- ads/aqua/extension/base_handler.py +138 -0
- ads/aqua/extension/common_handler.py +21 -0
- ads/aqua/extension/deployment_handler.py +202 -0
- ads/aqua/extension/evaluation_handler.py +135 -0
- ads/aqua/extension/finetune_handler.py +66 -0
- ads/aqua/extension/model_handler.py +59 -0
- ads/aqua/extension/ui_handler.py +201 -0
- ads/aqua/extension/utils.py +23 -0
- ads/aqua/finetune.py +579 -0
- ads/aqua/job.py +29 -0
- ads/aqua/model.py +819 -0
- ads/aqua/training/__init__.py +4 -0
- ads/aqua/training/exceptions.py +459 -0
- ads/aqua/ui.py +453 -0
- ads/aqua/utils.py +715 -0
- ads/cli.py +37 -6
- ads/common/auth.py +7 -0
- ads/common/decorator/__init__.py +7 -3
- ads/common/decorator/require_nonempty_arg.py +65 -0
- ads/common/object_storage_details.py +166 -7
- ads/common/oci_client.py +18 -1
- ads/common/oci_logging.py +2 -2
- ads/common/oci_mixin.py +4 -5
- ads/common/serializer.py +34 -5
- ads/common/utils.py +75 -10
- ads/config.py +40 -1
- ads/dataset/correlation_plot.py +10 -12
- ads/jobs/ads_job.py +43 -25
- ads/jobs/builders/infrastructure/base.py +4 -2
- ads/jobs/builders/infrastructure/dsc_job.py +49 -39
- ads/jobs/builders/runtimes/base.py +71 -1
- ads/jobs/builders/runtimes/container_runtime.py +4 -4
- ads/jobs/builders/runtimes/pytorch_runtime.py +10 -63
- ads/jobs/templates/driver_pytorch.py +27 -10
- ads/model/artifact_downloader.py +84 -14
- ads/model/artifact_uploader.py +25 -23
- ads/model/datascience_model.py +388 -38
- ads/model/deployment/model_deployment.py +10 -2
- ads/model/generic_model.py +8 -0
- ads/model/model_file_description_schema.json +68 -0
- ads/model/model_metadata.py +1 -1
- ads/model/service/oci_datascience_model.py +34 -5
- ads/opctl/config/merger.py +2 -2
- ads/opctl/operator/__init__.py +3 -1
- ads/opctl/operator/cli.py +7 -1
- ads/opctl/operator/cmd.py +3 -3
- ads/opctl/operator/common/errors.py +2 -1
- ads/opctl/operator/common/operator_config.py +22 -3
- ads/opctl/operator/common/utils.py +16 -0
- ads/opctl/operator/lowcode/anomaly/MLoperator +15 -0
- ads/opctl/operator/lowcode/anomaly/README.md +209 -0
- ads/opctl/operator/lowcode/anomaly/__init__.py +5 -0
- ads/opctl/operator/lowcode/anomaly/__main__.py +104 -0
- ads/opctl/operator/lowcode/anomaly/cmd.py +35 -0
- ads/opctl/operator/lowcode/anomaly/const.py +88 -0
- ads/opctl/operator/lowcode/anomaly/environment.yaml +12 -0
- ads/opctl/operator/lowcode/anomaly/model/__init__.py +5 -0
- ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py +147 -0
- ads/opctl/operator/lowcode/anomaly/model/automlx.py +89 -0
- ads/opctl/operator/lowcode/anomaly/model/autots.py +103 -0
- ads/opctl/operator/lowcode/anomaly/model/base_model.py +354 -0
- ads/opctl/operator/lowcode/anomaly/model/factory.py +67 -0
- ads/opctl/operator/lowcode/anomaly/model/tods.py +119 -0
- ads/opctl/operator/lowcode/anomaly/operator_config.py +105 -0
- ads/opctl/operator/lowcode/anomaly/schema.yaml +359 -0
- ads/opctl/operator/lowcode/anomaly/utils.py +81 -0
- ads/opctl/operator/lowcode/common/__init__.py +5 -0
- ads/opctl/operator/lowcode/common/const.py +10 -0
- ads/opctl/operator/lowcode/common/data.py +96 -0
- ads/opctl/operator/lowcode/common/errors.py +41 -0
- ads/opctl/operator/lowcode/common/transformations.py +191 -0
- ads/opctl/operator/lowcode/common/utils.py +250 -0
- ads/opctl/operator/lowcode/forecast/README.md +3 -2
- ads/opctl/operator/lowcode/forecast/__main__.py +18 -2
- ads/opctl/operator/lowcode/forecast/cmd.py +8 -7
- ads/opctl/operator/lowcode/forecast/const.py +17 -1
- ads/opctl/operator/lowcode/forecast/environment.yaml +3 -2
- ads/opctl/operator/lowcode/forecast/model/arima.py +106 -117
- ads/opctl/operator/lowcode/forecast/model/automlx.py +204 -180
- ads/opctl/operator/lowcode/forecast/model/autots.py +144 -253
- ads/opctl/operator/lowcode/forecast/model/base_model.py +326 -259
- ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +325 -176
- ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +293 -237
- ads/opctl/operator/lowcode/forecast/model/prophet.py +191 -208
- ads/opctl/operator/lowcode/forecast/operator_config.py +24 -33
- ads/opctl/operator/lowcode/forecast/schema.yaml +116 -29
- ads/opctl/operator/lowcode/forecast/utils.py +186 -356
- ads/opctl/operator/lowcode/pii/model/guardrails.py +18 -15
- ads/opctl/operator/lowcode/pii/model/report.py +7 -7
- ads/opctl/operator/lowcode/pii/operator_config.py +1 -8
- ads/opctl/operator/lowcode/pii/utils.py +0 -82
- ads/opctl/operator/runtime/runtime.py +3 -2
- ads/telemetry/base.py +62 -0
- ads/telemetry/client.py +105 -0
- ads/telemetry/telemetry.py +6 -3
- {oracle_ads-2.10.0.dist-info → oracle_ads-2.11.0.dist-info}/METADATA +44 -7
- {oracle_ads-2.10.0.dist-info → oracle_ads-2.11.0.dist-info}/RECORD +116 -59
- ads/opctl/operator/lowcode/forecast/model/transformations.py +0 -125
- {oracle_ads-2.10.0.dist-info → oracle_ads-2.11.0.dist-info}/LICENSE.txt +0 -0
- {oracle_ads-2.10.0.dist-info → oracle_ads-2.11.0.dist-info}/WHEEL +0 -0
- {oracle_ads-2.10.0.dist-info → oracle_ads-2.11.0.dist-info}/entry_points.txt +0 -0
@@ -11,13 +11,13 @@ import numpy as np
 import yaml
 
 from ads.opctl import logger
-from ads.opctl.operator.lowcode.
+from ads.opctl.operator.lowcode.common.utils import seconds_to_datetime
 from .base_model import ForecastOperatorBaseModel
 from ..operator_config import ForecastOperatorConfig
 from ads.common.decorator.runtime_dependency import runtime_dependency
 from .forecast_datasets import ForecastDatasets, ForecastOutput
-from ..const import ForecastOutputColumns
-
+from ..const import ForecastOutputColumns, SupportedModels
+from ads.opctl.operator.lowcode.forecast.utils import _select_plot_list
 
 AUTOTS_MAX_GENERATION = 10
 AUTOTS_MODELS_TO_VALIDATE = 0.15
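The removed import line is truncated in this view; its replacement, `seconds_to_datetime`, comes from the shared `ads/opctl/operator/lowcode/common/utils.py` module added in this release. The exact signature is not visible in this diff; the sketch below is illustrative only, assumes epoch-seconds input, and uses the hypothetical name `seconds_to_datetime_sketch`.

```python
import pandas as pd

def seconds_to_datetime_sketch(values, dt_format=None):
    """Illustrative only: turn epoch seconds into pandas datetimes."""
    result = pd.to_datetime(values, unit="s")
    if dt_format is not None:
        # Optionally render with a caller-supplied strftime format.
        result = pd.Series(result).dt.strftime(dt_format)
    return result

# 2021-01-01 00:00:00 UTC and one hour later
print(seconds_to_datetime_sketch([1609459200, 1609462800]))
```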
@@ -45,167 +45,158 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
         # Import necessary libraries
         from autots import AutoTS, create_regressor
 
+        self.outputs = None
         models = dict()
-        outputs = dict()
-        outputs_legacy = []
         # Get the name of the datetime column
-        date_column = self.spec.datetime_column.name
-        self.datasets.datetime_col = date_column
         self.forecast_output = ForecastOutput(
-            confidence_interval_width=self.spec.confidence_interval_width
+            confidence_interval_width=self.spec.confidence_interval_width,
+            horizon=self.spec.horizon,
+            target_column=self.original_target_column,
+            dt_column=self.spec.datetime_column.name,
         )
+        model = self.loaded_models if self.loaded_models is not None else None
+        if model is None:
+            # Initialize the AutoTS model with specified parameters
+            model = AutoTS(
+                forecast_length=self.spec.horizon,
+                frequency=self.spec.model_kwargs.get(
+                    "frequency", "infer"
+                ),  # TODO: Use datasets.get_datetime_frequency ?
+                prediction_interval=self.spec.confidence_interval_width,
+                max_generations=self.spec.model_kwargs.get(
+                    "max_generations", AUTOTS_MAX_GENERATION
+                ),
+                no_negatives=self.spec.model_kwargs.get("no_negatives", False),
+                constraint=self.spec.model_kwargs.get("constraint", None),
+                ensemble=self.spec.model_kwargs.get("ensemble", "auto"),
+                initial_template=self.spec.model_kwargs.get(
+                    "initial_template", "General+Random"
+                ),
+                random_seed=self.spec.model_kwargs.get("random_seed", 2022),
+                holiday_country=self.spec.model_kwargs.get("holiday_country", "US"),
+                subset=self.spec.model_kwargs.get("subset", None),
+                aggfunc=self.spec.model_kwargs.get("aggfunc", "first"),
+                na_tolerance=self.spec.model_kwargs.get("na_tolerance", 1),
+                drop_most_recent=self.spec.model_kwargs.get("drop_most_recent", 0),
+                drop_data_older_than_periods=self.spec.model_kwargs.get(
+                    "drop_data_older_than_periods", None
+                ),
+                model_list=self.spec.model_kwargs.get("model_list", "fast_parallel"),
+                transformer_list=self.spec.model_kwargs.get("transformer_list", "auto"),
+                transformer_max_depth=self.spec.model_kwargs.get(
+                    "transformer_max_depth", 6
+                ),
+                models_mode=self.spec.model_kwargs.get("models_mode", "random"),
+                num_validations=self.spec.model_kwargs.get("num_validations", "auto"),
+                models_to_validate=self.spec.model_kwargs.get(
+                    "models_to_validate", AUTOTS_MODELS_TO_VALIDATE
+                ),
+                max_per_model_class=self.spec.model_kwargs.get(
+                    "max_per_model_class", None
+                ),
+                validation_method=self.spec.model_kwargs.get(
+                    "validation_method", "backwards"
+                ),
+                min_allowed_train_percent=self.spec.model_kwargs.get(
+                    "min_allowed_train_percent", 0.5
+                ),
+                remove_leading_zeroes=self.spec.model_kwargs.get(
+                    "remove_leading_zeroes", False
+                ),
+                prefill_na=self.spec.model_kwargs.get("prefill_na", None),
+                introduce_na=self.spec.model_kwargs.get("introduce_na", None),
+                preclean=self.spec.model_kwargs.get("preclean", None),
+                model_interrupt=self.spec.model_kwargs.get("model_interrupt", True),
+                generation_timeout=self.spec.model_kwargs.get(
+                    "generation_timeout", None
+                ),
+                current_model_file=self.spec.model_kwargs.get(
+                    "current_model_file", None
+                ),
+                verbose=-1 if logger.level > 40 else 1,
+                n_jobs=self.spec.model_kwargs.get("n_jobs", -1),
+            )
 
-
-        model = AutoTS(
-            forecast_length=self.spec.horizon,
-            frequency=self.spec.model_kwargs.get("frequency", "infer"),
-            prediction_interval=self.spec.confidence_interval_width,
-            max_generations=self.spec.model_kwargs.get(
-                "max_generations", AUTOTS_MAX_GENERATION
-            ),
-            no_negatives=self.spec.model_kwargs.get("no_negatives", False),
-            constraint=self.spec.model_kwargs.get("constraint", None),
-            ensemble=self.spec.model_kwargs.get("ensemble", "auto"),
-            initial_template=self.spec.model_kwargs.get(
-                "initial_template", "General+Random"
-            ),
-            random_seed=self.spec.model_kwargs.get("random_seed", 2022),
-            holiday_country=self.spec.model_kwargs.get("holiday_country", "US"),
-            subset=self.spec.model_kwargs.get("subset", None),
-            aggfunc=self.spec.model_kwargs.get("aggfunc", "first"),
-            na_tolerance=self.spec.model_kwargs.get("na_tolerance", 1),
-            drop_most_recent=self.spec.model_kwargs.get("drop_most_recent", 0),
-            drop_data_older_than_periods=self.spec.model_kwargs.get(
-                "drop_data_older_than_periods", None
-            ),
-            model_list=self.spec.model_kwargs.get("model_list", "fast_parallel"),
-            transformer_list=self.spec.model_kwargs.get("transformer_list", "auto"),
-            transformer_max_depth=self.spec.model_kwargs.get(
-                "transformer_max_depth", 6
-            ),
-            models_mode=self.spec.model_kwargs.get("models_mode", "random"),
-            num_validations=self.spec.model_kwargs.get("num_validations", "auto"),
-            models_to_validate=self.spec.model_kwargs.get(
-                "models_to_validate", AUTOTS_MODELS_TO_VALIDATE
-            ),
-            max_per_model_class=self.spec.model_kwargs.get("max_per_model_class", None),
-            validation_method=self.spec.model_kwargs.get(
-                "validation_method", "backwards"
-            ),
-            min_allowed_train_percent=self.spec.model_kwargs.get(
-                "min_allowed_train_percent", 0.5
-            ),
-            remove_leading_zeroes=self.spec.model_kwargs.get(
-                "remove_leading_zeroes", False
-            ),
-            prefill_na=self.spec.model_kwargs.get("prefill_na", None),
-            introduce_na=self.spec.model_kwargs.get("introduce_na", None),
-            preclean=self.spec.model_kwargs.get("preclean", None),
-            model_interrupt=self.spec.model_kwargs.get("model_interrupt", True),
-            generation_timeout=self.spec.model_kwargs.get("generation_timeout", None),
-            current_model_file=self.spec.model_kwargs.get("current_model_file", None),
-            verbose=self.spec.model_kwargs.get("verbose", 1),
-            n_jobs=self.spec.model_kwargs.get("n_jobs", -1),
-        )
+        full_data_indexed = self.datasets.get_data_multi_indexed()
 
-
-
-        temp_list = [full_data_dict[i] for i in full_data_dict.keys()]
-        melt_temp = [
-            temp_list[i].melt(
-                temp_list[i].columns.difference(self.target_columns),
-                var_name="series_id",
-                value_name=self.original_target_column,
-            )
-            for i in range(len(self.target_columns))
-        ]
+        dates = full_data_indexed.index.get_level_values(0).unique().tolist()
+        train_idx = dates[: -self.spec.horizon]
 
-
+        df_train = full_data_indexed[
+            full_data_indexed.index.get_level_values(0).isin(train_idx)
+        ][[self.original_target_column]].reset_index()
 
-
-
-
-
-
+        # Future regressors need to be in wide format - (num_unique_dates x (num_unique_series x num_unique_cols))
+        additional_regressors = list(
+            set(full_data_indexed.columns) - {self.original_target_column}
+        )
+        if len(additional_regressors) > 1:
+            future_regressor = full_data_indexed.reset_index().pivot(
+                index=self.spec.datetime_column.name,
+                columns=ForecastOutputColumns.SERIES,
+                values=additional_regressors,
             )
-
-
+            future_reg = future_regressor[: -self.spec.horizon]
+            regr_fcst = future_regressor[-self.spec.horizon :]
+        else:
+            future_reg = None
+            regr_fcst = None
+
+        if self.loaded_models is None:
+            model = model.fit(
+                df_train,
+                future_regressor=future_reg,
+                date_col=self.spec.datetime_column.name,
+                value_col=self.original_target_column,
+                id_col=ForecastOutputColumns.SERIES,
             )
-
-
-
-
-            values=list(
-                self.original_additional_data.set_index(
-                    [
-                        self.spec.target_category_columns[0],
-                        self.spec.datetime_column.name,
-                    ]
-                ).columns
-            ),
-            ),
-            forecast_length=self.spec.horizon,
-        )
-
-        self.future_regressor_train = r_tr.copy()
-
-        # Fit the model to the training data
-        model = model.fit(
-            self.full_data_long.groupby("series_id")
-            .head(-self.spec.horizon)
-            .reset_index(drop=True),
-            date_col=self.spec.datetime_column.name,
-            value_col=self.original_target_column,
-            future_regressor=r_tr.head(-self.spec.horizon)
-            if self.spec.additional_data
-            else None,
-            id_col="series_id",
-        )
+            # Store the trained model and generate forecasts
+            self.models = copy.deepcopy(model)
+        else:
+            self.models = self.loaded_models
 
-
-        self.models = copy.deepcopy(model)
+        self.outputs = model.predict(future_regressor=regr_fcst)
         logger.debug("===========Forecast Generated===========")
-        self.prediction = model.predict(
-            future_regressor=r_tr.tail(self.spec.horizon)
-            if self.spec.additional_data
-            else None
-        )
-
-        outputs = dict()
 
-
-
-
-
-
-
-
-
-
-
-
-
+        hist_df = model.back_forecast().forecast
+
+        params = vars(model).copy()
+        for param in [
+            "ens_copy",
+            "df_wide_numeric",
+            "future_regressor_train",
+            "initial_results",
+            "score_per_series",
+            "validation_results",
+            "validation_train_indexes",
+            "validation_test_indexes",
+            "validation_indexes",
+            "best_model",
+        ]:
+            if param in params:
+                params.pop(param)
+
+        for s_id in self.datasets.list_series_ids():
+            self.forecast_output.init_series_output(
+                series_id=s_id, data_at_series=self.datasets.get_data_at_series(s_id)
             )
-
-
-
-
-
-
-
-            output_i[yhat_lower_name] = self.prediction.lower_forecast[[cat_target]]
-
-            output_i = output_i.reset_index()
-            output_col = pd.concat([output_col, output_i])
-            self.forecast_output.add_category(
-                category=cat, target_category_column=cat_target, forecast=output_i
+
+            self.forecast_output.populate_series_output(
+                series_id=s_id,
+                fit_val=hist_df[s_id].values,
+                forecast_val=self.outputs.forecast[s_id].values,
+                upper_bound=self.outputs.upper_forecast[s_id].values,
+                lower_bound=self.outputs.lower_forecast[s_id].values,
             )
 
-
+            self.model_parameters[s_id] = {
+                "framework": SupportedModels.AutoTS,
+                **params,
+            }
 
         logger.debug("===========Done===========")
 
-        return
+        return self.forecast_output.get_forecast_long()
 
     def _generate_report(self) -> tuple:
         """
@@ -228,15 +219,9 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
             "## Forecast Overview \n"
             "These plots show your forecast in the context of historical data."
         )
-        sec_1 =
-        lambda
-
-                series=self.models.df_wide_numeric.columns[idx],
-                start_date=self.models.df_wide_numeric.reset_index()[
-                    self.spec.datetime_column.name
-                ].min(),
-            ),
-            target_columns=self.target_columns,
+        sec_1 = _select_plot_list(
+            lambda s_id: self.outputs.plot(self.models.df_wide_numeric, series=s_id),
+            self.datasets.list_series_ids(),
         )
 
         # Section 2: AutoTS Model Parameters
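The per-series forecast plots are now assembled by `_select_plot_list` from the forecast utils instead of being built inline. Its implementation is not part of this hunk; judging only from the call site, a plausible sketch (assuming the datapane `dp` widgets already used in this file, hypothetical name `select_plot_list_sketch`) would be:

```python
import datapane as dp

def select_plot_list_sketch(plot_fn, series_ids):
    """Illustrative only: one plot per series id, wrapped in a selector when needed."""
    blocks = [dp.Plot(plot_fn(s_id), label=str(s_id)) for s_id in series_ids]
    # dp.Select needs at least two blocks, mirroring the pattern used elsewhere in this file.
    return dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
```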
@@ -253,65 +238,7 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
         all_sections = [sec1_text, sec_1, sec2_text, sec2]
 
         if self.spec.generate_explanations:
-
-            try:
-                self.explain_model(
-                    datetime_col_name=self.spec.datetime_column.name,
-                    explain_predict_fn=self._custom_predict_autots,
-                )
-
-                # Create a markdown text block for the global explanation section
-                global_explanation_text = dp.Text(
-                    f"## Global Explanation of Models \n "
-                    "The following tables provide the feature attribution for the global explainability."
-                )
-
-                # Convert the global explanation data to a DataFrame
-                global_explanation_df = pd.DataFrame(self.global_explanation).drop(
-                    index=["series_id", self.spec.target_column]
-                )
-
-                self.formatted_global_explanation = (
-                    global_explanation_df / global_explanation_df.sum(axis=0) * 100
-                )
-
-                # Create a markdown section for the global explainability
-                global_explanation_section = dp.Blocks(
-                    "### Global Explainability ",
-                    dp.DataTable(self.formatted_global_explanation),
-                )
-
-                aggregate_local_explanations = pd.DataFrame()
-                for s_id, local_ex_df in self.local_explanation.items():
-                    local_ex_df_copy = local_ex_df.copy()
-                    local_ex_df_copy["Series"] = s_id
-                    aggregate_local_explanations = pd.concat(
-                        [aggregate_local_explanations, local_ex_df_copy], axis=0
-                    )
-                self.formatted_local_explanation = aggregate_local_explanations
-
-                local_explanation_text = dp.Text(f"## Local Explanation of Models \n ")
-                blocks = [
-                    dp.DataTable(
-                        local_ex_df.div(local_ex_df.abs().sum(axis=1), axis=0) * 100,
-                        label=s_id,
-                    )
-                    for s_id, local_ex_df in self.local_explanation.items()
-                ]
-                local_explanation_section = (
-                    dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
-                )
-
-                # Append the global explanation text and section to the "all_sections" list
-                all_sections = all_sections + [
-                    global_explanation_text,
-                    global_explanation_section,
-                    local_explanation_text,
-                    local_explanation_section,
-                ]
-            except Exception as e:
-                logger.warn(f"Failed to generate Explanations with error: {e}.")
-                logger.debug(f"Full Traceback: {traceback.format_exc()}")
+            logger.warn(f"Explanations not yet supported for the AutoTS Module")
 
         # Model Description
         model_description = dp.Text(
@@ -327,49 +254,12 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
             other_sections,
         )
 
-    def
-        """
-        Predicts the future values of a time series using the AutoTS model.
-
-        Parameters
-        ----------
-            data (numpy.ndarray): The input data to be used for prediction.
-
-        Returns
-        -------
-            numpy.ndarray: The predicted future values of the time series.
-        """
-
-        data.index = pd.to_datetime(data.index)
-        temp_model = copy.deepcopy(self.models)
-
-        if data.shape[0] > 1:
-            temp_model.fit_data(
-                data[~data.index.duplicated()],
-                future_regressor=self.future_regressor_train.head(-self.spec.horizon),
-            )
-            dedup_shape = data.shape[0] - data[~data.index.duplicated()].shape[0] + 1
-            return pd.Series(0, index=np.arange(dedup_shape)).append(
-                temp_model.back_forecast(
-                    tail=data[~data.index.duplicated()].shape[0] - 1
-                )
-                .forecast[self.spec.target_column]
-                .fillna(0)
-            )
-
-        return temp_model.predict(
-            future_regressor=self.future_regressor_train.loc[
-                self.future_regressor_train.index.isin(data.index)
-            ],
-            forecast_length=1,
-        ).forecast[self.series_id]
-
-    def _generate_train_metrics(self) -> pd.DataFrame:
+    def generate_train_metrics(self) -> pd.DataFrame:
         """
         Generate Training Metrics when fitted data is not available.
         The method that needs to be implemented on the particular model level.
 
-        metrics
+        metrics        Store 1
         sMAPE          26.19
         MAPE           2.96E+18
         RMSE           2014.192531
@@ -380,4 +270,5 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
         scores = pd.DataFrame(
             self.models.best_model_per_series_score(), columns=["AutoTS Score"]
         ).T
-
+        df = pd.concat([mapes, scores])
+        return df