oracle-ads 2.12.5__py3-none-any.whl → 2.12.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. ads/aqua/common/decorator.py +10 -0
  2. ads/aqua/evaluation/entities.py +12 -2
  3. ads/aqua/extension/aqua_ws_msg_handler.py +2 -0
  4. ads/aqua/extension/base_handler.py +2 -0
  5. ads/aqua/finetuning/constants.py +3 -0
  6. ads/aqua/finetuning/finetuning.py +13 -2
  7. ads/opctl/operator/lowcode/anomaly/model/anomaly_merlion.py +6 -5
  8. ads/opctl/operator/lowcode/anomaly/model/automlx.py +12 -8
  9. ads/opctl/operator/lowcode/anomaly/model/autots.py +6 -3
  10. ads/opctl/operator/lowcode/anomaly/model/base_model.py +19 -7
  11. ads/opctl/operator/lowcode/anomaly/model/isolationforest.py +9 -10
  12. ads/opctl/operator/lowcode/anomaly/model/oneclasssvm.py +10 -11
  13. ads/opctl/operator/lowcode/anomaly/model/randomcutforest.py +6 -2
  14. ads/opctl/operator/lowcode/common/data.py +13 -11
  15. ads/opctl/operator/lowcode/forecast/model/arima.py +14 -12
  16. ads/opctl/operator/lowcode/forecast/model/automlx.py +26 -26
  17. ads/opctl/operator/lowcode/forecast/model/autots.py +16 -18
  18. ads/opctl/operator/lowcode/forecast/model/base_model.py +45 -36
  19. ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +36 -47
  20. ads/opctl/operator/lowcode/forecast/model/ml_forecast.py +3 -0
  21. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +30 -46
  22. ads/opctl/operator/lowcode/forecast/model/prophet.py +15 -20
  23. ads/opctl/operator/lowcode/forecast/model_evaluator.py +25 -20
  24. ads/opctl/operator/lowcode/forecast/utils.py +30 -33
  25. ads/opctl/operator/lowcode/pii/model/report.py +11 -7
  26. ads/opctl/operator/lowcode/recommender/model/base_model.py +58 -45
  27. ads/opctl/operator/lowcode/recommender/model/svd.py +47 -29
  28. {oracle_ads-2.12.5.dist-info → oracle_ads-2.12.6.dist-info}/METADATA +5 -5
  29. {oracle_ads-2.12.5.dist-info → oracle_ads-2.12.6.dist-info}/RECORD +32 -32
  30. {oracle_ads-2.12.5.dist-info → oracle_ads-2.12.6.dist-info}/LICENSE.txt +0 -0
  31. {oracle_ads-2.12.5.dist-info → oracle_ads-2.12.6.dist-info}/WHEEL +0 -0
  32. {oracle_ads-2.12.5.dist-info → oracle_ads-2.12.6.dist-info}/entry_points.txt +0 -0
ads/opctl/operator/lowcode/forecast/model/neuralprophet.py
@@ -1,45 +1,35 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

  # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

+ import logging
+ import traceback
+
  import numpy as np
  import optuna
  import pandas as pd
- from joblib import Parallel, delayed
  from torch import Tensor
- from torchmetrics.regression import (
-     MeanAbsoluteError,
-     MeanAbsolutePercentageError,
-     MeanSquaredError,
-     R2Score,
-     SymmetricMeanAbsolutePercentageError,
- )

  from ads.common.decorator.runtime_dependency import (
      OptionalDependency,
      runtime_dependency,
  )
  from ads.opctl import logger
-
- from ..const import DEFAULT_TRIALS, ForecastOutputColumns, SupportedModels
- from ads.opctl.operator.lowcode.forecast.utils import (
-     load_pkl,
-     write_pkl,
-     _select_plot_list,
-     _label_encode_dataframe,
- )
  from ads.opctl.operator.lowcode.common.utils import (
      disable_print,
      enable_print,
-     seconds_to_datetime,
  )
- from .base_model import ForecastOperatorBaseModel
+ from ads.opctl.operator.lowcode.forecast.utils import (
+     _select_plot_list,
+     load_pkl,
+     write_pkl,
+ )
+
+ from ..const import DEFAULT_TRIALS, SupportedModels
  from ..operator_config import ForecastOperatorConfig
+ from .base_model import ForecastOperatorBaseModel
  from .forecast_datasets import ForecastDatasets, ForecastOutput
- import traceback
-

  # def _get_np_metrics_dict(selected_metric):
  #     metric_translation = {
@@ -62,7 +52,7 @@ import traceback
      object="NeuralProphet",
      install_from=OptionalDependency.FORECAST,
  )
- def _fit_model(data, params, additional_regressors, select_metric):
+ def _fit_model(data, params, additional_regressors):
      from neuralprophet import NeuralProphet, set_log_level

      if logger.level > 10:
@@ -70,13 +60,12 @@ def _fit_model(data, params, additional_regressors, select_metric):
          disable_print()

      m = NeuralProphet(**params)
-     # m.metrics = _get_np_metrics_dict(select_metric)
      for add_reg in additional_regressors:
          m = m.add_future_regressor(name=add_reg)
      m.fit(df=data)
-     accepted_regressors_config = m.config_regressors or dict()
+     accepted_regressors_config = m.config_regressors or {}
      if hasattr(accepted_regressors_config, "regressors"):
-         accepted_regressors_config = accepted_regressors_config.regressors or dict()
+         accepted_regressors_config = accepted_regressors_config.regressors or {}

      enable_print()
      return m, list(accepted_regressors_config.keys())
@@ -97,11 +86,12 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
              self.loaded_trainers = load_pkl(
                  self.spec.previous_output_dir + "/trainer.pkl"
              )
-         except:
-             logger.debug("model.pkl/trainer.pkl is not present")
+         except Exception as e:
+             logger.debug(f"model.pkl/trainer.pkl is not present. Error message: {e}")

      def set_kwargs(self):
          # Extract the Confidence Interval Width and convert to prophet's equivalent - interval_width
+         model_kwargs = self.spec.model_kwargs
          if self.spec.confidence_interval_width is None:
              quantiles = model_kwargs.get("quantiles", [0.05, 0.95])
              self.spec.confidence_interval_width = float(quantiles[1]) - float(
@@ -110,8 +100,6 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
          else:
              boundaries = round((1 - self.spec.confidence_interval_width) / 2, 2)
              quantiles = [boundaries, self.spec.confidence_interval_width + boundaries]
-
-         model_kwargs = self.spec.model_kwargs
          model_kwargs["quantiles"] = quantiles
          return model_kwargs

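Note on the two `set_kwargs` hunks above: hoisting `model_kwargs = self.spec.model_kwargs` above the `if` fixes a `NameError`, since the `confidence_interval_width is None` branch read `model_kwargs` before the old assignment ran. A standalone sketch of the width/quantile conversion (the function name is illustrative, not part of the package):

    def width_to_quantiles(confidence_interval_width=None, quantiles=None):
        # Mirrors set_kwargs: derive whichever of the two settings is missing.
        if confidence_interval_width is None:
            # e.g. quantiles [0.05, 0.95] imply a width of ~0.9
            quantiles = quantiles or [0.05, 0.95]
            confidence_interval_width = float(quantiles[1]) - float(quantiles[0])
        else:
            # e.g. a width of 0.8 implies symmetric quantiles of roughly [0.1, 0.9]
            boundaries = round((1 - confidence_interval_width) / 2, 2)
            quantiles = [boundaries, confidence_interval_width + boundaries]
        return confidence_interval_width, quantiles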
@@ -124,12 +112,10 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):

          if self.loaded_models is not None and s_id in self.loaded_models:
              model = self.loaded_models[s_id]
-             accepted_regressors_config = (
-                 model.config_regressors.regressors or dict()
-             )
+             accepted_regressors_config = model.config_regressors.regressors or {}
              if hasattr(accepted_regressors_config, "regressors"):
                  accepted_regressors_config = (
-                     accepted_regressors_config.regressors or dict()
+                     accepted_regressors_config.regressors or {}
                  )
              self.accepted_regressors[s_id] = list(accepted_regressors_config.keys())
          if self.loaded_trainers is not None and s_id in self.loaded_trainers:
@@ -143,8 +129,6 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
                  data=data_i,
                  params=model_kwargs,
                  additional_regressors=self.additional_regressors,
-                 select_metric=None,
-                 # select_metric=self.spec.metric,
              )

              logger.debug(
@@ -205,7 +189,6 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
                  "config_normalization": model.config_normalization,
                  "config_missing": model.config_missing,
                  "config_model": model.config_model,
-                 "config_normalization": model.config_normalization,
                  "data_freq": model.data_freq,
                  "fitted": model.fitted,
                  "data_params": model.data_params,
@@ -220,19 +203,19 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
              self.errors_dict[s_id] = {
                  "model_name": self.spec.model,
                  "error": str(e),
-                 "error_trace": traceback.format_exc()
+                 "error_trace": traceback.format_exc(),
              }
              logger.warn(traceback.format_exc())
              raise e

      def _build_model(self) -> pd.DataFrame:
          full_data_dict = self.datasets.get_data_by_series()
-         self.models = dict()
-         self.trainers = dict()
-         self.outputs = dict()
-         self.errors_dict = dict()
-         self.explanations_info = dict()
-         self.accepted_regressors = dict()
+         self.models = {}
+         self.trainers = {}
+         self.outputs = {}
+         self.errors_dict = {}
+         self.explanations_info = {}
+         self.accepted_regressors = {}
          self.additional_regressors = self.datasets.get_additional_data_column_names()
          model_kwargs = self.set_kwargs()
          self.forecast_output = ForecastOutput(
@@ -282,7 +265,6 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
                  data=df_train,
                  params=params,
                  additional_regressors=self.additional_regressors,
-                 select_metric=self.spec.metric,
              )
              df_test = df_test[["y", "ds"] + accepted_regressors]

@@ -326,6 +308,8 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
      def _generate_report(self):
          import report_creator as rc

+         logging.getLogger("report_creator").setLevel(logging.WARNING)
+
          series_ids = self.models.keys()
          all_sections = []
          if len(series_ids) > 0:
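Several files in this release pin the chatty `report_creator` logger to WARNING before rendering a report. The pattern in isolation (a minimal sketch):

    import logging

    # Raise the threshold on a third-party library's named logger so its
    # INFO/DEBUG chatter is dropped while warnings and errors still surface.
    logging.getLogger("report_creator").setLevel(logging.WARNING)

    logging.getLogger("report_creator").info("suppressed")        # below WARNING
    logging.getLogger("report_creator").warning("still emitted")  # at WARNING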
@@ -371,7 +355,7 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):

          sec5_text = rc.Heading("Neural Prophet Model Parameters", level=2)
          model_states = []
-         for i, (s_id, m) in enumerate(self.models.items()):
+         for s_id, m in self.models.items():
              model_states.append(
                  pd.Series(
                      m.state_dict(),
@@ -449,7 +433,7 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
          )

      def explain_model(self):
-         self.local_explanation = dict()
+         self.local_explanation = {}
          global_expl = []
          rename_cols = {
              f"future_regressor_{col}": col
ads/opctl/operator/lowcode/forecast/model/prophet.py
@@ -1,17 +1,23 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

+ import logging
+ import traceback
+
+ import matplotlib as mpl
  import numpy as np
  import optuna
  import pandas as pd
- import logging
  from joblib import Parallel, delayed
- from ads.common.decorator.runtime_dependency import runtime_dependency
+
  from ads.opctl import logger
+ from ads.opctl.operator.lowcode.common.utils import set_log_level
  from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig
+ from ads.opctl.operator.lowcode.forecast.utils import (
+     _select_plot_list,
+ )

  from ..const import (
      DEFAULT_TRIALS,
@@ -19,23 +25,14 @@ from ..const import (
      ForecastOutputColumns,
      SupportedModels,
  )
- from ads.opctl.operator.lowcode.forecast.utils import (
-     _select_plot_list,
-     _label_encode_dataframe,
- )
- from ads.opctl.operator.lowcode.common.utils import set_log_level
  from .base_model import ForecastOperatorBaseModel
- from ..operator_config import ForecastOperatorConfig
  from .forecast_datasets import ForecastDatasets, ForecastOutput
- import traceback
- import matplotlib as mpl
-

  try:
      set_log_level("prophet", logger.level)
      set_log_level("cmdstanpy", logger.level)
      mpl.rcParams["figure.max_open_warning"] = 100
- except:
+ except Exception:
      pass

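The bare `except:` around this best-effort log-level setup is narrowed to `except Exception:`. The difference matters because a bare clause also traps `BaseException` subclasses such as `KeyboardInterrupt` and `SystemExit`; a sketch (the `risky_setup` stub is hypothetical):

    def risky_setup():
        # hypothetical stand-in for the set_log_level / mpl.rcParams calls
        raise RuntimeError("optional setup failed")

    try:
        risky_setup()
    except Exception:
        # Failures here are non-fatal, but Ctrl-C (KeyboardInterrupt) and
        # sys.exit() (SystemExit) still propagate, unlike with a bare `except:`.
        pass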
@@ -73,9 +70,6 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):

      def _train_model(self, i, series_id, df, model_kwargs):
          try:
-             from prophet import Prophet
-             from prophet.diagnostics import cross_validation, performance_metrics
-
              self.forecast_output.init_series_output(
                  series_id=series_id, data_at_series=df
              )
@@ -130,15 +124,15 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
              self.errors_dict[series_id] = {
                  "model_name": self.spec.model,
                  "error": str(e),
-                 "error_trace": traceback.format_exc()
+                 "error_trace": traceback.format_exc(),
              }
              logger.warn(f"Encountered Error: {e}. Skipping.")
              logger.warn(traceback.format_exc())

      def _build_model(self) -> pd.DataFrame:
          full_data_dict = self.datasets.get_data_by_series()
-         self.models = dict()
-         self.outputs = dict()
+         self.models = {}
+         self.outputs = {}
          self.additional_regressors = self.datasets.get_additional_data_column_names()
          model_kwargs = self.set_kwargs()
          self.forecast_output = ForecastOutput(
@@ -249,6 +243,8 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
          import report_creator as rc
          from prophet.plot import add_changepoints_to_plot

+         logging.getLogger("report_creator").setLevel(logging.WARNING)
+
          series_ids = self.models.keys()
          all_sections = []
          if len(series_ids) > 0:
@@ -351,7 +347,6 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
              # Append the global explanation text and section to the "all_sections" list
              all_sections = all_sections + [
                  global_explanation_section,
-                 local_explanation_text,
                  local_explanation_section,
              ]
          except Exception as e:
ads/opctl/operator/lowcode/forecast/model_evaluator.py
@@ -121,23 +121,26 @@ class ModelEvaluator:
              from .model.factory import ForecastOperatorModelFactory
              metrics[model] = {}
              for i in range(len(cut_offs)):
-                 backtest_historical_data = train_sets[i]
-                 backtest_additional_data = additional_data[i]
-                 backtest_test_data = test_sets[i]
-                 backtest_operator_config = self.create_operator_config(operator_config, i, model,
-                                                                        backtest_historical_data,
-                                                                        backtest_additional_data,
-                                                                        backtest_test_data)
-                 datasets = ForecastDatasets(backtest_operator_config)
-                 ForecastOperatorModelFactory.get_model(
-                     backtest_operator_config, datasets
-                 ).generate_report()
-                 test_metrics_filename = backtest_operator_config.spec.test_metrics_filename
-                 metrics_df = pd.read_csv(
-                     f"{backtest_operator_config.spec.output_directory.url}/{test_metrics_filename}")
-                 metrics_df["average_across_series"] = metrics_df.drop('metrics', axis=1).mean(axis=1)
-                 metrics_average_dict = dict(zip(metrics_df['metrics'].str.lower(), metrics_df['average_across_series']))
-                 metrics[model][i] = metrics_average_dict[operator_config.spec.metric]
+                 try:
+                     backtest_historical_data = train_sets[i]
+                     backtest_additional_data = additional_data[i]
+                     backtest_test_data = test_sets[i]
+                     backtest_operator_config = self.create_operator_config(operator_config, i, model,
+                                                                            backtest_historical_data,
+                                                                            backtest_additional_data,
+                                                                            backtest_test_data)
+                     datasets = ForecastDatasets(backtest_operator_config)
+                     ForecastOperatorModelFactory.get_model(
+                         backtest_operator_config, datasets
+                     ).generate_report()
+                     test_metrics_filename = backtest_operator_config.spec.test_metrics_filename
+                     metrics_df = pd.read_csv(
+                         f"{backtest_operator_config.spec.output_directory.url}/{test_metrics_filename}")
+                     metrics_df["average_across_series"] = metrics_df.drop('metrics', axis=1).mean(axis=1)
+                     metrics_average_dict = dict(zip(metrics_df['metrics'].str.lower(), metrics_df['average_across_series']))
+                     metrics[model][i] = metrics_average_dict[operator_config.spec.metric]
+                 except:
+                     logger.warn(f"Failed to calculate metrics for {model} and {i} backtest")
          return metrics

      def find_best_model(self, datasets: ForecastDatasets, operator_config: ForecastOperatorConfig):
@@ -147,10 +150,12 @@ class ModelEvaluator:
              model = SupportedModels.Prophet
              logger.error(f"Running {model} model as auto-select failed with the following error: {e.message}")
              return model
-         avg_backtests_metrics = {key: sum(value.values()) / len(value.values()) for key, value in metrics.items()}
-         best_model = min(avg_backtests_metrics, key=avg_backtests_metrics.get)
+         nonempty_metrics = {model: metric for model, metric in metrics.items() if metric != {}}
+         avg_backtests_metric = {model: sum(value.values()) / len(value.values())
+                                 for model, value in nonempty_metrics.items()}
+         best_model = min(avg_backtests_metric, key=avg_backtests_metric.get)
          logger.info(f"Among models {self.models}, {best_model} model shows better performance during backtesting.")
-         backtest_stats = pd.DataFrame(metrics).rename_axis('backtest')
+         backtest_stats = pd.DataFrame(nonempty_metrics).rename_axis('backtest')
          backtest_stats.reset_index(inplace=True)
          output_dir = operator_config.spec.output_directory.url
          backtest_report_name = "backtest_stats.csv"
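With per-backtest failures now tolerated in the loop above, a model that failed every backtest leaves an empty dict in `metrics`; averaging it would divide by zero, hence the `nonempty_metrics` filter before picking the winner. A self-contained sketch of the selection logic with made-up numbers:

    # {model: {backtest_index: metric_value}}; "arima" failed all backtests.
    metrics = {
        "prophet": {0: 11.2, 1: 9.8, 2: 10.4},
        "neuralprophet": {0: 12.0, 1: 12.5, 2: 11.9},
        "arima": {},
    }

    nonempty = {m: v for m, v in metrics.items() if v != {}}
    averages = {m: sum(v.values()) / len(v) for m, v in nonempty.items()}
    best_model = min(averages, key=averages.get)  # lowest average error wins
    print(best_model)  # -> prophet (avg ~10.5 vs ~12.1)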
ads/opctl/operator/lowcode/forecast/utils.py
@@ -1,42 +1,41 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

  # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

+ import logging
  import os
- import sys
- from typing import List
+ from typing import Set

+ import cloudpickle
  import fsspec
  import numpy as np
  import pandas as pd
- import cloudpickle
- import plotly.express as px
+ import report_creator as rc
  from plotly import graph_objects as go
+ from scipy.stats import linregress
  from sklearn.metrics import (
      explained_variance_score,
      mean_absolute_percentage_error,
      mean_squared_error,
+     r2_score,
  )
- try:
-     from scipy.stats import linregress
- except:
-     from sklearn.metrics import r2_score

  from ads.common.object_storage_details import ObjectStorageDetails
  from ads.dataset.label_encoder import DataFrameLabelEncoder
  from ads.opctl import logger
-
- from .const import SupportedMetrics, SupportedModels, RENDER_LIMIT
- from .errors import ForecastInputDataError, ForecastSchemaYamlError
- from .operator_config import ForecastOperatorSpec, ForecastOperatorConfig
- from ads.opctl.operator.lowcode.common.utils import merge_category_columns
  from ads.opctl.operator.lowcode.forecast.const import ForecastOutputColumns
- import report_creator as rc
+ from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import (
+     ForecastOutput,
+     TestData,
+ )

+ from .const import RENDER_LIMIT, SupportedMetrics

- def _label_encode_dataframe(df, no_encode=set()):
+ logging.getLogger("report_creator").setLevel(logging.WARNING)
+
+
+ def _label_encode_dataframe(df, no_encode: Set = None):
      df_to_encode = df[list(set(df.columns) - no_encode)]
      le = DataFrameLabelEncoder().fit(df_to_encode)
      return le, le.transform(df)
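`_label_encode_dataframe` drops its mutable default argument (`no_encode=set()`) in favor of a `None` sentinel. The usual companion move is to normalize inside the body, because a literal default is created once and shared across every call; a sketch of the idiom (the helper name is illustrative):

    from typing import Optional, Set

    def filter_columns(columns, no_encode: Optional[Set] = None):
        # A fresh set per call; callers can no longer mutate a shared default.
        no_encode = no_encode or set()
        return [c for c in columns if c not in no_encode]

    print(filter_columns(["a", "b", "c"], {"b"}))  # ['a', 'c']
    print(filter_columns(["a", "b", "c"]))         # ['a', 'b', 'c']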
@@ -55,15 +54,14 @@ def smape(actual, predicted) -> float:
      denominator[zero_mask] = 1

      numerator = np.abs(actual - predicted)
-     default_output = np.ones_like(numerator) * np.inf

      abs_error = np.divide(numerator, denominator)
      return round(np.mean(abs_error) * 100, 2)


  def _build_metrics_per_horizon(
-     test_data: "TestData",
-     output: "ForecastOutput",
+     test_data: TestData,
+     output: ForecastOutput,
  ) -> pd.DataFrame:
      """
      Calculates Mean sMAPE, Median sMAPE, Mean MAPE, Median MAPE, Mean wMAPE, Median wMAPE for each horizon
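The `smape` hunk only removes a dead `default_output` assignment; the metric itself is unchanged. A condensed, runnable sketch of the computation (the operator builds a `zero_mask` over the inputs first, which is equivalent since the denominator is zero only when actual and predicted both are):

    import numpy as np

    def smape(actual, predicted) -> float:
        actual, predicted = np.asarray(actual, float), np.asarray(predicted, float)
        denominator = (np.abs(actual) + np.abs(predicted)) / 2
        denominator[denominator == 0] = 1  # 0/0 terms become 0/1 = 0
        abs_error = np.abs(actual - predicted) / denominator
        return round(np.mean(abs_error) * 100, 2)

    # |10-12| / ((10+12)/2) = 2/11 for the first point, 0 for the 0/0 point:
    print(smape([10, 0], [12, 0]))  # 9.09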
@@ -173,7 +171,7 @@


  def load_pkl(filepath):
-     storage_options = dict()
+     storage_options = {}
      if ObjectStorageDetails.is_oci_path(filepath):
          storage_options = default_signer()

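`load_pkl` attaches storage options (an OCI signer) only when the path is an object-storage URI. A sketch of the underlying fsspec pattern, under the assumption that the options are passed through as keyword arguments (the function name is illustrative; in the package, `ObjectStorageDetails.is_oci_path` and `default_signer` make this decision):

    import cloudpickle
    import fsspec

    def load_pickle(filepath, storage_options=None):
        # Local paths need no credentials; remote ones get signer kwargs.
        storage_options = storage_options or {}
        with fsspec.open(filepath, "rb", **storage_options) as f:
            return cloudpickle.load(f)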
@@ -195,13 +193,13 @@ def write_pkl(obj, filename, output_dir, storage_options):
  def _build_metrics_df(y_true, y_pred, series_id):
      if len(y_true) == 0 or len(y_pred) == 0:
          return pd.DataFrame()
-     metrics = dict()
+     metrics = {}
      metrics["sMAPE"] = smape(actual=y_true, predicted=y_pred)
      metrics["MAPE"] = mean_absolute_percentage_error(y_true=y_true, y_pred=y_pred)
      metrics["RMSE"] = np.sqrt(mean_squared_error(y_true=y_true, y_pred=y_pred))
      try:
          metrics["r2"] = linregress(y_true, y_pred).rvalue ** 2
-     except:
+     except Exception:
          metrics["r2"] = r2_score(y_true=y_true, y_pred=y_pred)
      metrics["Explained Variance"] = explained_variance_score(
          y_true=y_true, y_pred=y_pred
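With scipy now a hard import, `r2` is computed as the squared Pearson correlation from `linregress`, and `sklearn`'s `r2_score` is only a runtime fallback (e.g. for constant input). The two disagree whenever the forecast is biased, which is worth knowing when reading the metrics table:

    import numpy as np
    from scipy.stats import linregress
    from sklearn.metrics import r2_score

    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = y_true + 10.0  # perfectly correlated, but biased by +10

    print(linregress(y_true, y_pred).rvalue ** 2)  # 1.0: correlation ignores bias
    print(r2_score(y_true, y_pred))                # -79.0: penalizes the offset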
@@ -209,16 +207,13 @@ def _build_metrics_df(y_true, y_pred, series_id):
      return pd.DataFrame.from_dict(metrics, orient="index", columns=[series_id])


- def evaluate_train_metrics(output, metrics_col_name=None):
+ def evaluate_train_metrics(output):
      """
      Training metrics

      Parameters:
      output: ForecastOutputs

-     metrics_col_name: str
-         Only passed in if the series column was created artifically.
-         When passed in, replaces s_id as the column name in the metrics table
      """
      total_metrics = pd.DataFrame()
      for s_id in output.list_series_ids():
@@ -263,20 +258,21 @@ def _select_plot_list(fn, series_ids):
  def _add_unit(num, unit):
      return f"{num} {unit}"

+
  def get_auto_select_plot(backtest_results):
      fig = go.Figure()
      columns = backtest_results.columns.tolist()
      back_test_column = "backtest"
      columns.remove(back_test_column)
-     for i, column in enumerate(columns):
-         color = 0 #int(i * 255 / len(columns))
+     for column in columns:
          fig.add_trace(
              go.Scatter(
-             x=backtest_results[back_test_column],
-             y=backtest_results[column],
-             mode="lines",
-             name=column,
-             ))
+                 x=backtest_results[back_test_column],
+                 y=backtest_results[column],
+                 mode="lines",
+                 name=column,
+             )
+         )

      return rc.Widget(fig)

@@ -384,6 +380,7 @@ def get_forecast_plots(

      return _select_plot_list(plot_forecast_plotly, forecast_output.list_series_ids())

+
  def convert_target(target: str, target_col: str):
      """
      Removes the target_column that got appended to target.
ads/opctl/operator/lowcode/pii/model/report.py
@@ -1,10 +1,10 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

  # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/


+ import logging
  import os
  import random
  import tempfile
@@ -40,11 +40,13 @@ from ads.opctl.operator.lowcode.pii.utils import compute_rate

  try:
      import report_creator as rc
- except ImportError:
+ except ImportError as e:
      raise ModuleNotFoundError(
          f"`report-creator` module was not found. Please run "
          f"`pip install {OptionalDependency.PII}`."
-     )
+     ) from e
+
+ logging.getLogger("report_creator").setLevel(logging.WARNING)


  @dataclass(repr=True)
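`raise ... from e` chains the original `ImportError` onto the raised `ModuleNotFoundError`, so the traceback shows both errors instead of losing the root cause. In isolation (install hint shortened; the package interpolates `OptionalDependency.PII` into its message):

    try:
        import report_creator  # noqa: F401
    except ImportError as e:
        # The chained traceback prints the ImportError first, then:
        # "The above exception was the direct cause of the following exception:"
        raise ModuleNotFoundError(
            "`report-creator` module was not found. Please install the PII extra."
        ) from e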
@@ -139,13 +141,13 @@ def make_model_card(model_name="", readme_path=""):
          fig = go.Figure(
              data=[
                  go.Table(
-                     header=dict(values=list(df.columns)),
-                     cells=dict(values=[df.Metrics, df.Values]),
+                     header={"Columns": df.columns},
+                     cells={"Metrics": df.Metrics, "Values": df.Values},
                  )
              ]
          )
          eval_res_tb = rc.Widget(data=fig, caption="Evaluation Results")
-     except:
+     except Exception:
          eval_res_tb = rc.Text("-")
          logger.warning(
              "The given readme.md doesn't have correct template for Evaluation Results."
@@ -321,7 +323,9 @@ class PIIOperatorReport:
          self.report_sections = [title_text, report_description, time_proceed, structure]
          return self

-     def save_report(self, report_sections=None, report_uri=None, storage_options={}):
+     def save_report(
+         self, report_sections=None, report_uri=None, storage_options: Dict = None
+     ):
          with tempfile.TemporaryDirectory() as temp_dir:
              report_local_path = os.path.join(temp_dir, "___report.html")
              disable_print()