oracle-ads 2.11.7__py3-none-any.whl → 2.11.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. ads/aqua/__init__.py +24 -14
  2. ads/aqua/base.py +0 -2
  3. ads/aqua/cli.py +50 -2
  4. ads/aqua/decorator.py +8 -0
  5. ads/aqua/deployment.py +37 -34
  6. ads/aqua/evaluation.py +106 -49
  7. ads/aqua/extension/base_handler.py +18 -10
  8. ads/aqua/extension/common_handler.py +21 -2
  9. ads/aqua/extension/deployment_handler.py +1 -4
  10. ads/aqua/extension/evaluation_handler.py +1 -2
  11. ads/aqua/extension/finetune_handler.py +0 -1
  12. ads/aqua/extension/ui_handler.py +1 -12
  13. ads/aqua/extension/utils.py +4 -4
  14. ads/aqua/finetune.py +24 -11
  15. ads/aqua/model.py +2 -4
  16. ads/aqua/utils.py +39 -23
  17. ads/cli.py +19 -1
  18. ads/common/serializer.py +5 -4
  19. ads/common/utils.py +6 -2
  20. ads/config.py +1 -0
  21. ads/llm/serializers/runnable_parallel.py +7 -1
  22. ads/opctl/operator/lowcode/anomaly/README.md +1 -1
  23. ads/opctl/operator/lowcode/anomaly/environment.yaml +1 -1
  24. ads/opctl/operator/lowcode/anomaly/model/automlx.py +15 -10
  25. ads/opctl/operator/lowcode/anomaly/model/autots.py +9 -10
  26. ads/opctl/operator/lowcode/anomaly/model/base_model.py +34 -37
  27. ads/opctl/operator/lowcode/anomaly/model/tods.py +4 -4
  28. ads/opctl/operator/lowcode/anomaly/schema.yaml +1 -1
  29. ads/opctl/operator/lowcode/forecast/README.md +1 -1
  30. ads/opctl/operator/lowcode/forecast/environment.yaml +4 -4
  31. ads/opctl/operator/lowcode/forecast/model/arima.py +19 -21
  32. ads/opctl/operator/lowcode/forecast/model/automlx.py +36 -42
  33. ads/opctl/operator/lowcode/forecast/model/autots.py +41 -25
  34. ads/opctl/operator/lowcode/forecast/model/base_model.py +93 -107
  35. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +51 -45
  36. ads/opctl/operator/lowcode/forecast/model/prophet.py +32 -27
  37. ads/opctl/operator/lowcode/forecast/schema.yaml +2 -2
  38. ads/opctl/operator/lowcode/forecast/utils.py +4 -4
  39. ads/opctl/operator/lowcode/pii/README.md +1 -1
  40. ads/opctl/operator/lowcode/pii/environment.yaml +1 -1
  41. ads/opctl/operator/lowcode/pii/model/report.py +71 -70
  42. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.8.dist-info}/METADATA +5 -5
  43. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.8.dist-info}/RECORD +46 -46
  44. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.8.dist-info}/LICENSE.txt +0 -0
  45. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.8.dist-info}/WHEEL +0 -0
  46. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.8.dist-info}/entry_points.txt +0 -0
ads/llm/serializers/runnable_parallel.py

@@ -1,3 +1,9 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*--
+
+ # Copyright (c) 2024 Oracle and/or its affiliates.
+ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
  from langchain.schema.runnable import RunnableParallel
  from langchain.load.dump import dumpd
  from langchain.load.load import load
@@ -10,7 +16,7 @@ class RunnableParallelSerializer:

  @staticmethod
  def load(config: dict, **kwargs):
- steps = config["kwargs"]["steps"]
+ steps = config.get("kwargs", dict()).get("steps", dict())
  steps = {k: load(v, **kwargs) for k, v in steps.items()}
  return RunnableParallel(**steps)
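The reworked `load()` above degrades gracefully when a serialized config lacks `kwargs` or `steps`. A minimal, self-contained sketch of that lookup pattern (plain dicts stand in for a real langchain config, and `extract_steps` is a hypothetical helper, not part of oracle-ads):

```python
# Sketch of the defensive lookup now used in RunnableParallelSerializer.load().
# `extract_steps` is a hypothetical helper used only for illustration.
def extract_steps(config: dict) -> dict:
    # A config missing "kwargs" or "steps" yields {} instead of raising KeyError.
    return config.get("kwargs", dict()).get("steps", dict())


print(extract_steps({"kwargs": {"steps": {"left": {"lc": 1}, "right": {"lc": 1}}}}))
print(extract_steps({}))  # {} -- the old indexing version raised KeyError here
```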
ads/opctl/operator/lowcode/anomaly/README.md

@@ -35,7 +35,7 @@ All generated configurations should be ready to use without the need for any add
  To run anomaly detection locally, create and activate a new conda environment (`ads-anomaly`). Install all the required libraries listed in the `environment.yaml` file.

  ```yaml
- - datapane
+ - report-creator
  - cerberus
  - oracle-automlx==23.4.1
  - oracle-automlx[classic]==23.4.1
ads/opctl/operator/lowcode/anomaly/environment.yaml

@@ -5,7 +5,7 @@ dependencies:
  - python=3.8
  - pip
  - pip:
- - datapane
+ - report-creator
  - cerberus
  - oracle-automlx==23.4.1
  - oracle-automlx[classic]==23.4.1
ads/opctl/operator/lowcode/anomaly/model/automlx.py

@@ -1,7 +1,7 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import pandas as pd
@@ -27,8 +27,13 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
  def _build_model(self) -> pd.DataFrame:
  from automlx import init
  import logging
+
  try:
- init(engine="ray", engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}, loglevel=logging.CRITICAL)
+ init(
+ engine="ray",
+ engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
+ loglevel=logging.CRITICAL,
+ )
  except Exception as e:
  logger.info("Ray already initialized")
  date_column = self.spec.datetime_column.name
@@ -68,21 +73,21 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
  return anomaly_output

  def _generate_report(self):
- import datapane as dp
+ import report_creator as rc

  """The method that needs to be implemented on the particular model level."""
- selected_models_text = dp.Text(
- f"## Selected Models Overview \n "
- "The following tables provide information regarding the chosen model."
- )
- all_sections = [selected_models_text]
+ other_sections = [
+ rc.Heading("Selected Models Overview", level=2),
+ rc.Text(
+ "The following tables provide information regarding the chosen model."
+ ),
+ ]

- model_description = dp.Text(
+ model_description = rc.Text(
  "The automlx model automatically pre-processes, selects and engineers "
  "high-quality features in your dataset, which then given to an automatically "
  "chosen and optimized machine learning model.."
  )
- other_sections = all_sections

  return (
  model_description,
ads/opctl/operator/lowcode/anomaly/model/autots.py

@@ -1,7 +1,7 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import pandas as pd
@@ -81,21 +81,20 @@ class AutoTSOperatorModel(AnomalyOperatorBaseModel):
  return anomaly_output

  def _generate_report(self):
- import datapane as dp
+ import report_creator as rc

  """The method that needs to be implemented on the particular model level."""
- selected_models_text = dp.Text(
- f"## Selected Models Overview \n "
- "The following tables provide information regarding the chosen model."
- )
- all_sections = [selected_models_text]
-
- model_description = dp.Text(
+ other_sections = [
+ rc.Heading("Selected Models Overview", level=2),
+ rc.Text(
+ "The following tables provide information regarding the chosen model."
+ ),
+ ]
+ model_description = rc.Text(
  "The automlx model automatically pre-processes, selects and engineers "
  "high-quality features in your dataset, which then given to an automatically "
  "chosen and optimized machine learning model.."
  )
- other_sections = all_sections

  return (
  model_description,
ads/opctl/operator/lowcode/anomaly/model/base_model.py

@@ -1,7 +1,7 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import os
@@ -57,7 +57,7 @@ class AnomalyOperatorBaseModel(ABC):

  def generate_report(self):
  """Generates the report."""
- import datapane as dp
+ import report_creator as rc
  import matplotlib.pyplot as plt

  start_time = time.time()
@@ -79,12 +79,10 @@
  anomaly_output, test_data, elapsed_time
  )
  table_blocks = [
- dp.DataTable(df, label=col)
+ rc.DataTable(df, label=col, index=True)
  for col, df in self.datasets.full_data_dict.items()
  ]
- data_table = (
- dp.Select(blocks=table_blocks) if len(table_blocks) > 1 else table_blocks[0]
- )
+ data_table = rc.Select(blocks=table_blocks)
  date_column = self.spec.datetime_column.name

  blocks = []
@@ -106,44 +104,42 @@ class AnomalyOperatorBaseModel(ABC):
  plt.xlabel(date_column)
  plt.ylabel(col)
  plt.title(f"`{col}` with reference to anomalies")
- figure_blocks.append(ax)
- blocks.append(dp.Group(blocks=figure_blocks, label=target))
- plots = dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
+ figure_blocks.append(rc.Widget(ax))
+ blocks.append(rc.Group(*figure_blocks, label=target))
+ plots = rc.Select(blocks)

  report_sections = []
- title_text = dp.Text("# Anomaly Detection Report")
-
- yaml_appendix_title = dp.Text(f"## Reference: YAML File")
- yaml_appendix = dp.Code(code=self.config.to_yaml(), language="yaml")
- summary = dp.Blocks(
- blocks=[
- dp.Group(
- dp.Text(f"You selected the **`{self.spec.model}`** model."),
- dp.Text(
- "Based on your dataset, you could have also selected "
- f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
- ),
- dp.BigNumber(
- heading="Analysis was completed in ",
- value=human_time_friendly(elapsed_time),
- ),
- label="Summary",
- )
- ]
+ title_text = rc.Heading("Anomaly Detection Report", level=1)
+
+ yaml_appendix_title = rc.Heading("Reference: YAML File", level=2)
+ yaml_appendix = rc.Yaml(self.config.to_dict())
+ summary = rc.Block(
+ rc.Group(
+ rc.Text(f"You selected the **`{self.spec.model}`** model."),
+ rc.Text(
+ "Based on your dataset, you could have also selected "
+ f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
+ ),
+ rc.Metric(
+ heading="Analysis was completed in ",
+ value=human_time_friendly(elapsed_time),
+ ),
+ label="Summary",
+ )
  )
- sec_text = dp.Text(f"## Train Evaluation Metrics")
- sec = dp.DataTable(self._evaluation_metrics(anomaly_output))
+ sec_text = rc.Heading("Train Evaluation Metrics", level=2)
+ sec = rc.DataTable(self._evaluation_metrics(anomaly_output), index=True)
  evaluation_metrics_sec = [sec_text, sec]

  test_metrics_sections = []
  if total_metrics is not None and not total_metrics.empty:
- sec_text = dp.Text(f"## Test Data Evaluation Metrics")
- sec = dp.DataTable(total_metrics)
+ sec_text = rc.Heading("Test Data Evaluation Metrics", level=2)
+ sec = rc.DataTable(total_metrics, index=True)
  test_metrics_sections = test_metrics_sections + [sec_text, sec]

  if summary_metrics is not None and not summary_metrics.empty:
- sec_text = dp.Text(f"## Test Data Summary Metrics")
- sec = dp.DataTable(summary_metrics)
+ sec_text = rc.Heading("Test Data Summary Metrics", level=2)
+ sec = rc.DataTable(summary_metrics, index=True)
  test_metrics_sections = test_metrics_sections + [sec_text, sec]

  report_sections = (
@@ -248,7 +244,7 @@ class AnomalyOperatorBaseModel(ABC):
  test_metrics: pd.DataFrame,
  ):
  """Saves resulting reports to the given folder."""
- import datapane as dp
+ import report_creator as rc

  unique_output_dir = find_output_dirname(self.spec.output_directory)

@@ -257,11 +253,12 @@
  else:
  storage_options = dict()

- # datapane html report
+ # report-creator html report
  with tempfile.TemporaryDirectory() as temp_dir:
  report_local_path = os.path.join(temp_dir, "___report.html")
  disable_print()
- dp.save_report(report_sections, report_local_path)
+ with rc.ReportCreator("My Report") as report:
+ report.save(rc.Block(*report_sections), report_local_path)
  enable_print()
  with open(report_local_path) as f1:
  with fsspec.open(
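The base-model hunks above replace datapane's `dp.save_report` flow with report-creator's context-manager API. A minimal sketch of the new save pattern, built only from calls visible in this diff (`rc.ReportCreator`, `rc.Block`, `rc.Heading`, `rc.Text`, `rc.DataTable`); it assumes `report-creator` is installed and that these signatures match the version oracle-ads 2.11.8 targets:

```python
# Hedged sketch of the report-creator save pattern this diff migrates to;
# only calls that appear in the diff are used, signatures assumed compatible.
import pandas as pd
import report_creator as rc

metrics = pd.DataFrame({"metric": ["precision", "recall"], "value": [0.91, 0.87]})

report_sections = [
    rc.Heading("Anomaly Detection Report", level=1),
    rc.Heading("Train Evaluation Metrics", level=2),
    rc.DataTable(metrics, index=True),
    rc.Text("Example body text for a report section."),
]

# Mirrors _save_report(): wrap the section list in a Block and save to HTML.
with rc.ReportCreator("My Report") as report:
    report.save(rc.Block(*report_sections), "___report.html")
```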
ads/opctl/operator/lowcode/anomaly/model/tods.py

@@ -1,7 +1,7 @@
  # #!/usr/bin/env python
  # # -*- coding: utf-8 -*--

- # # Copyright (c) 2023 Oracle and/or its affiliates.
+ # # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  # import importlib
@@ -97,16 +97,16 @@
  # return anomaly_output

  # def _generate_report(self):
- # import datapane as dp
+ # import report_creator as rc

  # """The method that needs to be implemented on the particular model level."""
- # selected_models_text = dp.Text(
+ # selected_models_text = rc.Text(
  # f"## Selected Models Overview \n "
  # "The following tables provide information regarding the chosen model."
  # )
  # all_sections = [selected_models_text]

- # model_description = dp.Text(
+ # model_description = rc.Text(
  # "The tods model is a full-stack automated machine learning system for outlier detection "
  # "on univariate / multivariate time-series data. It provides exhaustive modules for building "
  # "machine learning-based outlier detection systems and wide range of algorithms."
ads/opctl/operator/lowcode/anomaly/schema.yaml

@@ -323,7 +323,7 @@ spec:
  missing_value_imputation:
  type: boolean
  required: false
- default: true
+ default: false

  generate_report:
  type: boolean
ads/opctl/operator/lowcode/forecast/README.md

@@ -35,7 +35,7 @@ To run forecasting locally, create and activate a new conda environment (`ads-fo
  - neuralprophet
  - pmdarima
  - statsmodels
- - datapane
+ - report-creator
  - cerberus
  - sktime
  - optuna==3.1.0
ads/opctl/operator/lowcode/forecast/environment.yaml

@@ -10,12 +10,12 @@ dependencies:
  - neuralprophet
  - pmdarima
  - statsmodels
- - datapane
+ - report-creator
  - cerberus
  - sktime
  - shap
  - autots[additional]
- - optuna==3.1.0
- - oracle-automlx==23.4.1
- - oracle-automlx[forecasting]==23.4.1
+ - optuna
+ - oracle-automlx>=23.4.1
+ - oracle-automlx[forecasting]>=23.4.1
  - fire
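The forecast environment above drops the optuna pin and relaxes oracle-automlx to `>=23.4.1`. A small, hypothetical check (assuming the `packaging` package and these distributions are installed) for confirming a local environment meets the loosened constraints:

```python
# Hypothetical sanity check for the relaxed forecast dependencies:
# oracle-automlx must now be >=23.4.1 and optuna is no longer pinned to 3.1.0.
from importlib.metadata import version

from packaging.version import Version  # assumes 'packaging' is installed

assert Version(version("oracle-automlx")) >= Version("23.4.1")
print("optuna:", version("optuna"))                  # any installed optuna release
print("report-creator:", version("report-creator"))  # replaces datapane in this env
```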
ads/opctl/operator/lowcode/forecast/model/arima.py

@@ -1,7 +1,7 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import pandas as pd
@@ -66,7 +66,7 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  target = self.original_target_column
  self.forecast_output.init_series_output(series_id=s_id, data_at_series=df)
  # If trend is constant, remove constant columns
- if 'trend' not in model_kwargs or model_kwargs['trend'] == 'c':
+ if "trend" not in model_kwargs or model_kwargs["trend"] == "c":
  self.constant_cols[s_id] = df.columns[df.nunique() == 1]
  df = df.drop(columns=self.constant_cols[s_id])

@@ -147,29 +147,25 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):

  def _generate_report(self):
  """The method that needs to be implemented on the particular model level."""
- import datapane as dp
+ import report_creator as rc
+
  all_sections = []
  if len(self.models) > 0:
- sec5_text = dp.Text(f"## ARIMA Model Parameters")
+ sec5_text = rc.Heading("ARIMA Model Parameters", level=2)
  blocks = [
- dp.HTML(
+ rc.Html(
  m.summary().as_html(),
  label=s_id,
  )
  for i, (s_id, m) in enumerate(self.models.items())
  ]
- sec5 = dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
+ sec5 = rc.Select(blocks=blocks)
  all_sections = [sec5_text, sec5]

  if self.spec.generate_explanations:
  try:
  # If the key is present, call the "explain_model" method
  self.explain_model()
- # Create a markdown text block for the global explanation section
- global_explanation_text = dp.Text(
- f"## Global Explanation of Models \n "
- "The following tables provide the feature attribution for the global explainability."
- )

  # Convert the global explanation data to a DataFrame
  global_explanation_df = pd.DataFrame(self.global_explanation)
@@ -185,9 +181,12 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  )

  # Create a markdown section for the global explainability
- global_explanation_section = dp.Blocks(
- "### Global Explainability ",
- dp.DataTable(self.formatted_global_explanation),
+ global_explanation_section = rc.Block(
+ rc.Heading("Global Explanation of Models", level=2),
+ rc.Text(
+ "The following tables provide the feature attribution for the global explainability."
+ ),
+ rc.DataTable(self.formatted_global_explanation, index=True),
  )

  aggregate_local_explanations = pd.DataFrame()
@@ -199,30 +198,29 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  )
  self.formatted_local_explanation = aggregate_local_explanations

- local_explanation_text = dp.Text(f"## Local Explanation of Models \n ")
  blocks = [
- dp.DataTable(
+ rc.DataTable(
  local_ex_df.div(local_ex_df.abs().sum(axis=1), axis=0) * 100,
  label=s_id,
+ index=True,
  )
  for s_id, local_ex_df in self.local_explanation.items()
  ]
- local_explanation_section = (
- dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
+ local_explanation_section = rc.Block(
+ rc.Heading("Local Explanation of Models", level=2),
+ rc.Select(blocks=blocks),
  )

  # Append the global explanation text and section to the "all_sections" list
  all_sections = all_sections + [
- global_explanation_text,
  global_explanation_section,
- local_explanation_text,
  local_explanation_section,
  ]
  except Exception as e:
  logger.warn(f"Failed to generate Explanations with error: {e}.")
  logger.debug(f"Full Traceback: {traceback.format_exc()}")

- model_description = dp.Text(
+ model_description = rc.Text(
  "An autoregressive integrated moving average, or ARIMA, is a statistical "
  "analysis model that uses time series data to either better understand the "
  "data set or to predict future trends. A statistical model is autoregressive if "
ads/opctl/operator/lowcode/forecast/model/automlx.py

@@ -2,7 +2,7 @@
  # -*- coding: utf-8 -*--
  import traceback

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import pandas as pd
@@ -52,7 +52,6 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  ] = self.spec.preprocessing or model_kwargs_cleaned.get("preprocessing", True)
  return model_kwargs_cleaned, time_budget

-
  def preprocess(self, data, series_id=None): # TODO: re-use self.le for explanations
  _, df_encoded = _label_encode_dataframe(
  data,
@@ -77,8 +76,13 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  def _build_model(self) -> pd.DataFrame:
  from automlx import init
  import logging
+
  try:
- init(engine="ray", engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}, loglevel=logging.CRITICAL)
+ init(
+ engine="ray",
+ engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
+ loglevel=logging.CRITICAL,
+ )
  except Exception as e:
  logger.info("Ray already initialized")

@@ -167,12 +171,6 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):

  return self.forecast_output.get_forecast_long()

- @runtime_dependency(
- module="datapane",
- err_msg=(
- "Please run `pip3 install datapane` to install the required dependencies for report generation."
- ),
- )
  def _generate_report(self):
  """
  Generate the report for the automlx model.
@@ -183,25 +181,20 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):

  Returns
  -------
- - model_description (datapane.Text): A Text component containing the description of the automlx model.
- - other_sections (List[Union[datapane.Text, datapane.Blocks]]): A list of Text and Blocks components representing various sections of the report.
+ - model_description (report-creator.Text): A Text component containing the description of the automlx model.
+ - other_sections (List[Union[report-creator.Text, report-creator.Block]]): A list of Text and Blocks components representing various sections of the report.
  - forecast_col_name (str): The name of the forecasted column.
  - train_metrics (bool): A boolean value indicating whether to include train metrics in the report.
  - ds_column_series (pd.Series): The pd.Series object representing the datetime column of the dataset.
  - ds_forecast_col (pd.Series): The pd.Series object representing the forecasted column.
  - ci_col_names (List[str]): A list of column names for the confidence interval in the report.
  """
- import datapane as dp
+ import report_creator as rc

  """The method that needs to be implemented on the particular model level."""
- selected_models_text = dp.Text(
- f"## Selected Models Overview \n "
- "The following tables provide information regarding the "
- "chosen model for each series and the corresponding parameters of the models."
- )
  selected_models = dict()
  models = self.models
- all_sections = []
+ other_sections = []

  if len(self.models) > 0:
  for i, (s_id, m) in enumerate(models.items()):
@@ -214,39 +207,42 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  selected_models.items(), columns=["series_id", "best_selected_model"]
  )
  selected_df = selected_models_df["best_selected_model"].apply(pd.Series)
- selected_models_section = dp.Blocks(
- "### Best Selected Model", dp.DataTable(selected_df)
+ selected_models_section = rc.Block(
+ rc.Heading("Selected Models Overview", level=2),
+ rc.Text(
+ "The following tables provide information regarding the "
+ "chosen model for each series and the corresponding parameters of the models."
+ ),
+ rc.DataTable(selected_df, index=True),
  )

- all_sections = [selected_models_text, selected_models_section]
+ other_sections = [selected_models_section]

  if self.spec.generate_explanations:
  try:
  # If the key is present, call the "explain_model" method
  self.explain_model()

- # Create a markdown text block for the global explanation section
- global_explanation_text = dp.Text(
- f"## Global Explanation of Models \n "
- "The following tables provide the feature attribution for the global explainability."
- )
-
  # Convert the global explanation data to a DataFrame
  global_explanation_df = pd.DataFrame(self.global_explanation)

  self.formatted_global_explanation = (
- global_explanation_df / global_explanation_df.sum(axis=0) * 100
+ global_explanation_df / global_explanation_df.sum(axis=0) * 100
  )
  self.formatted_global_explanation = (
  self.formatted_global_explanation.rename(
- {self.spec.datetime_column.name: ForecastOutputColumns.DATE}, axis=1
+ {self.spec.datetime_column.name: ForecastOutputColumns.DATE},
+ axis=1,
  )
  )

  # Create a markdown section for the global explainability
- global_explanation_section = dp.Blocks(
- "### Global Explainability ",
- dp.DataTable(self.formatted_global_explanation),
+ global_explanation_section = rc.Block(
+ rc.Heading("Global Explanation of Models", level=2),
+ rc.Text(
+ "The following tables provide the feature attribution for the global explainability."
+ ),
+ rc.DataTable(self.formatted_global_explanation, index=True),
  )

  aggregate_local_explanations = pd.DataFrame()
@@ -258,34 +254,32 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  )
  self.formatted_local_explanation = aggregate_local_explanations

- local_explanation_text = dp.Text(f"## Local Explanation of Models \n ")
  blocks = [
- dp.DataTable(
+ rc.DataTable(
  local_ex_df.div(local_ex_df.abs().sum(axis=1), axis=0) * 100,
  label=s_id,
+ index=True,
  )
  for s_id, local_ex_df in self.local_explanation.items()
  ]
- local_explanation_section = (
- dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
+ local_explanation_section = rc.Block(
+ rc.Heading("Local Explanation of Models", level=2),
+ rc.Select(blocks=blocks),
  )

- # Append the global explanation text and section to the "all_sections" list
- all_sections = all_sections + [
- global_explanation_text,
+ # Append the global explanation text and section to the "other_sections" list
+ other_sections = other_sections + [
  global_explanation_section,
- local_explanation_text,
  local_explanation_section,
  ]
  except Exception as e:
  logger.warn(f"Failed to generate Explanations with error: {e}.")
  logger.debug(f"Full Traceback: {traceback.format_exc()}")

- model_description = dp.Text(
+ model_description = rc.Text(
  "The AutoMLx model automatically preprocesses, selects and engineers "
  "high-quality features in your dataset, which are then provided for further processing."
  )
- other_sections = all_sections

  return (
  model_description,