oracle-ads 2.11.7__py3-none-any.whl → 2.11.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. ads/aqua/__init__.py +24 -14
  2. ads/aqua/base.py +0 -2
  3. ads/aqua/cli.py +50 -2
  4. ads/aqua/decorator.py +8 -0
  5. ads/aqua/deployment.py +37 -34
  6. ads/aqua/evaluation.py +106 -49
  7. ads/aqua/extension/base_handler.py +18 -10
  8. ads/aqua/extension/common_handler.py +21 -2
  9. ads/aqua/extension/deployment_handler.py +1 -4
  10. ads/aqua/extension/evaluation_handler.py +1 -2
  11. ads/aqua/extension/finetune_handler.py +0 -1
  12. ads/aqua/extension/ui_handler.py +1 -12
  13. ads/aqua/extension/utils.py +4 -4
  14. ads/aqua/finetune.py +24 -11
  15. ads/aqua/model.py +2 -4
  16. ads/aqua/utils.py +40 -23
  17. ads/cli.py +19 -1
  18. ads/common/serializer.py +5 -4
  19. ads/common/utils.py +6 -2
  20. ads/config.py +1 -0
  21. ads/llm/serializers/runnable_parallel.py +7 -1
  22. ads/model/datascience_model.py +20 -0
  23. ads/opctl/operator/lowcode/anomaly/README.md +1 -1
  24. ads/opctl/operator/lowcode/anomaly/environment.yaml +1 -1
  25. ads/opctl/operator/lowcode/anomaly/model/automlx.py +15 -10
  26. ads/opctl/operator/lowcode/anomaly/model/autots.py +9 -10
  27. ads/opctl/operator/lowcode/anomaly/model/base_model.py +34 -37
  28. ads/opctl/operator/lowcode/anomaly/model/tods.py +4 -4
  29. ads/opctl/operator/lowcode/anomaly/schema.yaml +1 -1
  30. ads/opctl/operator/lowcode/forecast/README.md +1 -1
  31. ads/opctl/operator/lowcode/forecast/environment.yaml +4 -4
  32. ads/opctl/operator/lowcode/forecast/model/arima.py +19 -21
  33. ads/opctl/operator/lowcode/forecast/model/automlx.py +36 -42
  34. ads/opctl/operator/lowcode/forecast/model/autots.py +41 -25
  35. ads/opctl/operator/lowcode/forecast/model/base_model.py +93 -107
  36. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +51 -45
  37. ads/opctl/operator/lowcode/forecast/model/prophet.py +32 -27
  38. ads/opctl/operator/lowcode/forecast/schema.yaml +2 -2
  39. ads/opctl/operator/lowcode/forecast/utils.py +4 -4
  40. ads/opctl/operator/lowcode/pii/README.md +1 -1
  41. ads/opctl/operator/lowcode/pii/environment.yaml +1 -1
  42. ads/opctl/operator/lowcode/pii/model/report.py +71 -70
  43. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/METADATA +5 -5
  44. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/RECORD +47 -47
  45. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/LICENSE.txt +0 -0
  46. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/WHEEL +0 -0
  47. {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/entry_points.txt +0 -0
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*--
 import traceback

-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

 import pandas as pd
@@ -52,7 +52,6 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
         ] = self.spec.preprocessing or model_kwargs_cleaned.get("preprocessing", True)
         return model_kwargs_cleaned, time_budget

-
     def preprocess(self, data, series_id=None):  # TODO: re-use self.le for explanations
         _, df_encoded = _label_encode_dataframe(
             data,
@@ -77,8 +76,13 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
     def _build_model(self) -> pd.DataFrame:
         from automlx import init
         import logging
+
         try:
-            init(engine="ray", engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}, loglevel=logging.CRITICAL)
+            init(
+                engine="ray",
+                engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
+                loglevel=logging.CRITICAL,
+            )
         except Exception as e:
             logger.info("Ray already initialized")

@@ -167,12 +171,6 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):

         return self.forecast_output.get_forecast_long()

-    @runtime_dependency(
-        module="datapane",
-        err_msg=(
-            "Please run `pip3 install datapane` to install the required dependencies for report generation."
-        ),
-    )
     def _generate_report(self):
         """
         Generate the report for the automlx model.
@@ -183,25 +181,20 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):

         Returns
         -------
-        - model_description (datapane.Text): A Text component containing the description of the automlx model.
-        - other_sections (List[Union[datapane.Text, datapane.Blocks]]): A list of Text and Blocks components representing various sections of the report.
+        - model_description (report-creator.Text): A Text component containing the description of the automlx model.
+        - other_sections (List[Union[report-creator.Text, report-creator.Block]]): A list of Text and Blocks components representing various sections of the report.
         - forecast_col_name (str): The name of the forecasted column.
         - train_metrics (bool): A boolean value indicating whether to include train metrics in the report.
         - ds_column_series (pd.Series): The pd.Series object representing the datetime column of the dataset.
         - ds_forecast_col (pd.Series): The pd.Series object representing the forecasted column.
         - ci_col_names (List[str]): A list of column names for the confidence interval in the report.
         """
-        import datapane as dp
+        import report_creator as rc

         """The method that needs to be implemented on the particular model level."""
-        selected_models_text = dp.Text(
-            f"## Selected Models Overview \n "
-            "The following tables provide information regarding the "
-            "chosen model for each series and the corresponding parameters of the models."
-        )
         selected_models = dict()
         models = self.models
-        all_sections = []
+        other_sections = []

         if len(self.models) > 0:
             for i, (s_id, m) in enumerate(models.items()):
@@ -214,39 +207,42 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
                 selected_models.items(), columns=["series_id", "best_selected_model"]
             )
             selected_df = selected_models_df["best_selected_model"].apply(pd.Series)
-            selected_models_section = dp.Blocks(
-                "### Best Selected Model", dp.DataTable(selected_df)
+            selected_models_section = rc.Block(
+                rc.Heading("Selected Models Overview", level=2),
+                rc.Text(
+                    "The following tables provide information regarding the "
+                    "chosen model for each series and the corresponding parameters of the models."
+                ),
+                rc.DataTable(selected_df, index=True),
             )

-            all_sections = [selected_models_text, selected_models_section]
+            other_sections = [selected_models_section]

         if self.spec.generate_explanations:
             try:
                 # If the key is present, call the "explain_model" method
                 self.explain_model()

-                # Create a markdown text block for the global explanation section
-                global_explanation_text = dp.Text(
-                    f"## Global Explanation of Models \n "
-                    "The following tables provide the feature attribution for the global explainability."
-                )
-
                 # Convert the global explanation data to a DataFrame
                 global_explanation_df = pd.DataFrame(self.global_explanation)

                 self.formatted_global_explanation = (
-                    global_explanation_df / global_explanation_df.sum(axis=0) * 100
+                    global_explanation_df / global_explanation_df.sum(axis=0) * 100
                 )
                 self.formatted_global_explanation = (
                     self.formatted_global_explanation.rename(
-                        {self.spec.datetime_column.name: ForecastOutputColumns.DATE}, axis=1
+                        {self.spec.datetime_column.name: ForecastOutputColumns.DATE},
+                        axis=1,
                     )
                 )

                 # Create a markdown section for the global explainability
-                global_explanation_section = dp.Blocks(
-                    "### Global Explainability ",
-                    dp.DataTable(self.formatted_global_explanation),
+                global_explanation_section = rc.Block(
+                    rc.Heading("Global Explanation of Models", level=2),
+                    rc.Text(
+                        "The following tables provide the feature attribution for the global explainability."
+                    ),
+                    rc.DataTable(self.formatted_global_explanation, index=True),
                 )

                 aggregate_local_explanations = pd.DataFrame()
@@ -258,34 +254,32 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
                 )
                 self.formatted_local_explanation = aggregate_local_explanations

-                local_explanation_text = dp.Text(f"## Local Explanation of Models \n ")
                 blocks = [
-                    dp.DataTable(
+                    rc.DataTable(
                         local_ex_df.div(local_ex_df.abs().sum(axis=1), axis=0) * 100,
                         label=s_id,
+                        index=True,
                     )
                     for s_id, local_ex_df in self.local_explanation.items()
                 ]
-                local_explanation_section = (
-                    dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
+                local_explanation_section = rc.Block(
+                    rc.Heading("Local Explanation of Models", level=2),
+                    rc.Select(blocks=blocks),
                 )

-                # Append the global explanation text and section to the "all_sections" list
-                all_sections = all_sections + [
-                    global_explanation_text,
+                # Append the global explanation text and section to the "other_sections" list
+                other_sections = other_sections + [
                     global_explanation_section,
-                    local_explanation_text,
                     local_explanation_section,
                 ]
             except Exception as e:
                 logger.warn(f"Failed to generate Explanations with error: {e}.")
                 logger.debug(f"Full Traceback: {traceback.format_exc()}")

-        model_description = dp.Text(
+        model_description = rc.Text(
             "The AutoMLx model automatically preprocesses, selects and engineers "
             "high-quality features in your dataset, which are then provided for further processing."
         )
-        other_sections = all_sections

         return (
             model_description,
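The automlx.py hunks above replace datapane building blocks (dp.Text, dp.Blocks, dp.DataTable, dp.Select) with report_creator equivalents. A minimal sketch of the new composition pattern, restricted to calls that appear in this diff; the sample DataFrame and section text are placeholders, not the operator's actual data:

```python
import pandas as pd
import report_creator as rc  # the package this diff migrates to

# Placeholder data; the operator derives this frame from the fitted models.
selected_df = pd.DataFrame({"series_id": ["A", "B"], "model": ["ets", "arima"]})

# Headings become explicit components instead of "## ..." markdown inside dp.Text.
selected_models_section = rc.Block(
    rc.Heading("Selected Models Overview", level=2),
    rc.Text(
        "The following tables provide information regarding the "
        "chosen model for each series and the corresponding parameters of the models."
    ),
    rc.DataTable(selected_df, index=True),  # index=True keeps the DataFrame index visible
)
```

The same Block/Heading/Text/DataTable shape recurs in the autots.py and base_model.py hunks below.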
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--

-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

 import copy
@@ -82,13 +82,19 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
             drop_data_older_than_periods=self.spec.model_kwargs.get(
                 "drop_data_older_than_periods", None
             ),
-            model_list=self.spec.model_kwargs.get("model_list", "fast_parallel"),
-            transformer_list=self.spec.model_kwargs.get("transformer_list", "auto"),
+            model_list=self.spec.model_kwargs.get(
+                "model_list", "fast_parallel"
+            ),
+            transformer_list=self.spec.model_kwargs.get(
+                "transformer_list", "auto"
+            ),
             transformer_max_depth=self.spec.model_kwargs.get(
                 "transformer_max_depth", 6
             ),
             models_mode=self.spec.model_kwargs.get("models_mode", "random"),
-            num_validations=self.spec.model_kwargs.get("num_validations", "auto"),
+            num_validations=self.spec.model_kwargs.get(
+                "num_validations", "auto"
+            ),
             models_to_validate=self.spec.model_kwargs.get(
                 "models_to_validate", AUTOTS_MODELS_TO_VALIDATE
             ),
@@ -138,14 +144,15 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
                 values=additional_regressors,
             )
             future_reg = future_regressor[: -self.spec.horizon]
-            regr_fcst = future_regressor[-self.spec.horizon:]
+            regr_fcst = future_regressor[-self.spec.horizon :]
         else:
             future_reg = None
             regr_fcst = None

         for s_id in self.datasets.list_series_ids():
             self.forecast_output.init_series_output(
-                series_id=s_id, data_at_series=self.datasets.get_data_at_series(s_id)
+                series_id=s_id,
+                data_at_series=self.datasets.get_data_at_series(s_id),
             )

         if self.loaded_models is None:
@@ -213,7 +220,7 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):

         Returns:
             tuple: A tuple containing the following elements:
-                - model_description (dp.Text): A text object containing the description of the AutoTS model.
+                - model_description (rc.Text): A text object containing the description of the AutoTS model.
                 - other_sections (list): A list of sections to be included in the report.
                 - forecast_col_name (str): The name of the forecast column.
                 - train_metrics (bool): A boolean indicating whether to include train metrics.
@@ -221,37 +228,48 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
                 - ds_forecast_col (pd.Index): A pandas Index containing the forecast column values.
                 - ci_col_names (list): A list of column names for confidence intervals.
         """
-        import datapane as dp
+        import report_creator as rc
+
         all_sections = []
         if self.models:
             # Section 1: Forecast Overview
-            sec1_text = dp.Text(
-                "## Forecast Overview \n"
-                "These plots show your forecast in the context of historical data."
-            )
-            sec_1 = _select_plot_list(
-                lambda s_id: self.outputs.plot(self.models.df_wide_numeric, series=s_id),
+
+            sec_1_plots = _select_plot_list(
+                lambda s_id: self.outputs.plot(
+                    self.models.df_wide_numeric, series=s_id
+                ),
                 self.datasets.list_series_ids(),
             )
+            section_1 = rc.Block(
+                rc.Heading("Forecast Overview", level=2),
+                rc.Text(
+                    "These plots show your forecast in the context of historical data."
+                ),
+                sec_1_plots,
+            )

             # Section 2: AutoTS Model Parameters
-            sec2_text = dp.Text(f"## AutoTS Model Parameters")
+            sec2_text = rc.Heading("AutoTS Model Parameters", level=2)
             try:
-                sec2 = dp.Code(
-                    code=yaml.dump(list(self.models.best_model.T.to_dict().values())[0]),
-                    language="yaml",
+                sec2 = rc.Yaml(
+                    yaml.dump(list(self.models.best_model.T.to_dict().values())[0]),
                 )

             except KeyError as ke:
-                logger.warn(f"Issue generating Model Parameters Table Section. Skipping")
-                sec2 = dp.Text(f"Error generating model parameters.")
-            all_sections = [sec1_text, sec_1, sec2_text, sec2]
+                logger.warn(
+                    f"Issue generating Model Parameters Table Section. Skipping"
+                )
+                sec2 = rc.Text("Error generating model parameters.")
+
+            section_2 = rc.Block(sec2_text, sec2)
+
+            all_sections = [sec_1_plots, section_2]

         if self.spec.generate_explanations:
             logger.warn(f"Explanations not yet supported for the AutoTS Module")

         # Model Description
-        model_description = dp.Text(
+        model_description = rc.Text(
             "AutoTS is a time series package for Python designed for rapidly deploying high-accuracy forecasts at scale. "
             "In 2023, AutoTS has won in the M6 forecasting competition, "
             "delivering the highest performance investment decisions across 12 months of stock market forecasting."
@@ -284,9 +302,7 @@ class AutoTSOperatorModel(ForecastOperatorBaseModel):
             ).T
             df = pd.concat([mapes, scores])
         except Exception as e:
-            logger.debug(
-                f"Failed to generate training metrics"
-            )
+            logger.debug(f"Failed to generate training metrics")
             logger.debug(f"Received Error Statement: {e}")

         return df
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--

-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

 import json
@@ -88,7 +88,9 @@ class ForecastOperatorBaseModel(ABC):
         self.formatted_local_explanation = None

         self.forecast_col_name = "yhat"
-        self.perform_tuning = (self.spec.tuning != None) and (self.spec.tuning.n_trials != None)
+        self.perform_tuning = (self.spec.tuning != None) and (
+            self.spec.tuning.n_trials != None
+        )

     def generate_report(self):
         """Generates the forecasting report."""
@@ -100,7 +102,7 @@ class ForecastOperatorBaseModel(ABC):
             warnings.simplefilter(action="ignore", category=UserWarning)
             warnings.simplefilter(action="ignore", category=RuntimeWarning)
             warnings.simplefilter(action="ignore", category=ConvergenceWarning)
-            import datapane as dp
+            import report_creator as rc

             # load models if given
             if self.spec.previous_output_dir is not None:
@@ -140,69 +142,58 @@ class ForecastOperatorBaseModel(ABC):
                 other_sections,
             ) = self._generate_report()

-            title_text = dp.Text("# Forecast Report")
-
-            md_columns = " * ".join(
-                [f"{s_id} \n" for s_id in self.datasets.list_series_ids()]
-            )
-
-            header_section = dp.Blocks(
-                blocks=[
-                    dp.Text(f"You selected the **`{self.spec.model}`** model."),
-                    model_description,
-                    dp.Text(
-                        "Based on your dataset, you could have also selected "
-                        f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
+            header_section = rc.Block(
+                rc.Heading("Forecast Report", level=1),
+                rc.Text(
+                    f"You selected the {self.spec.model} model.\n{model_description}\nBased on your dataset, you could have also selected any of the models: {SupportedModels.keys()}."
+                ),
+                rc.Group(
+                    rc.Metric(
+                        heading="Analysis was completed in ",
+                        value=human_time_friendly(elapsed_time),
                     ),
-                    dp.Group(
-                        dp.BigNumber(
-                            heading="Analysis was completed in ",
-                            value=human_time_friendly(elapsed_time),
-                        ),
-                        dp.BigNumber(
-                            heading="Starting time index",
-                            value=self.datasets.get_earliest_timestamp().strftime(
-                                "%B %d, %Y"
+                    rc.Metric(
+                        heading="Starting time index",
+                        value=self.datasets.get_earliest_timestamp().strftime(
+                            "%B %d, %Y"
                         ),
-                        dp.BigNumber(
-                            heading="Ending time index",
-                            value=self.datasets.get_latest_timestamp().strftime(
-                                "%B %d, %Y"
-                            ),
-                        ),
-                        dp.BigNumber(
-                            heading="Num series",
-                            value=len(self.datasets.list_series_ids()),
+                    ),
+                    rc.Metric(
+                        heading="Ending time index",
+                        value=self.datasets.get_latest_timestamp().strftime(
+                            "%B %d, %Y"
                         ),
-                        columns=4,
                     ),
-                ]
+                    rc.Metric(
+                        heading="Num series",
+                        value=len(self.datasets.list_series_ids()),
+                    ),
+                ),
             )

-            first_10_rows_blocks = [
-                dp.DataTable(
-                    df.head(10),
-                    caption="Start",
+            first_5_rows_blocks = [
+                rc.DataTable(
+                    df.head(5),
                     label=s_id,
+                    index=True,
                 )
                 for s_id, df in self.full_data_dict.items()
             ]

-            last_10_rows_blocks = [
-                dp.DataTable(
-                    df.tail(10),
-                    caption="End",
+            last_5_rows_blocks = [
+                rc.DataTable(
+                    df.tail(5),
                     label=s_id,
+                    index=True,
                 )
                 for s_id, df in self.full_data_dict.items()
             ]

             data_summary_blocks = [
-                dp.DataTable(
+                rc.DataTable(
                     df.describe(),
-                    caption="Summary Statistics",
                     label=s_id,
+                    index=True,
                 )
                 for s_id, df in self.full_data_dict.items()
             ]
@@ -210,44 +201,33 @@ class ForecastOperatorBaseModel(ABC):
             series_name = merged_category_column_name(
                 self.spec.target_category_columns
             )
-            series_subtext = dp.Text(f"Indexed by {series_name}")
-            first_10_title = dp.Text("### First 10 Rows of Data")
-            last_10_title = dp.Text("### Last 10 Rows of Data")
-            summary_title = dp.Text("### Data Summary Statistics")
-
-            if series_name is not None and len(self.datasets.list_series_ids()) > 1:
-                data_summary_sec = dp.Blocks(
-                    blocks=[
-                        first_10_title,
-                        series_subtext,
-                        dp.Select(blocks=first_10_rows_blocks),
-                        last_10_title,
-                        series_subtext,
-                        dp.Select(blocks=last_10_rows_blocks),
-                        summary_title,
-                        series_subtext,
-                        dp.Select(blocks=data_summary_blocks),
-                        dp.Text("----"),
-                    ]
-                )
-            else:
-                data_summary_sec = dp.Blocks(
-                    blocks=[
-                        first_10_title,
-                        first_10_rows_blocks[0],
-                        last_10_title,
-                        last_10_rows_blocks[0],
-                        summary_title,
-                        data_summary_blocks[0],
-                        dp.Text("----"),
-                    ]
-                )
+            # series_subtext = rc.Text(f"Indexed by {series_name}")
+            first_10_title = rc.Heading("First 5 Rows of Data", level=3)
+            last_10_title = rc.Heading("Last 5 Rows of Data", level=3)
+            summary_title = rc.Heading("Data Summary Statistics", level=3)
+
+            data_summary_sec = rc.Block(
+                rc.Block(
+                    first_10_title,
+                    # series_subtext,
+                    rc.Select(blocks=first_5_rows_blocks),
+                ),
+                rc.Block(
+                    last_10_title,
+                    # series_subtext,
+                    rc.Select(blocks=last_5_rows_blocks),
+                ),
+                rc.Block(
+                    summary_title,
+                    # series_subtext,
+                    rc.Select(blocks=data_summary_blocks),
+                ),
+                rc.Separator(),
+            )

-            summary = dp.Group(
-                blocks=[
-                    header_section,
-                    data_summary_sec,
-                ]
+            summary = rc.Block(
+                header_section,
+                data_summary_sec,
             )

             test_metrics_sections = []
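The data-summary rewrite above puts one labeled rc.DataTable per series inside rc.Select, replacing the dp.Select/dp.Blocks arrangement. A minimal sketch of that per-series pattern, again using only calls visible in this diff; the frames below stand in for the operator's full_data_dict:

```python
import pandas as pd
import report_creator as rc

# Placeholder per-series frames standing in for self.full_data_dict.
full_data_dict = {
    "series_a": pd.DataFrame({"y": [1.0, 2.0, 3.0]}),
    "series_b": pd.DataFrame({"y": [4.0, 5.0, 6.0]}),
}

data_summary_sec = rc.Block(
    rc.Heading("Data Summary Statistics", level=3),
    # One labeled DataTable per series; Select lets the reader switch between them.
    rc.Select(
        blocks=[
            rc.DataTable(df.describe(), label=s_id, index=True)
            for s_id, df in full_data_dict.items()
        ]
    ),
    rc.Separator(),
)
```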
@@ -255,41 +235,47 @@ class ForecastOperatorBaseModel(ABC):
                 self.test_eval_metrics is not None
                 and not self.test_eval_metrics.empty
             ):
-                sec7_text = dp.Text(f"## Test Data Evaluation Metrics")
-                sec7 = dp.DataTable(self.test_eval_metrics)
+                sec7_text = rc.Heading("Test Data Evaluation Metrics", level=2)
+                sec7 = rc.DataTable(self.test_eval_metrics, index=True)
                 test_metrics_sections = test_metrics_sections + [sec7_text, sec7]

             if summary_metrics is not None and not summary_metrics.empty:
-                sec8_text = dp.Text(f"## Test Data Summary Metrics")
-                sec8 = dp.DataTable(summary_metrics)
+                sec8_text = rc.Heading("Test Data Summary Metrics", level=2)
+                sec8 = rc.DataTable(summary_metrics, index=True)
                 test_metrics_sections = test_metrics_sections + [sec8_text, sec8]

             train_metrics_sections = []
             if self.eval_metrics is not None and not self.eval_metrics.empty:
-                sec9_text = dp.Text(f"## Training Data Metrics")
-                sec9 = dp.DataTable(self.eval_metrics)
+                sec9_text = rc.Heading("Training Data Metrics", level=2)
+                sec9 = rc.DataTable(self.eval_metrics, index=True)
                 train_metrics_sections = [sec9_text, sec9]

-
             forecast_plots = []
             if len(self.forecast_output.list_series_ids()) > 0:
-                forecast_text = dp.Text(f"## Forecasted Data Overlaying Historical")
+                forecast_text = rc.Heading(
+                    "Forecasted Data Overlaying Historical", level=2
+                )
                 forecast_sec = get_forecast_plots(
                     self.forecast_output,
                     horizon=self.spec.horizon,
                     test_data=test_data,
                     ci_interval_width=self.spec.confidence_interval_width,
                 )
-                if series_name is not None and len(self.datasets.list_series_ids()) > 1:
-                    forecast_plots = [forecast_text, series_subtext, forecast_sec]
+                if (
+                    series_name is not None
+                    and len(self.datasets.list_series_ids()) > 1
+                ):
+                    forecast_plots = [
+                        forecast_text,
+                        forecast_sec,
+                    ]  # series_subtext,
                 else:
                     forecast_plots = [forecast_text, forecast_sec]

-            yaml_appendix_title = dp.Text(f"## Reference: YAML File")
-            yaml_appendix = dp.Code(code=self.config.to_yaml(), language="yaml")
+            yaml_appendix_title = rc.Heading("Reference: YAML File", level=2)
+            yaml_appendix = rc.Yaml(self.config.to_dict())
             report_sections = (
-                [title_text]
-                + [summary]
+                [summary]
                 + forecast_plots
                 + other_sections
                 + test_metrics_sections
@@ -421,7 +407,7 @@
         test_metrics_df: pd.DataFrame,
     ):
         """Saves resulting reports to the given folder."""
-        import datapane as dp
+        import report_creator as rc

         unique_output_dir = find_output_dirname(self.spec.output_directory)

@@ -430,13 +416,13 @@
         else:
             storage_options = dict()

-        # datapane html report
+        # report-creator html report
         if self.spec.generate_report:
-            # datapane html report
             with tempfile.TemporaryDirectory() as temp_dir:
                 report_local_path = os.path.join(temp_dir, "___report.html")
                 disable_print()
-                dp.save_report(report_sections, report_local_path)
+                with rc.ReportCreator("My Report") as report:
+                    report.save(rc.Block(*report_sections), report_local_path)
                 enable_print()

         report_path = os.path.join(unique_output_dir, self.spec.report_filename)
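Saving changes in the same spirit: dp.save_report gives way to wrapping every section in one rc.Block and writing it through rc.ReportCreator, as the hunk above shows. A minimal sketch with placeholder sections and output path:

```python
import report_creator as rc

# Placeholder sections; the operator passes the blocks assembled in generate_report().
report_sections = [
    rc.Heading("Forecast Report", level=1),
    rc.Text("Placeholder body text."),
]

# Same save pattern as the diff: one Block wrapping all sections, written to an HTML file.
with rc.ReportCreator("My Report") as report:
    report.save(rc.Block(*report_sections), "report.html")
```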
@@ -655,11 +641,10 @@
                 include_horizon=False
             ).items():
                 if s_id in self.models:
-
                     explain_predict_fn = self.get_explain_predict_fn(series_id=s_id)
-                    data_trimmed = data_i.tail(max(int(len(data_i) * ratio), 5)).reset_index(
-                        drop=True
-                    )
+                    data_trimmed = data_i.tail(
+                        max(int(len(data_i) * ratio), 5)
+                    ).reset_index(drop=True)
                     data_trimmed[datetime_col_name] = data_trimmed[datetime_col_name].apply(
                         lambda x: x.timestamp()
                     )
@@ -667,7 +652,8 @@
                     # Explainer fails when boolean columns are passed

                     _, data_trimmed_encoded = _label_encode_dataframe(
-                        data_trimmed, no_encode={datetime_col_name, self.original_target_column}
+                        data_trimmed,
+                        no_encode={datetime_col_name, self.original_target_column},
                     )

                     kernel_explnr = PermutationExplainer(