oracle-ads 2.13.0__py3-none-any.whl → 2.13.1rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ads/opctl/anomaly_detection.py ADDED
@@ -0,0 +1,11 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) 2025 Oracle and/or its affiliates.
+ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+ from ads.opctl.operator.lowcode.anomaly.__main__ import operate
+ from ads.opctl.operator.lowcode.anomaly.operator_config import AnomalyOperatorConfig
+
+ if __name__ == "__main__":
+     config = AnomalyOperatorConfig()
+     operate(config)
ads/opctl/forecast.py ADDED
@@ -0,0 +1,11 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) 2025 Oracle and/or its affiliates.
+ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+ from ads.opctl.operator.lowcode.forecast.__main__ import operate
+ from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig
+
+ if __name__ == "__main__":
+     config = ForecastOperatorConfig()
+     operate(config)
ads/opctl/operator/lowcode/forecast/__main__.py CHANGED
@@ -1,7 +1,6 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--
 
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
  import json
@@ -15,17 +14,17 @@ from ads.opctl import logger
  from ads.opctl.operator.common.const import ENV_OPERATOR_ARGS
  from ads.opctl.operator.common.utils import _parse_input_args
 
+ from .model.forecast_datasets import ForecastDatasets, ForecastResults
  from .operator_config import ForecastOperatorConfig
- from .model.forecast_datasets import ForecastDatasets
  from .whatifserve import ModelDeploymentManager
 
 
- def operate(operator_config: ForecastOperatorConfig) -> None:
+ def operate(operator_config: ForecastOperatorConfig) -> ForecastResults:
  """Runs the forecasting operator."""
  from .model.factory import ForecastOperatorModelFactory
 
  datasets = ForecastDatasets(operator_config)
- ForecastOperatorModelFactory.get_model(
+ results = ForecastOperatorModelFactory.get_model(
  operator_config, datasets
  ).generate_report()
  # saving to model catalog
@@ -36,6 +35,7 @@ def operate(operator_config: ForecastOperatorConfig) -> None:
  if spec.what_if_analysis.model_deployment:
  mdm.create_deployment()
  mdm.save_deployment_info()
+ return results
 
 
  def verify(spec: Dict, **kwargs: Dict) -> bool:
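Because `operate` now returns a `ForecastResults` object instead of `None`, the run's outputs can be consumed programmatically as well as from the new entry-point modules. A minimal sketch mirroring the new ads/opctl/forecast.py; how the config's forecast spec gets populated is outside this diff, so treat the bare constructor call as a placeholder:

    from ads.opctl.operator.lowcode.forecast.__main__ import operate
    from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig

    config = ForecastOperatorConfig()     # placeholder: a populated forecast spec is assumed
    results = operate(config)             # 2.13.1rc0: returns ForecastResults instead of None

    forecast_df = results.get_forecast()  # forecast output as a pandas DataFrame
    metrics_df = results.get_metrics()    # None unless metrics generation was enabled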
ads/opctl/operator/lowcode/forecast/model/arima.py CHANGED
@@ -116,7 +116,10 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  lower_bound=self.get_horizon(forecast["yhat_lower"]).values,
  )
 
- self.models[s_id] = model
+ self.models[s_id] = {}
+ self.models[s_id]["model"] = model
+ self.models[s_id]["le"] = self.le[s_id]
+ self.models[s_id]["predict_component_cols"] = X_pred.columns
 
  params = vars(model).copy()
  for param in ["arima_res_", "endog_index_"]:
@@ -163,7 +166,7 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  sec5_text = rc.Heading("ARIMA Model Parameters", level=2)
  blocks = [
  rc.Html(
- m.summary().as_html(),
+ m['model'].summary().as_html(),
  label=s_id if self.target_cat_col else None,
  )
  for i, (s_id, m) in enumerate(self.models.items())
@@ -251,7 +254,7 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
  def get_explain_predict_fn(self, series_id):
  def _custom_predict(
  data,
- model=self.models[series_id],
+ model=self.models[series_id]["model"],
  dt_column_name=self.datasets._datetime_column_name,
  target_col=self.original_target_column,
  ):
ads/opctl/operator/lowcode/forecast/model/automlx.py CHANGED
@@ -1,5 +1,5 @@
  #!/usr/bin/env python
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
  import logging
  import os
@@ -56,8 +56,8 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  )
  return model_kwargs_cleaned, time_budget
 
- def preprocess(self, data): # TODO: re-use self.le for explanations
- _, df_encoded = _label_encode_dataframe(
+ def preprocess(self, data, series_id): # TODO: re-use self.le for explanations
+ self.le[series_id], df_encoded = _label_encode_dataframe(
  data,
  no_encode={self.spec.datetime_column.name, self.original_target_column},
  )
@@ -66,8 +66,7 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  @runtime_dependency(
  module="automlx",
  err_msg=(
- "Please run `pip3 install oracle-automlx>=23.4.1` and "
- "`pip3 install oracle-automlx[forecasting]>=23.4.1` "
+ "Please run `pip3 install oracle-automlx[forecasting]>=25.1.1` "
  "to install the required dependencies for automlx."
  ),
  )
@@ -105,7 +104,7 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  engine_opts = (
  None
  if engine_type == "local"
- else ({"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},)
+ else {"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}
  )
  init(
  engine=engine_type,
@@ -125,7 +124,7 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  self.forecast_output.init_series_output(
  series_id=s_id, data_at_series=df
  )
- data = self.preprocess(df)
+ data = self.preprocess(df, s_id)
  data_i = self.drop_horizon(data)
  X_pred = self.get_horizon(data).drop(target, axis=1)
 
@@ -157,7 +156,9 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  target
  ].values
 
- self.models[s_id] = model
+ self.models[s_id] = {}
+ self.models[s_id]["model"] = model
+ self.models[s_id]["le"] = self.le[s_id]
 
  # In case of Naive model, model.forecast function call does not return confidence intervals.
  if f"{target}_ci_upper" not in summary_frame:
@@ -218,7 +219,8 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  other_sections = []
 
  if len(self.models) > 0:
- for s_id, m in models.items():
+ for s_id, artifacts in models.items():
+ m = artifacts["model"]
  selected_models[s_id] = {
  "series_id": s_id,
  "selected_model": m.selected_model_,
@@ -247,17 +249,18 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  self.explain_model()
 
  global_explanation_section = None
- if self.spec.explanations_accuracy_mode != SpeedAccuracyMode.AUTOMLX:
- # Convert the global explanation data to a DataFrame
- global_explanation_df = pd.DataFrame(self.global_explanation)
 
- self.formatted_global_explanation = (
- global_explanation_df / global_explanation_df.sum(axis=0) * 100
- )
- self.formatted_global_explanation = self.formatted_global_explanation.rename(
- {self.spec.datetime_column.name: ForecastOutputColumns.DATE},
- axis=1,
- )
+ # Convert the global explanation data to a DataFrame
+ global_explanation_df = pd.DataFrame(self.global_explanation)
+
+ self.formatted_global_explanation = (
+ global_explanation_df / global_explanation_df.sum(axis=0) * 100
+ )
+
+ self.formatted_global_explanation.rename(
+ columns={self.spec.datetime_column.name: ForecastOutputColumns.DATE},
+ inplace=True,
+ )
 
  aggregate_local_explanations = pd.DataFrame()
  for s_id, local_ex_df in self.local_explanation.items():
@@ -269,11 +272,15 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  self.formatted_local_explanation = aggregate_local_explanations
 
  if not self.target_cat_col:
- self.formatted_global_explanation = self.formatted_global_explanation.rename(
- {"Series 1": self.original_target_column},
- axis=1,
+ self.formatted_global_explanation = (
+ self.formatted_global_explanation.rename(
+ {"Series 1": self.original_target_column},
+ axis=1,
+ )
+ )
+ self.formatted_local_explanation.drop(
+ "Series", axis=1, inplace=True
  )
- self.formatted_local_explanation.drop("Series", axis=1, inplace=True)
 
  # Create a markdown section for the global explainability
  global_explanation_section = rc.Block(
@@ -320,7 +327,7 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  )
 
  def get_explain_predict_fn(self, series_id):
- selected_model = self.models[series_id]
+ selected_model = self.models[series_id]["model"]
 
  # If training date, use method below. If future date, use forecast!
  def _custom_predict_fn(
@@ -338,12 +345,12 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  data[dt_column_name] = seconds_to_datetime(
  data[dt_column_name], dt_format=self.spec.datetime_column.format
  )
- data = self.preprocess(data)
+ data = self.preprocess(data, series_id)
  horizon_data = horizon_data.drop(target_col, axis=1)
  horizon_data[dt_column_name] = seconds_to_datetime(
  horizon_data[dt_column_name], dt_format=self.spec.datetime_column.format
  )
- horizon_data = self.preprocess(horizon_data)
+ horizon_data = self.preprocess(horizon_data, series_id)
 
  rows = []
  for i in range(data.shape[0]):
@@ -421,8 +428,10 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  if self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX:
  # Use the MLExplainer class from AutoMLx to generate explanations
  explainer = automlx.MLExplainer(
- self.models[s_id],
- self.datasets.additional_data.get_data_for_series(series_id=s_id)
+ self.models[s_id]["model"],
+ self.datasets.additional_data.get_data_for_series(
+ series_id=s_id
+ )
  .drop(self.spec.datetime_column.name, axis=1)
  .head(-self.spec.horizon)
  if self.spec.additional_data
@@ -433,7 +442,9 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
 
  # Generate explanations for the forecast
  explanations = explainer.explain_prediction(
- X=self.datasets.additional_data.get_data_for_series(series_id=s_id)
+ X=self.datasets.additional_data.get_data_for_series(
+ series_id=s_id
+ )
  .drop(self.spec.datetime_column.name, axis=1)
  .tail(self.spec.horizon)
  if self.spec.additional_data
@@ -445,7 +456,9 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
  explanations_df = pd.concat(
  [exp.to_dataframe() for exp in explanations]
  )
- explanations_df["row"] = explanations_df.groupby("Feature").cumcount()
+ explanations_df["row"] = explanations_df.groupby(
+ "Feature"
+ ).cumcount()
  explanations_df = explanations_df.pivot(
  index="row", columns="Feature", values="Attribution"
  )
@@ -453,9 +466,18 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):
 
  # Store the explanations in the local_explanation dictionary
  self.local_explanation[s_id] = explanations_df
+
+ self.global_explanation[s_id] = dict(
+ zip(
+ self.local_explanation[s_id].columns,
+ np.nanmean((self.local_explanation[s_id]), axis=0),
+ )
+ )
  else:
  # Fall back to the default explanation generation method
  super().explain_model()
  except Exception as e:
- logger.warning(f"Failed to generate explanations for series {s_id} with error: {e}.")
+ logger.warning(
+ f"Failed to generate explanations for series {s_id} with error: {e}."
+ )
  logger.debug(f"Full Traceback: {traceback.format_exc()}")
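In AUTOMLX explanation mode, the hunks above now also derive a series' global explanation by averaging its local attributions feature-wise with np.nanmean. A small standalone sketch of that aggregation (the feature names and values are made up):

    import numpy as np
    import pandas as pd

    # Hypothetical local attributions: one row per forecast point, one column per feature.
    local_exp = pd.DataFrame({"temp": [0.2, 0.4, np.nan], "promo": [0.1, 0.0, 0.3]})

    # Column-wise nanmean, mirroring the new global_explanation[s_id] computation.
    global_exp = dict(zip(local_exp.columns, np.nanmean(local_exp, axis=0)))
    # -> {"temp": 0.3, "promo": 0.1333...}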
ads/opctl/operator/lowcode/forecast/model/base_model.py CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/env python
 
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
  import logging
@@ -19,6 +19,7 @@ import report_creator as rc
  from ads.common.decorator.runtime_dependency import runtime_dependency
  from ads.common.object_storage_details import ObjectStorageDetails
  from ads.opctl import logger
+ from ads.opctl.operator.lowcode.common.const import DataColumns
  from ads.opctl.operator.lowcode.common.utils import (
  datetime_to_seconds,
  disable_print,
@@ -28,7 +29,6 @@ from ads.opctl.operator.lowcode.common.utils import (
  seconds_to_datetime,
  write_data,
  )
- from ads.opctl.operator.lowcode.common.const import DataColumns
  from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import TestData
  from ads.opctl.operator.lowcode.forecast.utils import (
  _build_metrics_df,
@@ -49,10 +49,9 @@ from ..const import (
  SpeedAccuracyMode,
  SupportedMetrics,
  SupportedModels,
- BACKTEST_REPORT_NAME,
  )
  from ..operator_config import ForecastOperatorConfig, ForecastOperatorSpec
- from .forecast_datasets import ForecastDatasets
+ from .forecast_datasets import ForecastDatasets, ForecastResults
 
  logging.getLogger("report_creator").setLevel(logging.WARNING)
 
@@ -127,8 +126,9 @@ class ForecastOperatorBaseModel(ABC):
  if self.spec.generate_report or self.spec.generate_metrics:
  self.eval_metrics = self.generate_train_metrics()
  if not self.target_cat_col:
- self.eval_metrics.rename({"Series 1": self.original_target_column},
- axis=1, inplace=True)
+ self.eval_metrics.rename(
+ {"Series 1": self.original_target_column}, axis=1, inplace=True
+ )
 
  if self.spec.test_data:
  try:
@@ -140,8 +140,11 @@ class ForecastOperatorBaseModel(ABC):
  elapsed_time=elapsed_time,
  )
  if not self.target_cat_col:
- self.test_eval_metrics.rename({"Series 1": self.original_target_column},
- axis=1, inplace=True)
+ self.test_eval_metrics.rename(
+ {"Series 1": self.original_target_column},
+ axis=1,
+ inplace=True,
+ )
  except Exception:
  logger.warn("Unable to generate Test Metrics.")
  logger.debug(f"Full Traceback: {traceback.format_exc()}")
@@ -223,17 +226,23 @@ class ForecastOperatorBaseModel(ABC):
  rc.Block(
  first_10_title,
  # series_subtext,
- rc.Select(blocks=first_5_rows_blocks) if self.target_cat_col else first_5_rows_blocks[0],
+ rc.Select(blocks=first_5_rows_blocks)
+ if self.target_cat_col
+ else first_5_rows_blocks[0],
  ),
  rc.Block(
  last_10_title,
  # series_subtext,
- rc.Select(blocks=last_5_rows_blocks) if self.target_cat_col else last_5_rows_blocks[0],
+ rc.Select(blocks=last_5_rows_blocks)
+ if self.target_cat_col
+ else last_5_rows_blocks[0],
  ),
  rc.Block(
  summary_title,
  # series_subtext,
- rc.Select(blocks=data_summary_blocks) if self.target_cat_col else data_summary_blocks[0],
+ rc.Select(blocks=data_summary_blocks)
+ if self.target_cat_col
+ else data_summary_blocks[0],
  ),
  rc.Separator(),
  )
@@ -308,7 +317,7 @@ class ForecastOperatorBaseModel(ABC):
  horizon=self.spec.horizon,
  test_data=test_data,
  ci_interval_width=self.spec.confidence_interval_width,
- target_category_column=self.target_cat_col
+ target_category_column=self.target_cat_col,
  )
  if (
  series_name is not None
@@ -341,11 +350,12 @@ class ForecastOperatorBaseModel(ABC):
  )
 
  # save the report and result CSV
- self._save_report(
+ return self._save_report(
  report_sections=report_sections,
  result_df=result_df,
  metrics_df=self.eval_metrics,
  test_metrics_df=self.test_eval_metrics,
+ test_data=test_data,
  )
 
  def _test_evaluate_metrics(self, elapsed_time=0):
@@ -462,10 +472,12 @@ class ForecastOperatorBaseModel(ABC):
  result_df: pd.DataFrame,
  metrics_df: pd.DataFrame,
  test_metrics_df: pd.DataFrame,
+ test_data: pd.DataFrame,
  ):
  """Saves resulting reports to the given folder."""
 
  unique_output_dir = self.spec.output_directory.url
+ results = ForecastResults()
 
  if ObjectStorageDetails.is_oci_path(unique_output_dir):
  storage_options = default_signer()
@@ -491,13 +503,23 @@ class ForecastOperatorBaseModel(ABC):
  f2.write(f1.read())
 
  # forecast csv report
- result_df = result_df if self.target_cat_col else result_df.drop(DataColumns.Series, axis=1)
+ # todo: add test data into forecast.csv
+ # if self.spec.test_data is not None:
+ # test_data_dict = test_data.get_dict_by_series()
+ # for series_id, test_data_values in test_data_dict.items():
+ # result_df[DataColumns.Series] = test_data_values[]
+ result_df = (
+ result_df
+ if self.target_cat_col
+ else result_df.drop(DataColumns.Series, axis=1)
+ )
  write_data(
  data=result_df,
  filename=os.path.join(unique_output_dir, self.spec.forecast_filename),
  format="csv",
  storage_options=storage_options,
  )
+ results.set_forecast(result_df)
 
  # metrics csv report
  if self.spec.generate_metrics:
@@ -507,10 +529,11 @@ class ForecastOperatorBaseModel(ABC):
  else "Series 1"
  )
  if metrics_df is not None:
+ metrics_df_formatted = metrics_df.reset_index().rename(
+ {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+ )
  write_data(
- data=metrics_df.reset_index().rename(
- {"index": "metrics", "Series 1": metrics_col_name}, axis=1
- ),
+ data=metrics_df_formatted,
  filename=os.path.join(
  unique_output_dir, self.spec.metrics_filename
  ),
@@ -518,6 +541,7 @@ class ForecastOperatorBaseModel(ABC):
  storage_options=storage_options,
  index=False,
  )
+ results.set_metrics(metrics_df_formatted)
  else:
  logger.warn(
  f"Attempted to generate the {self.spec.metrics_filename} file with the training metrics, however the training metrics could not be properly generated."
@@ -526,10 +550,11 @@ class ForecastOperatorBaseModel(ABC):
  # test_metrics csv report
  if self.spec.test_data is not None:
  if test_metrics_df is not None:
+ test_metrics_df_formatted = test_metrics_df.reset_index().rename(
+ {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+ )
  write_data(
- data=test_metrics_df.reset_index().rename(
- {"index": "metrics", "Series 1": metrics_col_name}, axis=1
- ),
+ data=test_metrics_df_formatted,
  filename=os.path.join(
  unique_output_dir, self.spec.test_metrics_filename
  ),
@@ -537,6 +562,7 @@ class ForecastOperatorBaseModel(ABC):
  storage_options=storage_options,
  index=False,
  )
+ results.set_test_metrics(test_metrics_df_formatted)
  else:
  logger.warn(
  f"Attempted to generate the {self.spec.test_metrics_filename} file with the test metrics, however the test metrics could not be properly generated."
@@ -554,6 +580,7 @@ class ForecastOperatorBaseModel(ABC):
  storage_options=storage_options,
  index=True,
  )
+ results.set_global_explanations(self.formatted_global_explanation)
  else:
  logger.warn(
  f"Attempted to generate global explanations for the {self.spec.global_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -569,6 +596,7 @@ class ForecastOperatorBaseModel(ABC):
  storage_options=storage_options,
  index=True,
  )
+ results.set_local_explanations(self.formatted_local_explanation)
  else:
  logger.warn(
  f"Attempted to generate local explanations for the {self.spec.local_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -589,10 +617,12 @@ class ForecastOperatorBaseModel(ABC):
  index=True,
  indent=4,
  )
+ results.set_model_parameters(self.model_parameters)
 
  # model pickle
  if self.spec.generate_model_pickle:
  self._save_model(unique_output_dir, storage_options)
+ results.set_models(self.models)
 
  logger.info(
  f"The outputs have been successfully "
@@ -612,8 +642,10 @@ class ForecastOperatorBaseModel(ABC):
  index=True,
  indent=4,
  )
+ results.set_errors_dict(self.errors_dict)
  else:
  logger.info("All modeling completed successfully.")
+ return results
 
  def preprocess(self, df, series_id):
  """The method that needs to be implemented on the particular model level."""
@@ -667,7 +699,10 @@ class ForecastOperatorBaseModel(ABC):
  )
 
  def _validate_automlx_explanation_mode(self):
- if self.spec.model != SupportedModels.AutoMLX and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX:
+ if (
+ self.spec.model != SupportedModels.AutoMLX
+ and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX
+ ):
  raise ValueError(
  "AUTOMLX explanation accuracy mode is only supported for AutoMLX models. "
  "Please select mode other than AUTOMLX from the available explanations_accuracy_mode options"
@@ -738,14 +773,6 @@ class ForecastOperatorBaseModel(ABC):
  logger.warn(
  "No explanations generated. Ensure that additional data has been provided."
  )
- elif (
- self.spec.model == SupportedModels.AutoMLX
- and self.spec.explanations_accuracy_mode
- == SpeedAccuracyMode.AUTOMLX
- ):
- logger.warning(
- "Global explanations not available for AutoMLX models with inherent explainability"
- )
  else:
  self.global_explanation[s_id] = dict(
  zip(
@@ -794,7 +821,7 @@ class ForecastOperatorBaseModel(ABC):
  def get_explain_predict_fn(self, series_id, fcst_col_name="yhat"):
  def _custom_predict(
  data,
- model=self.models[series_id],
+ model=self.models[series_id]["model"],
  dt_column_name=self.datasets._datetime_column_name,
  ):
  """
ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py CHANGED
@@ -1,8 +1,10 @@
  #!/usr/bin/env python
 
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
+ from typing import Dict, List
+
  import pandas as pd
 
  from ads.opctl import logger
@@ -167,7 +169,7 @@ class ForecastDatasets:
  self.historical_data.data,
  self.additional_data.data,
  ],
- axis=1
+ axis=1,
  )
 
  def get_data_by_series(self, include_horizon=True):
@@ -416,3 +418,59 @@ class ForecastOutput:
  for df in self.series_id_map.values():
  output = pd.concat([output, df])
  return output.reset_index(drop=True)
+
+
+ class ForecastResults:
+ """
+ Forecast Results contains all outputs from the forecast run.
+ This class is returned to users who use the Forecast's `operate` method.
+
+ """
+
+ def set_forecast(self, df: pd.DataFrame):
+ self.forecast = df
+
+ def get_forecast(self):
+ return getattr(self, "forecast", None)
+
+ def set_metrics(self, df: pd.DataFrame):
+ self.metrics = df
+
+ def get_metrics(self):
+ return getattr(self, "metrics", None)
+
+ def set_test_metrics(self, df: pd.DataFrame):
+ self.test_metrics = df
+
+ def get_test_metrics(self):
+ return getattr(self, "test_metrics", None)
+
+ def set_local_explanations(self, df: pd.DataFrame):
+ self.local_explanations = df
+
+ def get_local_explanations(self):
+ return getattr(self, "local_explanations", None)
+
+ def set_global_explanations(self, df: pd.DataFrame):
+ self.global_explanations = df
+
+ def get_global_explanations(self):
+ return getattr(self, "global_explanations", None)
+
+ def set_model_parameters(self, df: pd.DataFrame):
+ self.model_parameters = df
+
+ def get_model_parameters(self):
+ return getattr(self, "model_parameters", None)
+
+ def set_models(self, models: List):
+ self.models = models
+
+ def get_models(self):
+ return getattr(self, "models", None)
+
+ def set_errors_dict(self, errors_dict: Dict):
+ self.errors_dict = errors_dict
+
+ def get_errors_dict(self):
+ return getattr(self, "errors_dict", None)
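A note on the getter pattern above: each attribute is only assigned when the corresponding artifact was actually produced, so the getters fall back to None via getattr. Callers should therefore guard optional outputs, e.g. with a results object returned by operate:

    test_metrics = results.get_test_metrics()   # None when no test data was configured
    if test_metrics is not None:
        print(test_metrics.head())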
ads/opctl/operator/lowcode/forecast/model/neuralprophet.py CHANGED
@@ -172,8 +172,10 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
  ).values,
  )
 
- self.models[s_id] = model
  self.trainers[s_id] = model.trainer
+ self.models[s_id] = {}
+ self.models[s_id]["model"] = model
+ self.models[s_id]["le"] = self.le[s_id]
 
  self.model_parameters[s_id] = {
  "framework": SupportedModels.NeuralProphet,
@@ -355,7 +357,8 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
 
  sec5_text = rc.Heading("Neural Prophet Model Parameters", level=2)
  model_states = []
- for s_id, m in self.models.items():
+ for s_id, artifacts in self.models.items():
+ m = artifacts["model"]
  model_states.append(
  pd.Series(
  m.state_dict(),
ads/opctl/operator/lowcode/forecast/model/prophet.py CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/env python
 
- # Copyright (c) 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2024, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
  import logging
@@ -43,7 +43,11 @@ def _add_unit(num, unit):
  def _fit_model(data, params, additional_regressors):
  from prophet import Prophet
 
+ monthly_seasonality = params.pop("monthly_seasonality", False)
  model = Prophet(**params)
+ if monthly_seasonality:
+ model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
+ params["monthly_seasonality"] = monthly_seasonality
  for add_reg in additional_regressors:
  model.add_regressor(add_reg)
  model.fit(data)
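monthly_seasonality is not a Prophet constructor argument, so the hunk above pops it from params before calling Prophet(**params), re-applies it through add_seasonality, and then puts the flag back on params (presumably so later reporting still sees it). A standalone sketch of the same pattern; the params contents are illustrative, not taken from the operator spec:

    from prophet import Prophet

    params = {"yearly_seasonality": True, "monthly_seasonality": True}  # illustrative kwargs

    monthly_seasonality = params.pop("monthly_seasonality", False)  # not a Prophet kwarg
    model = Prophet(**params)
    if monthly_seasonality:
        # same values as the hunk above: ~30.5-day period, 5 Fourier terms
        model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
    params["monthly_seasonality"] = monthly_seasonality  # restore the flag for later use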
@@ -108,7 +112,10 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
  upper_bound=self.get_horizon(forecast["yhat_upper"]).values,
  lower_bound=self.get_horizon(forecast["yhat_lower"]).values,
  )
- self.models[series_id] = model
+
+ self.models[series_id] = {}
+ self.models[series_id]["model"] = model
+ self.models[series_id]["le"] = self.le[series_id]
 
  params = vars(model).copy()
  for param in ["history", "history_dates", "stan_fit"]:
@@ -252,11 +259,11 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
  all_sections = []
  if len(series_ids) > 0:
  sec1 = _select_plot_list(
- lambda s_id: self.models[s_id].plot(
+ lambda s_id: self.models[s_id]["model"].plot(
  self.outputs[s_id], include_legend=True
  ),
  series_ids=series_ids,
- target_category_column=self.target_cat_col
+ target_category_column=self.target_cat_col,
  )
  section_1 = rc.Block(
  rc.Heading("Forecast Overview", level=2),
@@ -267,25 +274,25 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
  )
 
  sec2 = _select_plot_list(
- lambda s_id: self.models[s_id].plot_components(self.outputs[s_id]),
+ lambda s_id: self.models[s_id]["model"].plot_components(self.outputs[s_id]),
  series_ids=series_ids,
- target_category_column=self.target_cat_col
+ target_category_column=self.target_cat_col,
  )
  section_2 = rc.Block(
  rc.Heading("Forecast Broken Down by Trend Component", level=2), sec2
  )
 
  sec3_figs = {
- s_id: self.models[s_id].plot(self.outputs[s_id]) for s_id in series_ids
+ s_id: self.models[s_id]["model"].plot(self.outputs[s_id]) for s_id in series_ids
  }
  for s_id in series_ids:
  add_changepoints_to_plot(
- sec3_figs[s_id].gca(), self.models[s_id], self.outputs[s_id]
+ sec3_figs[s_id].gca(), self.models[s_id]["model"], self.outputs[s_id]
  )
  sec3 = _select_plot_list(
  lambda s_id: sec3_figs[s_id],
  series_ids=series_ids,
- target_category_column=self.target_cat_col
+ target_category_column=self.target_cat_col,
  )
  section_3 = rc.Block(rc.Heading("Forecast Changepoints", level=2), sec3)
 
@@ -294,12 +301,14 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
  sec5_text = rc.Heading("Prophet Model Seasonality Components", level=2)
  model_states = []
  for s_id in series_ids:
- m = self.models[s_id]
+ m = self.models[s_id]["model"]
  model_states.append(
  pd.Series(
  m.seasonalities,
  index=pd.Index(m.seasonalities.keys(), dtype="object"),
- name=s_id if self.target_cat_col else self.original_target_column,
+ name=s_id
+ if self.target_cat_col
+ else self.original_target_column,
  dtype="object",
  )
  )
@@ -330,11 +339,15 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
  self.formatted_local_explanation = aggregate_local_explanations
 
  if not self.target_cat_col:
- self.formatted_global_explanation = self.formatted_global_explanation.rename(
- {"Series 1": self.original_target_column},
- axis=1,
+ self.formatted_global_explanation = (
+ self.formatted_global_explanation.rename(
+ {"Series 1": self.original_target_column},
+ axis=1,
+ )
+ )
+ self.formatted_local_explanation.drop(
+ "Series", axis=1, inplace=True
  )
- self.formatted_local_explanation.drop("Series", axis=1, inplace=True)
 
  # Create a markdown section for the global explainability
  global_explanation_section = rc.Block(
ads/opctl/operator/lowcode/forecast/whatifserve/score.py CHANGED
@@ -151,34 +151,42 @@ def get_forecast(future_df, model_name, series_id, model_object, date_col, targe
  pred_obj = model_object.predict(future_regressor=future_reg)
  return pred_obj.forecast[series_id].tolist()
  elif model_name == SupportedModels.Prophet and series_id in model_object:
- model = model_object[series_id]
+ model = model_object[series_id]['model']
+ label_encoder = model_object[series_id]['le']
  processed = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
- forecast = model.predict(processed)
+ encoded_df = label_encoder.transform(processed)
+ forecast = model.predict(encoded_df)
  return forecast['yhat'].tolist()
  elif model_name == SupportedModels.NeuralProphet and series_id in model_object:
- model = model_object[series_id]
+ model = model_object[series_id]['model']
+ label_encoder = model_object[series_id]['le']
  model.restore_trainer()
  accepted_regressors = list(model.config_regressors.regressors.keys())
  data = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
- future = data[accepted_regressors + ["ds"]].reset_index(drop=True)
+ encoded_df = label_encoder.transform(data)
+ future = encoded_df[accepted_regressors + ["ds"]].reset_index(drop=True)
  future["y"] = None
  forecast = model.predict(future)
  return forecast['yhat1'].tolist()
  elif model_name == SupportedModels.Arima and series_id in model_object:
- model = model_object[series_id]
- future_df = future_df.set_index(date_col_name)
- x_pred = future_df.drop(target_cat_col, axis=1)
+ model = model_object[series_id]['model']
+ label_encoder = model_object[series_id]['le']
+ predict_cols = model_object[series_id]["predict_component_cols"]
+ encoded_df = label_encoder.transform(future_df)
+ x_pred = encoded_df.set_index(date_col_name)
+ x_pred = x_pred.drop(target_cat_col, axis=1)
  yhat, conf_int = model.predict(
  n_periods=horizon,
- X=x_pred,
+ X=x_pred[predict_cols],
  return_conf_int=True
  )
  yhat_clean = pd.DataFrame(yhat, index=yhat.index, columns=["yhat"])
  return yhat_clean['yhat'].tolist()
  elif model_name == SupportedModels.AutoMLX and series_id in model_object:
- # automlx model
- model = model_object[series_id]
- x_pred = future_df.drop(target_cat_col, axis=1)
+ model = model_object[series_id]['model']
+ label_encoder = model_object[series_id]['le']
+ encoded_df = label_encoder.transform(future_df)
+ x_pred = encoded_df.drop(target_cat_col, axis=1)
  x_pred = x_pred.set_index(date_col_name)
  forecast = model.forecast(
  X=x_pred,
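The scoring branches above all read from the per-series artifact dictionary that the training-side hunks now persist, rather than from a bare model object. A sketch of the assumed layout; None stands in for the real fitted objects:

    # Assumed per-series artifact layout (keys taken from the hunks above).
    model_object = {
        "Series 1": {                        # illustrative series id
            "model": None,                   # framework-specific fitted model
            "le": None,                      # label encoder fitted during preprocessing
            "predict_component_cols": None,  # ARIMA only: exogenous columns for predict()
        }
    }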
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: oracle_ads
- Version: 2.13.0
+ Version: 2.13.1rc0
  Summary: Oracle Accelerated Data Science SDK
  Keywords: Oracle Cloud Infrastructure,OCI,Machine Learning,ML,Artificial Intelligence,AI,Data Science,Cloud,Oracle
  Author: Oracle Data Science
@@ -39,7 +39,7 @@ Requires-Dist: httpx
  Requires-Dist: oracle_ads[opctl] ; extra == "anomaly"
  Requires-Dist: autots ; extra == "anomaly"
  Requires-Dist: oracledb ; extra == "anomaly"
- Requires-Dist: report-creator==1.0.32 ; extra == "anomaly"
+ Requires-Dist: report-creator==1.0.37 ; extra == "anomaly"
  Requires-Dist: rrcf==0.4.4 ; extra == "anomaly"
  Requires-Dist: scikit-learn<1.6.0 ; extra == "anomaly"
  Requires-Dist: salesforce-merlion[all]==2.0.4 ; extra == "anomaly"
@@ -78,7 +78,7 @@ Requires-Dist: sktime ; extra == "forecast"
  Requires-Dist: statsmodels ; extra == "forecast"
  Requires-Dist: plotly ; extra == "forecast"
  Requires-Dist: oracledb ; extra == "forecast"
- Requires-Dist: report-creator==1.0.32 ; extra == "forecast"
+ Requires-Dist: report-creator==1.0.37 ; extra == "forecast"
  Requires-Dist: geopandas<1.0.0 ; extra == "geo"
  Requires-Dist: fiona<=1.9.6 ; extra == "geo"
  Requires-Dist: oracle_ads[viz] ; extra == "geo"
@@ -122,11 +122,11 @@ Requires-Dist: scrubadub==2.0.1 ; extra == "pii"
  Requires-Dist: scrubadub_spacy ; extra == "pii"
  Requires-Dist: spacy-transformers==1.2.5 ; extra == "pii"
  Requires-Dist: spacy==3.6.1 ; extra == "pii"
- Requires-Dist: report-creator==1.0.32 ; extra == "pii"
+ Requires-Dist: report-creator>=1.0.32 ; extra == "pii"
  Requires-Dist: oracle_ads[opctl] ; extra == "recommender"
  Requires-Dist: scikit-surprise ; extra == "recommender"
  Requires-Dist: plotly ; extra == "recommender"
- Requires-Dist: report-creator==1.0.32 ; extra == "recommender"
+ Requires-Dist: report-creator==1.0.37 ; extra == "recommender"
  Requires-Dist: pyspark>=3.0.0 ; extra == "spark"
  Requires-Dist: oracle_ads[viz] ; extra == "tensorflow"
  Requires-Dist: tensorflow<=2.15.1 ; extra == "tensorflow"
@@ -564,9 +564,11 @@ ads/model/transformer/onnx_transformer.py,sha256=2aiG_-OaDaM0JJhM4w2vblZyI6q-1GB
  ads/mysqldb/__init__.py,sha256=yBa9sP_49XF0GDWWG-u1Q5ry-vXfmO61oUjNp7mdN74,204
  ads/mysqldb/mysql_db.py,sha256=nG9vRO_BItFB2z6hTptodMCMB-2fiuNVCrgTdVoSrjY,7828
  ads/opctl/__init__.py,sha256=uPSJLEAIl2RMMz5zWhhTYYEeWejfcMQTOK2qQFVVaPM,435
+ ads/opctl/anomaly_detection.py,sha256=9D2U7K_5W9O__AjKNnJoEk-cbYjp8G6w5905-i004mU,414
  ads/opctl/cli.py,sha256=5R7CLyk7UDODhvlVMj2i8i7_gFzYU2GsIs5kUbLQK68,20408
  ads/opctl/cmds.py,sha256=Z22y1yW82RY-BV1c7ooE5HtFdzILypgSOE2FKzswBUo,29135
  ads/opctl/constants.py,sha256=F-JcVhdidIwPweGmq2auHjvSfXpfkUljngIZXVb_1mQ,2183
+ ads/opctl/forecast.py,sha256=ZInj8FyXHkdjiEmXDc-jH1xT7JAGHEKwP0Lnq39xWGQ,418
  ads/opctl/index.yaml,sha256=cf9j3VXcNY-DvFlAZQLZj8-hA9QArWlirPtN1sRKqyM,68
  ads/opctl/schema.yaml.yml,sha256=L4eoHVFLu5tHPDOD53-dVGbscKkXG86i4IbRX-bVL2g,546
  ads/opctl/script.py,sha256=3AgTOjDnvmheu4ROrn56d38h8wZVOZwne_ix2x3U6bY,1181
@@ -702,7 +704,7 @@ ads/opctl/operator/lowcode/feature_store_marketplace/models/serializable_yaml_mo
  ads/opctl/operator/lowcode/forecast/MLoperator,sha256=xM8yBUQObjG_6Mg36f3Vv8b9N3L8_5RUZJE2riOjXuw,5981
  ads/opctl/operator/lowcode/forecast/README.md,sha256=kbCCEdo-0pwKlZp9ctnWUK6Z31n69IsnG0i26b202Zg,9768
  ads/opctl/operator/lowcode/forecast/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
- ads/opctl/operator/lowcode/forecast/__main__.py,sha256=2NmZ4Z-Hu9ViuH6LOQ27ciVN7uryho9Fxs3adfWQkbk,2894
+ ads/opctl/operator/lowcode/forecast/__main__.py,sha256=FTQhYCZEAbQO2sCOpepermKimSktFd9pERNu1rC-K3A,2926
  ads/opctl/operator/lowcode/forecast/cmd.py,sha256=uwU-QvnYwxoRFXZv7_JFkzAUnjTNoSsHEme2FF-9Rl0,1151
  ads/opctl/operator/lowcode/forecast/const.py,sha256=HJQFM35t-pG4g6z63YABx2ehuKfo9yBHklVbZrGpVzY,2615
  ads/opctl/operator/lowcode/forecast/environment.yaml,sha256=eVMf9pcjADI14_GRGdZOB_gK5_MyG_-cX037TXqzFho,330
@@ -712,18 +714,18 @@ ads/opctl/operator/lowcode/forecast/operator_config.py,sha256=fcq0WrqW4AYkcW6d_L
  ads/opctl/operator/lowcode/forecast/schema.yaml,sha256=nDrY-8Qyv-_6Olxi4CoUgyQe65h7I9CPYghtSVGIxVE,12437
  ads/opctl/operator/lowcode/forecast/utils.py,sha256=0ssrXBAEL5hjQX4avLPkSwFp3sKE8QV5M3K5InqvzYg,14137
  ads/opctl/operator/lowcode/forecast/model/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
- ads/opctl/operator/lowcode/forecast/model/arima.py,sha256=sWGTUxisV8ytUA-_MK54bdP2FVO_9BMD8-EsulJEYxE,11430
- ads/opctl/operator/lowcode/forecast/model/automlx.py,sha256=lXJoeMFHapyd5aYLi81T4WOV4ilheVz0FrNW6yE2dg4,19362
+ ads/opctl/operator/lowcode/forecast/model/arima.py,sha256=5e0LI-rwya1aovnOlll9ZHt2qYy8Jr--OA8w_cuCdD0,11617
+ ads/opctl/operator/lowcode/forecast/model/automlx.py,sha256=xQWgy2XjyXUlYzmETcomYC28xOE3xvn0q2AuGQEJHD4,19965
  ads/opctl/operator/lowcode/forecast/model/autots.py,sha256=RyLeD3dwMfrb6St-QFoH2MM8vH3inepVamRRovI-bwM,13086
- ads/opctl/operator/lowcode/forecast/model/base_model.py,sha256=h0PGYUKfO2CSH34EK3YtYnZHnpiRJThvIkwyIiKqxDI,33531
+ ads/opctl/operator/lowcode/forecast/model/base_model.py,sha256=LWU_W394J3yhjAbl98iRF9cTqWJ1Orp4l9x32KVjNQg,34510
  ads/opctl/operator/lowcode/forecast/model/factory.py,sha256=hSRPPWdpIRSMYPUFMIUuxc2TPZt-SG18MiqhtdfL3mg,3488
- ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py,sha256=BFZL-F2pec5Gb5UTRcFlNPi3LT65z4pGzRJvhgxK0TE,16562
+ ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py,sha256=K22itixKOcWMrMGL8CqfhnQZ18lnWUpFuuYvy7OKbcU,18120
  ads/opctl/operator/lowcode/forecast/model/ml_forecast.py,sha256=NSZ2L6gRw4S68BUF0Vyu-cUPSsq8LRxgoVajW9Ra63k,9640
- ads/opctl/operator/lowcode/forecast/model/neuralprophet.py,sha256=URtnP4oEMP7tGwe0WfWtfMFftAXQzN3K9RurAv_cgnY,19251
- ads/opctl/operator/lowcode/forecast/model/prophet.py,sha256=yiCIP0bR0jg-b2XHVJSfO7CFZ3_GXEnpLkW_MkV45Jo,14983
+ ads/opctl/operator/lowcode/forecast/model/neuralprophet.py,sha256=Vu3z97CYAAEaumg-4W5PdgC21izd7aFxWBdWzBJSs4U,19394
+ ads/opctl/operator/lowcode/forecast/model/prophet.py,sha256=slbpvD0Zz1zrRttxouL_cERjIP_SMxtnUMZ1_kVwu6g,15534
  ads/opctl/operator/lowcode/forecast/whatifserve/__init__.py,sha256=JNDDjLrNorKXMHUuXMifqXea3eheST-lnrcwCl2bWrk,242
  ads/opctl/operator/lowcode/forecast/whatifserve/deployment_manager.py,sha256=fTu5h18dyNi61wX4u0bcevBVd5QCx2avpW4g1Ry-xwM,11168
- ads/opctl/operator/lowcode/forecast/whatifserve/score.py,sha256=KpWx7fGFGPb5VUKIoMpEDbUs6q9j3hT-Zax7rsbfYuw,8172
+ ads/opctl/operator/lowcode/forecast/whatifserve/score.py,sha256=JjEDtrqUfL4x9t-vvafXMLNwY9-vgc6QPX_Ee-wmI58,8709
  ads/opctl/operator/lowcode/pii/MLoperator,sha256=GKCuiXRwfGLyBqELbtgtg-kJPtNWNVA-kSprYTqhF64,6406
  ads/opctl/operator/lowcode/pii/README.md,sha256=2P3tpKv6v__Eehj6iLfTXgyDhS4lmi1BTfEdmJhT0K4,9237
  ads/opctl/operator/lowcode/pii/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
@@ -843,8 +845,8 @@ ads/type_discovery/unknown_detector.py,sha256=yZuYQReO7PUyoWZE7onhhtYaOg6088wf1y
  ads/type_discovery/zipcode_detector.py,sha256=3AlETg_ZF4FT0u914WXvTT3F3Z6Vf51WiIt34yQMRbw,1421
  ads/vault/__init__.py,sha256=x9tMdDAOdF5iDHk9u2di_K-ze5Nq068x25EWOBoWwqY,245
  ads/vault/vault.py,sha256=hFBkpYE-Hfmzu1L0sQwUfYcGxpWmgG18JPndRl0NOXI,8624
- oracle_ads-2.13.0.dist-info/entry_points.txt,sha256=9VFnjpQCsMORA4rVkvN8eH6D3uHjtegb9T911t8cqV0,35
- oracle_ads-2.13.0.dist-info/LICENSE.txt,sha256=zoGmbfD1IdRKx834U0IzfFFFo5KoFK71TND3K9xqYqo,1845
- oracle_ads-2.13.0.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
- oracle_ads-2.13.0.dist-info/METADATA,sha256=ZKRWF3LwXQmV0JIdEXnkKVz8GzFRACsNzXmSbRyJMFU,16279
- oracle_ads-2.13.0.dist-info/RECORD,,
+ oracle_ads-2.13.1rc0.dist-info/entry_points.txt,sha256=9VFnjpQCsMORA4rVkvN8eH6D3uHjtegb9T911t8cqV0,35
+ oracle_ads-2.13.1rc0.dist-info/LICENSE.txt,sha256=zoGmbfD1IdRKx834U0IzfFFFo5KoFK71TND3K9xqYqo,1845
+ oracle_ads-2.13.1rc0.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
+ oracle_ads-2.13.1rc0.dist-info/METADATA,sha256=Bp8bLw33VEHDWbLQHmE3RF91Kig2XcIEgwVJ8D5uiY8,16282
+ oracle_ads-2.13.1rc0.dist-info/RECORD,,