oracle-ads 2.12.11__py3-none-any.whl → 2.13.1rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. ads/aqua/app.py +23 -10
  2. ads/aqua/common/enums.py +19 -14
  3. ads/aqua/common/errors.py +3 -4
  4. ads/aqua/common/utils.py +2 -2
  5. ads/aqua/constants.py +1 -0
  6. ads/aqua/evaluation/constants.py +7 -7
  7. ads/aqua/evaluation/errors.py +3 -4
  8. ads/aqua/extension/model_handler.py +23 -0
  9. ads/aqua/extension/models/ws_models.py +5 -6
  10. ads/aqua/finetuning/constants.py +3 -3
  11. ads/aqua/model/constants.py +7 -7
  12. ads/aqua/model/enums.py +4 -5
  13. ads/aqua/model/model.py +22 -0
  14. ads/aqua/modeldeployment/entities.py +3 -1
  15. ads/common/auth.py +33 -20
  16. ads/common/extended_enum.py +52 -44
  17. ads/llm/__init__.py +11 -8
  18. ads/llm/langchain/plugins/embeddings/__init__.py +4 -0
  19. ads/llm/langchain/plugins/embeddings/oci_data_science_model_deployment_endpoint.py +184 -0
  20. ads/model/artifact_downloader.py +3 -4
  21. ads/model/datascience_model.py +84 -64
  22. ads/model/generic_model.py +3 -3
  23. ads/model/model_metadata.py +17 -11
  24. ads/model/service/oci_datascience_model.py +12 -14
  25. ads/opctl/anomaly_detection.py +11 -0
  26. ads/opctl/backend/marketplace/helm_helper.py +13 -14
  27. ads/opctl/cli.py +4 -5
  28. ads/opctl/cmds.py +28 -32
  29. ads/opctl/config/merger.py +8 -11
  30. ads/opctl/config/resolver.py +25 -30
  31. ads/opctl/forecast.py +11 -0
  32. ads/opctl/operator/cli.py +9 -9
  33. ads/opctl/operator/common/backend_factory.py +56 -60
  34. ads/opctl/operator/common/const.py +5 -5
  35. ads/opctl/operator/lowcode/anomaly/const.py +8 -9
  36. ads/opctl/operator/lowcode/feature_store_marketplace/operator_utils.py +43 -48
  37. ads/opctl/operator/lowcode/forecast/__main__.py +5 -5
  38. ads/opctl/operator/lowcode/forecast/const.py +6 -6
  39. ads/opctl/operator/lowcode/forecast/model/arima.py +6 -3
  40. ads/opctl/operator/lowcode/forecast/model/automlx.py +53 -31
  41. ads/opctl/operator/lowcode/forecast/model/base_model.py +57 -30
  42. ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +60 -2
  43. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +5 -2
  44. ads/opctl/operator/lowcode/forecast/model/prophet.py +28 -15
  45. ads/opctl/operator/lowcode/forecast/whatifserve/score.py +19 -11
  46. ads/opctl/operator/lowcode/pii/constant.py +6 -7
  47. ads/opctl/operator/lowcode/recommender/constant.py +12 -7
  48. ads/opctl/operator/runtime/marketplace_runtime.py +4 -10
  49. ads/opctl/operator/runtime/runtime.py +4 -6
  50. ads/pipeline/ads_pipeline_run.py +13 -25
  51. ads/pipeline/visualizer/graph_renderer.py +3 -4
  52. {oracle_ads-2.12.11.dist-info → oracle_ads-2.13.1rc0.dist-info}/METADATA +6 -6
  53. {oracle_ads-2.12.11.dist-info → oracle_ads-2.13.1rc0.dist-info}/RECORD +56 -52
  54. {oracle_ads-2.12.11.dist-info → oracle_ads-2.13.1rc0.dist-info}/LICENSE.txt +0 -0
  55. {oracle_ads-2.12.11.dist-info → oracle_ads-2.13.1rc0.dist-info}/WHEEL +0 -0
  56. {oracle_ads-2.12.11.dist-info → oracle_ads-2.13.1rc0.dist-info}/entry_points.txt +0 -0
ads/opctl/operator/lowcode/forecast/model/base_model.py

@@ -1,6 +1,6 @@
  #!/usr/bin/env python

- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import logging
@@ -19,6 +19,7 @@ import report_creator as rc
  from ads.common.decorator.runtime_dependency import runtime_dependency
  from ads.common.object_storage_details import ObjectStorageDetails
  from ads.opctl import logger
+ from ads.opctl.operator.lowcode.common.const import DataColumns
  from ads.opctl.operator.lowcode.common.utils import (
      datetime_to_seconds,
      disable_print,
@@ -28,7 +29,6 @@ from ads.opctl.operator.lowcode.common.utils import (
      seconds_to_datetime,
      write_data,
  )
- from ads.opctl.operator.lowcode.common.const import DataColumns
  from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import TestData
  from ads.opctl.operator.lowcode.forecast.utils import (
      _build_metrics_df,
@@ -49,10 +49,9 @@ from ..const import (
      SpeedAccuracyMode,
      SupportedMetrics,
      SupportedModels,
-     BACKTEST_REPORT_NAME,
  )
  from ..operator_config import ForecastOperatorConfig, ForecastOperatorSpec
- from .forecast_datasets import ForecastDatasets
+ from .forecast_datasets import ForecastDatasets, ForecastResults

  logging.getLogger("report_creator").setLevel(logging.WARNING)

@@ -127,8 +126,9 @@ class ForecastOperatorBaseModel(ABC):
          if self.spec.generate_report or self.spec.generate_metrics:
              self.eval_metrics = self.generate_train_metrics()
              if not self.target_cat_col:
-                 self.eval_metrics.rename({"Series 1": self.original_target_column},
-                                          axis=1, inplace=True)
+                 self.eval_metrics.rename(
+                     {"Series 1": self.original_target_column}, axis=1, inplace=True
+                 )

          if self.spec.test_data:
              try:
@@ -140,8 +140,11 @@ class ForecastOperatorBaseModel(ABC):
                      elapsed_time=elapsed_time,
                  )
                  if not self.target_cat_col:
-                     self.test_eval_metrics.rename({"Series 1": self.original_target_column},
-                                                   axis=1, inplace=True)
+                     self.test_eval_metrics.rename(
+                         {"Series 1": self.original_target_column},
+                         axis=1,
+                         inplace=True,
+                     )
              except Exception:
                  logger.warn("Unable to generate Test Metrics.")
                  logger.debug(f"Full Traceback: {traceback.format_exc()}")
@@ -223,17 +226,23 @@ class ForecastOperatorBaseModel(ABC):
              rc.Block(
                  first_10_title,
                  # series_subtext,
-                 rc.Select(blocks=first_5_rows_blocks) if self.target_cat_col else first_5_rows_blocks[0],
+                 rc.Select(blocks=first_5_rows_blocks)
+                 if self.target_cat_col
+                 else first_5_rows_blocks[0],
              ),
              rc.Block(
                  last_10_title,
                  # series_subtext,
-                 rc.Select(blocks=last_5_rows_blocks) if self.target_cat_col else last_5_rows_blocks[0],
+                 rc.Select(blocks=last_5_rows_blocks)
+                 if self.target_cat_col
+                 else last_5_rows_blocks[0],
              ),
              rc.Block(
                  summary_title,
                  # series_subtext,
-                 rc.Select(blocks=data_summary_blocks) if self.target_cat_col else data_summary_blocks[0],
+                 rc.Select(blocks=data_summary_blocks)
+                 if self.target_cat_col
+                 else data_summary_blocks[0],
              ),
              rc.Separator(),
          )
@@ -308,7 +317,7 @@ class ForecastOperatorBaseModel(ABC):
                  horizon=self.spec.horizon,
                  test_data=test_data,
                  ci_interval_width=self.spec.confidence_interval_width,
-                 target_category_column=self.target_cat_col
+                 target_category_column=self.target_cat_col,
              )
              if (
                  series_name is not None
@@ -341,11 +350,12 @@ class ForecastOperatorBaseModel(ABC):
              )

          # save the report and result CSV
-         self._save_report(
+         return self._save_report(
              report_sections=report_sections,
              result_df=result_df,
              metrics_df=self.eval_metrics,
              test_metrics_df=self.test_eval_metrics,
+             test_data=test_data,
          )

      def _test_evaluate_metrics(self, elapsed_time=0):
@@ -462,10 +472,12 @@ class ForecastOperatorBaseModel(ABC):
          result_df: pd.DataFrame,
          metrics_df: pd.DataFrame,
          test_metrics_df: pd.DataFrame,
+         test_data: pd.DataFrame,
      ):
          """Saves resulting reports to the given folder."""

          unique_output_dir = self.spec.output_directory.url
+         results = ForecastResults()

          if ObjectStorageDetails.is_oci_path(unique_output_dir):
              storage_options = default_signer()
@@ -491,13 +503,23 @@ class ForecastOperatorBaseModel(ABC):
                  f2.write(f1.read())

          # forecast csv report
-         result_df = result_df if self.target_cat_col else result_df.drop(DataColumns.Series, axis=1)
+         # todo: add test data into forecast.csv
+         # if self.spec.test_data is not None:
+         # test_data_dict = test_data.get_dict_by_series()
+         # for series_id, test_data_values in test_data_dict.items():
+         # result_df[DataColumns.Series] = test_data_values[]
+         result_df = (
+             result_df
+             if self.target_cat_col
+             else result_df.drop(DataColumns.Series, axis=1)
+         )
          write_data(
              data=result_df,
              filename=os.path.join(unique_output_dir, self.spec.forecast_filename),
              format="csv",
              storage_options=storage_options,
          )
+         results.set_forecast(result_df)

          # metrics csv report
          if self.spec.generate_metrics:
@@ -507,10 +529,11 @@ class ForecastOperatorBaseModel(ABC):
                  else "Series 1"
              )
              if metrics_df is not None:
+                 metrics_df_formatted = metrics_df.reset_index().rename(
+                     {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+                 )
                  write_data(
-                     data=metrics_df.reset_index().rename(
-                         {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                     ),
+                     data=metrics_df_formatted,
                      filename=os.path.join(
                          unique_output_dir, self.spec.metrics_filename
                      ),
@@ -518,6 +541,7 @@ class ForecastOperatorBaseModel(ABC):
                      storage_options=storage_options,
                      index=False,
                  )
+                 results.set_metrics(metrics_df_formatted)
              else:
                  logger.warn(
                      f"Attempted to generate the {self.spec.metrics_filename} file with the training metrics, however the training metrics could not be properly generated."
@@ -526,10 +550,11 @@ class ForecastOperatorBaseModel(ABC):
          # test_metrics csv report
          if self.spec.test_data is not None:
              if test_metrics_df is not None:
+                 test_metrics_df_formatted = test_metrics_df.reset_index().rename(
+                     {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+                 )
                  write_data(
-                     data=test_metrics_df.reset_index().rename(
-                         {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                     ),
+                     data=test_metrics_df_formatted,
                      filename=os.path.join(
                          unique_output_dir, self.spec.test_metrics_filename
                      ),
@@ -537,6 +562,7 @@ class ForecastOperatorBaseModel(ABC):
                      storage_options=storage_options,
                      index=False,
                  )
+                 results.set_test_metrics(test_metrics_df_formatted)
              else:
                  logger.warn(
                      f"Attempted to generate the {self.spec.test_metrics_filename} file with the test metrics, however the test metrics could not be properly generated."
@@ -554,6 +580,7 @@ class ForecastOperatorBaseModel(ABC):
                      storage_options=storage_options,
                      index=True,
                  )
+                 results.set_global_explanations(self.formatted_global_explanation)
              else:
                  logger.warn(
                      f"Attempted to generate global explanations for the {self.spec.global_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -569,6 +596,7 @@ class ForecastOperatorBaseModel(ABC):
                      storage_options=storage_options,
                      index=True,
                  )
+                 results.set_local_explanations(self.formatted_local_explanation)
              else:
                  logger.warn(
                      f"Attempted to generate local explanations for the {self.spec.local_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -589,10 +617,12 @@ class ForecastOperatorBaseModel(ABC):
                  index=True,
                  indent=4,
              )
+             results.set_model_parameters(self.model_parameters)

          # model pickle
          if self.spec.generate_model_pickle:
              self._save_model(unique_output_dir, storage_options)
+             results.set_models(self.models)

          logger.info(
              f"The outputs have been successfully "
@@ -612,8 +642,10 @@ class ForecastOperatorBaseModel(ABC):
                  index=True,
                  indent=4,
              )
+             results.set_errors_dict(self.errors_dict)
          else:
              logger.info("All modeling completed successfully.")
+         return results

      def preprocess(self, df, series_id):
          """The method that needs to be implemented on the particular model level."""
@@ -667,7 +699,10 @@ class ForecastOperatorBaseModel(ABC):
          )

      def _validate_automlx_explanation_mode(self):
-         if self.spec.model != SupportedModels.AutoMLX and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX:
+         if (
+             self.spec.model != SupportedModels.AutoMLX
+             and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX
+         ):
              raise ValueError(
                  "AUTOMLX explanation accuracy mode is only supported for AutoMLX models. "
                  "Please select mode other than AUTOMLX from the available explanations_accuracy_mode options"
@@ -738,14 +773,6 @@ class ForecastOperatorBaseModel(ABC):
                  logger.warn(
                      "No explanations generated. Ensure that additional data has been provided."
                  )
-             elif (
-                 self.spec.model == SupportedModels.AutoMLX
-                 and self.spec.explanations_accuracy_mode
-                 == SpeedAccuracyMode.AUTOMLX
-             ):
-                 logger.warning(
-                     "Global explanations not available for AutoMLX models with inherent explainability"
-                 )
              else:
                  self.global_explanation[s_id] = dict(
                      zip(
@@ -794,7 +821,7 @@ class ForecastOperatorBaseModel(ABC):
      def get_explain_predict_fn(self, series_id, fcst_col_name="yhat"):
          def _custom_predict(
              data,
-             model=self.models[series_id],
+             model=self.models[series_id]["model"],
              dt_column_name=self.datasets._datetime_column_name,
          ):
              """
ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py

@@ -1,8 +1,10 @@
  #!/usr/bin/env python

- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

+ from typing import Dict, List
+
  import pandas as pd

  from ads.opctl import logger
@@ -167,7 +169,7 @@ class ForecastDatasets:
                  self.historical_data.data,
                  self.additional_data.data,
              ],
-             axis=1
+             axis=1,
          )

      def get_data_by_series(self, include_horizon=True):
@@ -416,3 +418,59 @@ class ForecastOutput:
          for df in self.series_id_map.values():
              output = pd.concat([output, df])
          return output.reset_index(drop=True)
+
+
+ class ForecastResults:
+     """
+     Forecast Results contains all outputs from the forecast run.
+     This class is returned to users who use the Forecast's `operate` method.
+
+     """
+
+     def set_forecast(self, df: pd.DataFrame):
+         self.forecast = df
+
+     def get_forecast(self):
+         return getattr(self, "forecast", None)
+
+     def set_metrics(self, df: pd.DataFrame):
+         self.metrics = df
+
+     def get_metrics(self):
+         return getattr(self, "metrics", None)
+
+     def set_test_metrics(self, df: pd.DataFrame):
+         self.test_metrics = df
+
+     def get_test_metrics(self):
+         return getattr(self, "test_metrics", None)
+
+     def set_local_explanations(self, df: pd.DataFrame):
+         self.local_explanations = df
+
+     def get_local_explanations(self):
+         return getattr(self, "local_explanations", None)
+
+     def set_global_explanations(self, df: pd.DataFrame):
+         self.global_explanations = df
+
+     def get_global_explanations(self):
+         return getattr(self, "global_explanations", None)
+
+     def set_model_parameters(self, df: pd.DataFrame):
+         self.model_parameters = df
+
+     def get_model_parameters(self):
+         return getattr(self, "model_parameters", None)
+
+     def set_models(self, models: List):
+         self.models = models
+
+     def get_models(self):
+         return getattr(self, "models", None)
+
+     def set_errors_dict(self, errors_dict: Dict):
+         self.errors_dict = errors_dict
+
+     def get_errors_dict(self):
+         return getattr(self, "errors_dict", None)
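Note: the new ForecastResults container is what _save_report (see the base_model.py hunks above) now returns, so a forecast run can hand its artifacts back to the caller instead of only writing files. A minimal consumption sketch, assuming a ForecastResults object obtained from a forecast operator run; summarize_results is a hypothetical helper, and each getter returns None when the corresponding artifact was not produced:

    from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import ForecastResults

    def summarize_results(results: ForecastResults) -> None:
        """Report which artifacts a forecast run produced (illustrative helper)."""
        artifacts = {
            "forecast": results.get_forecast(),
            "metrics": results.get_metrics(),
            "test_metrics": results.get_test_metrics(),
            "global_explanations": results.get_global_explanations(),
            "local_explanations": results.get_local_explanations(),
            "errors": results.get_errors_dict(),
        }
        for name, value in artifacts.items():
            # Getters fall back to None via getattr(), so absence is easy to detect.
            print(f"{name}: {'produced' if value is not None else 'not produced'}")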
ads/opctl/operator/lowcode/forecast/model/neuralprophet.py

@@ -172,8 +172,10 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
              ).values,
          )

-         self.models[s_id] = model
          self.trainers[s_id] = model.trainer
+         self.models[s_id] = {}
+         self.models[s_id]["model"] = model
+         self.models[s_id]["le"] = self.le[s_id]

          self.model_parameters[s_id] = {
              "framework": SupportedModels.NeuralProphet,
@@ -355,7 +357,8 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):

          sec5_text = rc.Heading("Neural Prophet Model Parameters", level=2)
          model_states = []
-         for s_id, m in self.models.items():
+         for s_id, artifacts in self.models.items():
+             m = artifacts["model"]
              model_states.append(
                  pd.Series(
                      m.state_dict(),
ads/opctl/operator/lowcode/forecast/model/prophet.py

@@ -1,6 +1,6 @@
  #!/usr/bin/env python

- # Copyright (c) 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2024, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  import logging
@@ -43,7 +43,11 @@ def _add_unit(num, unit):
  def _fit_model(data, params, additional_regressors):
      from prophet import Prophet

+     monthly_seasonality = params.pop("monthly_seasonality", False)
      model = Prophet(**params)
+     if monthly_seasonality:
+         model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
+     params["monthly_seasonality"] = monthly_seasonality
      for add_reg in additional_regressors:
          model.add_regressor(add_reg)
      model.fit(data)
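Note: monthly_seasonality is not a Prophet constructor argument, so the operator now pops it from the params dict, builds the model, registers a custom ~30.5-day seasonality, and then restores the key so the original params can still be reported. A standalone sketch of the same pattern, with an assumed toy dataset:

    import pandas as pd
    from prophet import Prophet

    params = {"yearly_seasonality": True, "monthly_seasonality": True}

    # Pop the non-Prophet key before constructing the model, then register the
    # monthly component explicitly; period=30.5 approximates one month in days.
    monthly_seasonality = params.pop("monthly_seasonality", False)
    model = Prophet(**params)
    if monthly_seasonality:
        model.add_seasonality(name="monthly", period=30.5, fourier_order=5)

    # Toy data purely for illustration.
    df = pd.DataFrame(
        {"ds": pd.date_range("2024-01-01", periods=120, freq="D"), "y": range(120)}
    )
    model.fit(df)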
@@ -108,7 +112,10 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
              upper_bound=self.get_horizon(forecast["yhat_upper"]).values,
              lower_bound=self.get_horizon(forecast["yhat_lower"]).values,
          )
-         self.models[series_id] = model
+
+         self.models[series_id] = {}
+         self.models[series_id]["model"] = model
+         self.models[series_id]["le"] = self.le[series_id]

          params = vars(model).copy()
          for param in ["history", "history_dates", "stan_fit"]:
@@ -252,11 +259,11 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
          all_sections = []
          if len(series_ids) > 0:
              sec1 = _select_plot_list(
-                 lambda s_id: self.models[s_id].plot(
+                 lambda s_id: self.models[s_id]["model"].plot(
                      self.outputs[s_id], include_legend=True
                  ),
                  series_ids=series_ids,
-                 target_category_column=self.target_cat_col
+                 target_category_column=self.target_cat_col,
              )
              section_1 = rc.Block(
                  rc.Heading("Forecast Overview", level=2),
@@ -267,25 +274,25 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
              )

              sec2 = _select_plot_list(
-                 lambda s_id: self.models[s_id].plot_components(self.outputs[s_id]),
+                 lambda s_id: self.models[s_id]["model"].plot_components(self.outputs[s_id]),
                  series_ids=series_ids,
-                 target_category_column=self.target_cat_col
+                 target_category_column=self.target_cat_col,
              )
              section_2 = rc.Block(
                  rc.Heading("Forecast Broken Down by Trend Component", level=2), sec2
              )

              sec3_figs = {
-                 s_id: self.models[s_id].plot(self.outputs[s_id]) for s_id in series_ids
+                 s_id: self.models[s_id]["model"].plot(self.outputs[s_id]) for s_id in series_ids
              }
              for s_id in series_ids:
                  add_changepoints_to_plot(
-                     sec3_figs[s_id].gca(), self.models[s_id], self.outputs[s_id]
+                     sec3_figs[s_id].gca(), self.models[s_id]["model"], self.outputs[s_id]
                  )
              sec3 = _select_plot_list(
                  lambda s_id: sec3_figs[s_id],
                  series_ids=series_ids,
-                 target_category_column=self.target_cat_col
+                 target_category_column=self.target_cat_col,
              )
              section_3 = rc.Block(rc.Heading("Forecast Changepoints", level=2), sec3)

@@ -294,12 +301,14 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
          sec5_text = rc.Heading("Prophet Model Seasonality Components", level=2)
          model_states = []
          for s_id in series_ids:
-             m = self.models[s_id]
+             m = self.models[s_id]["model"]
              model_states.append(
                  pd.Series(
                      m.seasonalities,
                      index=pd.Index(m.seasonalities.keys(), dtype="object"),
-                     name=s_id if self.target_cat_col else self.original_target_column,
+                     name=s_id
+                     if self.target_cat_col
+                     else self.original_target_column,
                      dtype="object",
                  )
              )
@@ -330,11 +339,15 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
          self.formatted_local_explanation = aggregate_local_explanations

          if not self.target_cat_col:
-             self.formatted_global_explanation = self.formatted_global_explanation.rename(
-                 {"Series 1": self.original_target_column},
-                 axis=1,
+             self.formatted_global_explanation = (
+                 self.formatted_global_explanation.rename(
+                     {"Series 1": self.original_target_column},
+                     axis=1,
+                 )
+             )
+             self.formatted_local_explanation.drop(
+                 "Series", axis=1, inplace=True
              )
-             self.formatted_local_explanation.drop("Series", axis=1, inplace=True)

          # Create a markdown section for the global explainability
          global_explanation_section = rc.Block(
ads/opctl/operator/lowcode/forecast/whatifserve/score.py

@@ -151,34 +151,42 @@ def get_forecast(future_df, model_name, series_id, model_object, date_col, targe
          pred_obj = model_object.predict(future_regressor=future_reg)
          return pred_obj.forecast[series_id].tolist()
      elif model_name == SupportedModels.Prophet and series_id in model_object:
-         model = model_object[series_id]
+         model = model_object[series_id]['model']
+         label_encoder = model_object[series_id]['le']
          processed = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
-         forecast = model.predict(processed)
+         encoded_df = label_encoder.transform(processed)
+         forecast = model.predict(encoded_df)
          return forecast['yhat'].tolist()
      elif model_name == SupportedModels.NeuralProphet and series_id in model_object:
-         model = model_object[series_id]
+         model = model_object[series_id]['model']
+         label_encoder = model_object[series_id]['le']
          model.restore_trainer()
          accepted_regressors = list(model.config_regressors.regressors.keys())
          data = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
-         future = data[accepted_regressors + ["ds"]].reset_index(drop=True)
+         encoded_df = label_encoder.transform(data)
+         future = encoded_df[accepted_regressors + ["ds"]].reset_index(drop=True)
          future["y"] = None
          forecast = model.predict(future)
          return forecast['yhat1'].tolist()
      elif model_name == SupportedModels.Arima and series_id in model_object:
-         model = model_object[series_id]
-         future_df = future_df.set_index(date_col_name)
-         x_pred = future_df.drop(target_cat_col, axis=1)
+         model = model_object[series_id]['model']
+         label_encoder = model_object[series_id]['le']
+         predict_cols = model_object[series_id]["predict_component_cols"]
+         encoded_df = label_encoder.transform(future_df)
+         x_pred = encoded_df.set_index(date_col_name)
+         x_pred = x_pred.drop(target_cat_col, axis=1)
          yhat, conf_int = model.predict(
              n_periods=horizon,
-             X=x_pred,
+             X=x_pred[predict_cols],
              return_conf_int=True
          )
          yhat_clean = pd.DataFrame(yhat, index=yhat.index, columns=["yhat"])
          return yhat_clean['yhat'].tolist()
      elif model_name == SupportedModels.AutoMLX and series_id in model_object:
-         # automlx model
-         model = model_object[series_id]
-         x_pred = future_df.drop(target_cat_col, axis=1)
+         model = model_object[series_id]['model']
+         label_encoder = model_object[series_id]['le']
+         encoded_df = label_encoder.transform(future_df)
+         x_pred = encoded_df.drop(target_cat_col, axis=1)
          x_pred = x_pred.set_index(date_col_name)
          forecast = model.forecast(
              X=x_pred,
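Note: because the pickled artifact now stores {"model": ..., "le": ...} per series, the deployed what-if scoring path first looks up both objects and re-applies the label encoder to the incoming frame before predicting. A condensed sketch of that lookup, restating only the Prophet branch above and assuming pandas DataFrames for the inputs:

    import pandas as pd

    def prophet_forecast(model_object: dict, series_id: str, future_df: pd.DataFrame,
                         date_col_name: str, target_column: str) -> list:
        """Condensed restatement of the Prophet branch above (illustrative only)."""
        artifacts = model_object[series_id]
        model = artifacts["model"]          # fitted Prophet model
        label_encoder = artifacts["le"]     # per-series label encoder pickled alongside

        processed = future_df.rename(columns={date_col_name: "ds", target_column: "y"})
        encoded_df = label_encoder.transform(processed)  # re-encode categorical columns
        forecast = model.predict(encoded_df)
        return forecast["yhat"].tolist()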
ads/opctl/operator/lowcode/pii/constant.py

@@ -1,9 +1,8 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
- from ads.common.extended_enum import ExtendedEnumMeta
+ from ads.common.extended_enum import ExtendedEnum

  DEFAULT_SHOW_ROWS = 25
  DEFAULT_TIME_OUT = 5
@@ -12,7 +11,7 @@ DEFAULT_REPORT_FILENAME = "report.html"
  DEFAULT_TARGET_COLUMN = "target"


- class SupportedAction(str, metaclass=ExtendedEnumMeta):
+ class SupportedAction(ExtendedEnum):
      """Supported action to process detected entities."""

      MASK = "mask"
@@ -20,19 +19,19 @@ class SupportedAction(str, metaclass=ExtendedEnumMeta):
      ANONYMIZE = "anonymize"


- class SupportedDetector(str, metaclass=ExtendedEnumMeta):
+ class SupportedDetector(ExtendedEnum):
      """Supported pii detectors."""

      DEFAULT = "default"
      SPACY = "spacy"


- class DataFrameColumn(str, metaclass=ExtendedEnumMeta):
+ class DataFrameColumn(ExtendedEnum):
      REDACTED_TEXT: str = "redacted_text"
      ENTITIES: str = "entities_cols"


- class YamlKey(str, metaclass=ExtendedEnumMeta):
+ class YamlKey(ExtendedEnum):
      """Yaml key used in pii.yaml."""

      pass
ads/opctl/operator/lowcode/recommender/constant.py

@@ -1,25 +1,30 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

- # Copyright (c) 2023 Oracle and/or its affiliates.
+ # Copyright (c) 2023, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

- from ads.common.extended_enum import ExtendedEnumMeta
+ from ads.common.extended_enum import ExtendedEnum

  DEFAULT_SHOW_ROWS = 25
  DEFAULT_REPORT_FILENAME = "report.html"

- class OutputColumns(str, metaclass=ExtendedEnumMeta):
+
+ class OutputColumns(ExtendedEnum):
      """output columns for recommender operator"""
+
      USER_COL = "user"
      ITEM_COL = "item"
      SCORE = "score"

- class SupportedMetrics(str, metaclass=ExtendedEnumMeta):
+
+ class SupportedMetrics(ExtendedEnum):
      """Supported recommender metrics."""
+
      RMSE = "RMSE"
      MAE = "MAE"

- class SupportedModels(str, metaclass=ExtendedEnumMeta):
+
+ class SupportedModels(ExtendedEnum):
      """Supported recommender models."""
-     SVD = "svd"
+
+     SVD = "svd"
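Note: these constant modules (and the pii constants above) all migrate from the str + ExtendedEnumMeta metaclass style to subclassing the reworked ExtendedEnum from ads.common.extended_enum; the marketplace runtime hunk below drops a .value access for the same reason. A minimal sketch of the new style; the equality check reflects the assumption that members remain plain strings:

    from ads.common.extended_enum import ExtendedEnum

    class SupportedModels(ExtendedEnum):
        """Supported recommender models (mirrors the class defined above)."""
        SVD = "svd"

    # Assumption: members behave as plain strings, which is why call sites such as
    # marketplace_runtime.py no longer need `.value`.
    assert SupportedModels.SVD == "svd"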
ads/opctl/operator/runtime/marketplace_runtime.py

@@ -1,20 +1,14 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

- # Copyright (c) 2024 Oracle and/or its affiliates.
+ # Copyright (c) 2024, 2025 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  from dataclasses import dataclass
  from typing import ClassVar, Dict

- from ads.opctl.operator.common.utils import _load_yaml_from_uri
-
- from ads.opctl.operator.common.operator_yaml_generator import YamlGenerator
-
- from ads.common.serializer import DataClassSerializable
-
  from ads.common.extended_enum import ExtendedEnum
-
+ from ads.opctl.operator.common.operator_yaml_generator import YamlGenerator
+ from ads.opctl.operator.common.utils import _load_yaml_from_uri
  from ads.opctl.operator.runtime.runtime import Runtime


@@ -30,7 +24,7 @@ class MarketplacePythonRuntime(Runtime):
      """Represents a python operator runtime."""

      _schema: ClassVar[str] = "python_marketplace_runtime_schema.yaml"
-     type: str = OPERATOR_MARKETPLACE_LOCAL_RUNTIME_TYPE.PYTHON.value
+     type: str = OPERATOR_MARKETPLACE_LOCAL_RUNTIME_TYPE.PYTHON
      version: str = "v1"

      def __init__(self, **kwargs):