oracle-ads 2.12.7__py3-none-any.whl → 2.12.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. ads/aqua/app.py +12 -2
  2. ads/aqua/evaluation/entities.py +6 -0
  3. ads/aqua/evaluation/evaluation.py +25 -3
  4. ads/aqua/extension/deployment_handler.py +8 -4
  5. ads/aqua/extension/model_handler.py +9 -7
  6. ads/aqua/extension/ui_handler.py +13 -1
  7. ads/aqua/finetuning/entities.py +6 -0
  8. ads/aqua/finetuning/finetuning.py +47 -34
  9. ads/aqua/model/entities.py +2 -0
  10. ads/aqua/model/model.py +34 -6
  11. ads/aqua/modeldeployment/deployment.py +28 -10
  12. ads/aqua/modeldeployment/entities.py +7 -4
  13. ads/aqua/ui.py +24 -2
  14. ads/llm/guardrails/base.py +6 -5
  15. ads/llm/langchain/plugins/chat_models/oci_data_science.py +34 -9
  16. ads/llm/langchain/plugins/llms/oci_data_science_model_deployment_endpoint.py +38 -11
  17. ads/opctl/operator/common/utils.py +6 -4
  18. ads/opctl/operator/lowcode/anomaly/model/base_model.py +2 -3
  19. ads/opctl/operator/lowcode/anomaly/model/factory.py +2 -2
  20. ads/opctl/operator/lowcode/common/transformations.py +14 -10
  21. ads/opctl/operator/lowcode/common/utils.py +37 -37
  22. ads/opctl/operator/lowcode/forecast/const.py +1 -0
  23. ads/opctl/operator/lowcode/forecast/model/automlx.py +10 -2
  24. ads/opctl/operator/lowcode/forecast/model/base_model.py +10 -15
  25. ads/opctl/operator/lowcode/forecast/model/factory.py +3 -2
  26. ads/opctl/operator/lowcode/forecast/model/prophet.py +4 -1
  27. ads/opctl/operator/lowcode/forecast/model_evaluator.py +3 -2
  28. ads/opctl/operator/lowcode/forecast/schema.yaml +1 -1
  29. ads/opctl/operator/lowcode/forecast/utils.py +4 -3
  30. ads/opctl/operator/lowcode/pii/model/factory.py +7 -5
  31. ads/opctl/operator/lowcode/recommender/model/base_model.py +2 -1
  32. ads/opctl/operator/lowcode/recommender/model/factory.py +4 -6
  33. ads/opctl/operator/lowcode/recommender/model/svd.py +5 -5
  34. {oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/METADATA +3 -3
  35. {oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/RECORD +38 -38
  36. {oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/LICENSE.txt +0 -0
  37. {oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/WHEEL +0 -0
  38. {oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/entry_points.txt +0 -0
ads/opctl/operator/lowcode/common/utils.py

@@ -1,42 +1,32 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*--

 # Copyright (c) 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

-import argparse
 import logging
 import os
 import shutil
 import sys
 import tempfile
-import time
-from string import Template
-from typing import Any, Dict, List, Tuple
-import pandas as pd
-from ads.opctl import logger
-import oracledb
+from typing import List, Union

 import fsspec
-import yaml
-from typing import Union
+import oracledb
+import pandas as pd

+from ads.common.object_storage_details import ObjectStorageDetails
 from ads.opctl import logger
+from ads.opctl.operator.common.operator_config import OutputDirectory
 from ads.opctl.operator.lowcode.common.errors import (
-    InputDataError,
     InvalidParameterError,
-    PermissionsError,
-    DataMismatchError,
 )
-from ads.opctl.operator.common.operator_config import OutputDirectory
-from ads.common.object_storage_details import ObjectStorageDetails
 from ads.secrets import ADBSecretKeeper


 def call_pandas_fsspec(pd_fn, filename, storage_options, **kwargs):
-    if fsspec.utils.get_protocol(filename) == "file":
-        return pd_fn(filename, **kwargs)
-    elif fsspec.utils.get_protocol(filename) in ["http", "https"]:
+    if fsspec.utils.get_protocol(filename) == "file" or fsspec.utils.get_protocol(
+        filename
+    ) in ["http", "https"]:
         return pd_fn(filename, **kwargs)

     storage_options = storage_options or (
@@ -48,7 +38,7 @@ def call_pandas_fsspec(pd_fn, filename, storage_options, **kwargs):

 def load_data(data_spec, storage_options=None, **kwargs):
     if data_spec is None:
-        raise InvalidParameterError(f"No details provided for this data source.")
+        raise InvalidParameterError("No details provided for this data source.")
     filename = data_spec.url
     format = data_spec.format
     columns = data_spec.columns
@@ -67,7 +57,7 @@ def load_data(data_spec, storage_options=None, **kwargs):
         if not format:
             _, format = os.path.splitext(filename)
             format = format[1:]
-        if format in ["json", "clipboard", "excel", "csv", "feather", "hdf"]:
+        if format in ["json", "clipboard", "excel", "csv", "feather", "hdf", "parquet"]:
             read_fn = getattr(pd, f"read_{format}")
             data = call_pandas_fsspec(
                 read_fn, filename, storage_options=storage_options
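
Of note in this hunk: "parquet" joins the list of formats load_data will dispatch on, with the reader resolved by name. A minimal sketch of that dispatch pattern (the helper name and example URI below are illustrative, not part of the operator's API):

    import pandas as pd

    # Sketch of the dispatch above: the format string names a pandas reader,
    # so adding "parquet" routes .parquet files through pd.read_parquet.
    def read_by_format(filename, fmt, storage_options=None):
        read_fn = getattr(pd, f"read_{fmt}")
        return read_fn(filename, storage_options=storage_options)

    # df = read_by_format("oci://bucket@namespace/data.parquet", "parquet")
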
@@ -84,19 +74,31 @@ def load_data(data_spec, storage_options=None, **kwargs):
         with tempfile.TemporaryDirectory() as temp_dir:
             if vault_secret_id is not None:
                 try:
-                    with ADBSecretKeeper.load_secret(vault_secret_id, wallet_dir=temp_dir) as adwsecret:
-                        if 'wallet_location' in adwsecret and 'wallet_location' not in connect_args:
-                            shutil.unpack_archive(adwsecret["wallet_location"], temp_dir)
-                            connect_args['wallet_location'] = temp_dir
-                        if 'user_name' in adwsecret and 'user' not in connect_args:
-                            connect_args['user'] = adwsecret['user_name']
-                        if 'password' in adwsecret and 'password' not in connect_args:
-                            connect_args['password'] = adwsecret['password']
-                        if 'service_name' in adwsecret and 'service_name' not in connect_args:
-                            connect_args['service_name'] = adwsecret['service_name']
+                    with ADBSecretKeeper.load_secret(
+                        vault_secret_id, wallet_dir=temp_dir
+                    ) as adwsecret:
+                        if (
+                            "wallet_location" in adwsecret
+                            and "wallet_location" not in connect_args
+                        ):
+                            shutil.unpack_archive(
+                                adwsecret["wallet_location"], temp_dir
+                            )
+                            connect_args["wallet_location"] = temp_dir
+                        if "user_name" in adwsecret and "user" not in connect_args:
+                            connect_args["user"] = adwsecret["user_name"]
+                        if "password" in adwsecret and "password" not in connect_args:
+                            connect_args["password"] = adwsecret["password"]
+                        if (
+                            "service_name" in adwsecret
+                            and "service_name" not in connect_args
+                        ):
+                            connect_args["service_name"] = adwsecret["service_name"]

                 except Exception as e:
-                    raise Exception(f"Could not retrieve database credentials from vault {vault_secret_id}: {e}")
+                    raise Exception(
+                        f"Could not retrieve database credentials from vault {vault_secret_id}: {e}"
+                    )

             con = oracledb.connect(**connect_args)
             if table_name is not None:
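
The rewritten block above is a Black-style reflow of the wallet handling: each field from the vault secret is used only as a default, so values the caller already put in connect_args win. The same logic can be sketched more compactly with dict.setdefault (the secret OCID below is a hypothetical placeholder):

    import shutil
    import tempfile

    import oracledb
    from ads.secrets import ADBSecretKeeper

    connect_args = {}
    with tempfile.TemporaryDirectory() as temp_dir:
        with ADBSecretKeeper.load_secret("ocid1.vaultsecret.oc1...", wallet_dir=temp_dir) as adwsecret:
            # The wallet comes back as an archive; unpack it and point the driver at it.
            if "wallet_location" in adwsecret and "wallet_location" not in connect_args:
                shutil.unpack_archive(adwsecret["wallet_location"], temp_dir)
                connect_args["wallet_location"] = temp_dir
            # Secret values fill in only what the caller left unset.
            for src, dst in [("user_name", "user"), ("password", "password"), ("service_name", "service_name")]:
                if src in adwsecret:
                    connect_args.setdefault(dst, adwsecret[src])
        con = oracledb.connect(**connect_args)
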
@@ -105,11 +107,11 @@ def load_data(data_spec, storage_options=None, **kwargs):
                 data = pd.read_sql(sql, con)
             else:
                 raise InvalidParameterError(
-                    f"Database `connect_args` provided without sql query or table name. Please specify either `sql` or `table_name`."
+                    "Database `connect_args` provided without sql query or table name. Please specify either `sql` or `table_name`."
                 )
     else:
         raise InvalidParameterError(
-            f"No filename/url provided, and no connect_args provided. Please specify one of these if you want to read data from a file or a database respectively."
+            "No filename/url provided, and no connect_args provided. Please specify one of these if you want to read data from a file or a database respectively."
         )
     if columns:
         # keep only these columns, done after load because only CSV supports stream filtering
@@ -232,7 +234,7 @@ def human_time_friendly(seconds):
             accumulator.append(
                 "{} {}{}".format(int(amount), unit, "" if amount == 1 else "s")
            )
-    accumulator.append("{} secs".format(round(seconds, 2)))
+    accumulator.append(f"{round(seconds, 2)} secs")
     return ", ".join(accumulator)


@@ -248,9 +250,7 @@ def find_output_dirname(output_dir: OutputDirectory):
         unique_output_dir = f"{output_dir}_{counter}"
         counter += 1
     logger.warn(
-        "Since the output directory was not specified, the output will be saved to {} directory.".format(
-            unique_output_dir
-        )
+        f"Since the output directory was not specified, the output will be saved to {unique_output_dir} directory."
     )
     return unique_output_dir

ads/opctl/operator/lowcode/forecast/const.py

@@ -87,3 +87,4 @@ SUMMARY_METRICS_HORIZON_LIMIT = 10
 PROPHET_INTERNAL_DATE_COL = "ds"
 RENDER_LIMIT = 5000
 AUTO_SELECT = "auto-select"
+BACKTEST_REPORT_NAME = "back_test.csv"
ads/opctl/operator/lowcode/forecast/model/automlx.py

@@ -2,6 +2,7 @@
 # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 import logging
+import os
 import traceback

 import numpy as np
@@ -80,10 +81,17 @@ class AutoMLXOperatorModel(ForecastOperatorBaseModel):

         from automlx import Pipeline, init

+        cpu_count = os.cpu_count()
         try:
+            if cpu_count < 4:
+                engine = "local"
+                engine_opts = None
+            else:
+                engine = "ray"
+                engine_opts = ({"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},)
             init(
-                engine="ray",
-                engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
+                engine=engine,
+                engine_opts=engine_opts,
                 loglevel=logging.CRITICAL,
             )
         except Exception as e:
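
The new branch avoids starting Ray on machines with fewer than 4 CPUs. One caveat worth flagging: the trailing comma in the released line engine_opts = ({...},) makes engine_opts a one-element tuple rather than the dict the previous init() call received. A sketch of the presumably intended selection logic:

    import logging
    import os

    cpu_count = os.cpu_count()
    if cpu_count < 4:
        engine, engine_opts = "local", None
    else:
        # Dict form, as in the pre-2.12.9 init() call.
        engine, engine_opts = "ray", {"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}
    # init(engine=engine, engine_opts=engine_opts, loglevel=logging.CRITICAL)
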
ads/opctl/operator/lowcode/forecast/model/base_model.py

@@ -47,6 +47,7 @@ from ..const import (
     SpeedAccuracyMode,
     SupportedMetrics,
     SupportedModels,
+    BACKTEST_REPORT_NAME
 )
 from ..operator_config import ForecastOperatorConfig, ForecastOperatorSpec
 from .forecast_datasets import ForecastDatasets
@@ -148,8 +149,9 @@ class ForecastOperatorBaseModel(ABC):
         header_section = rc.Block(
             rc.Heading("Forecast Report", level=1),
             rc.Text(
-                f"You selected the {self.spec.model} model.\n{model_description}\nBased on your dataset, you could have also selected any of the models: {SupportedModels.keys()}."
+                f"You selected the {self.spec.model} model.\nBased on your dataset, you could have also selected any of the models: {SupportedModels.keys()}."
             ),
+            model_description,
             rc.Group(
                 rc.Metric(
                     heading="Analysis was completed in ",
@@ -255,12 +257,9 @@

         backtest_sections = []
         output_dir = self.spec.output_directory.url
-        backtest_report_name = "backtest_stats.csv"
-        file_path = f"{output_dir}/{backtest_report_name}"
+        file_path = f"{output_dir}/{BACKTEST_REPORT_NAME}"
         if self.spec.model == AUTO_SELECT:
-            backtest_sections.append(
-                rc.Heading("Auto-select statistics", level=2)
-            )
+            backtest_sections.append(rc.Heading("Auto-Select Backtesting and Performance Metrics", level=2))
             if not os.path.exists(file_path):
                 failure_msg = rc.Text(
                     "auto-select could not be executed. Please check the "
@@ -269,19 +268,15 @@
                 backtest_sections.append(failure_msg)
             else:
                 backtest_stats = pd.read_csv(file_path)
-                average_dict = backtest_stats.mean().to_dict()
-                del average_dict["backtest"]
+                model_metric_map = backtest_stats.drop(columns=['metric', 'backtest'])
+                average_dict = {k: round(v, 4) for k, v in model_metric_map.mean().to_dict().items()}
                 best_model = min(average_dict, key=average_dict.get)
-                backtest_text = rc.Heading("Back Testing Metrics", level=3)
                 summary_text = rc.Text(
-                    f"Overall, the average scores for the models are {average_dict}, with {best_model}"
-                    f" being identified as the top-performing model during backtesting."
-                )
+                    f"Overall, the average {self.spec.metric} scores for the models are {average_dict}, with"
+                    f" {best_model} being identified as the top-performing model during backtesting.")
                 backtest_table = rc.DataTable(backtest_stats, index=True)
                 liner_plot = get_auto_select_plot(backtest_stats)
-                backtest_sections.extend(
-                    [backtest_text, backtest_table, summary_text, liner_plot]
-                )
+                backtest_sections.extend([backtest_table, summary_text, liner_plot])

         forecast_plots = []
         if len(self.forecast_output.list_series_ids()) > 0:
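
The report now reads the shared back_test.csv, drops the bookkeeping columns, and averages each model column to pick a winner. A self-contained sketch with made-up scores (lower is better for error metrics such as SMAPE, hence min):

    import pandas as pd

    # Hypothetical back_test.csv contents: one row per backtest window, one
    # column per candidate model, plus "backtest" and "metric" columns.
    backtest_stats = pd.DataFrame({
        "backtest": [0, 1, 2],
        "prophet": [0.12, 0.10, 0.15],
        "arima": [0.20, 0.18, 0.22],
        "metric": ["smape", "smape", "smape"],
    })
    model_metric_map = backtest_stats.drop(columns=["metric", "backtest"])
    average_dict = {k: round(v, 4) for k, v in model_metric_map.mean().to_dict().items()}
    best_model = min(average_dict, key=average_dict.get)
    assert best_model == "prophet"
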
ads/opctl/operator/lowcode/forecast/model/factory.py

@@ -11,6 +11,7 @@ from .automlx import AutoMLXOperatorModel
 from .autots import AutoTSOperatorModel
 from .base_model import ForecastOperatorBaseModel
 from .forecast_datasets import ForecastDatasets
+from .ml_forecast import MLForecastOperatorModel
 from .neuralprophet import NeuralProphetOperatorModel
 from .prophet import ProphetOperatorModel

@@ -19,7 +20,7 @@ class UnSupportedModelError(Exception):
     def __init__(self, model_type: str):
         super().__init__(
             f"Model: `{model_type}` "
-            f"is not supported. Supported models: {SupportedModels.values}"
+            f"is not supported. Supported models: {SupportedModels.values()}"
         )


@@ -32,7 +33,7 @@ class ForecastOperatorModelFactory:
         SupportedModels.Prophet: ProphetOperatorModel,
         SupportedModels.Arima: ArimaOperatorModel,
         SupportedModels.NeuralProphet: NeuralProphetOperatorModel,
-        # SupportedModels.LGBForecast: MLForecastOperatorModel,
+        SupportedModels.LGBForecast: MLForecastOperatorModel,
         SupportedModels.AutoMLX: AutoMLXOperatorModel,
         SupportedModels.AutoTS: AutoTSOperatorModel,
     }
ads/opctl/operator/lowcode/forecast/model/prophet.py

@@ -142,6 +142,9 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
             dt_column=self.spec.datetime_column.name,
         )

+        # if os.environ["OCI__IS_SPARK"]:
+        #     pass
+        # else:
         Parallel(n_jobs=-1, require="sharedmem")(
             delayed(ProphetOperatorModel._train_model)(
                 self, i, series_id, df, model_kwargs.copy()
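
For context, the Parallel call retained below the commented-out Spark guard fans out one _train_model job per series; require="sharedmem" pins joblib to its threading backend so workers can write into shared state on self. A minimal sketch of the pattern with a stand-in worker:

    from joblib import Parallel, delayed

    def train_one(i, series_id):
        # Stand-in for ProphetOperatorModel._train_model
        return series_id, i

    results = Parallel(n_jobs=-1, require="sharedmem")(
        delayed(train_one)(i, sid) for i, sid in enumerate(["s1", "s2", "s3"])
    )
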
@@ -354,7 +357,7 @@
             logger.warn(f"Failed to generate Explanations with error: {e}.")
             logger.debug(f"Full Traceback: {traceback.format_exc()}")

-        model_description = (
+        model_description = rc.Text(
             "Prophet is a procedure for forecasting time series data based on an additive "
             "model where non-linear trends are fit with yearly, weekly, and daily seasonality, "
             "plus holiday effects. It works best with time series that have strong seasonal "
ads/opctl/operator/lowcode/forecast/model_evaluator.py

@@ -10,6 +10,7 @@ from pathlib import Path

 from ads.opctl import logger
 from ads.opctl.operator.lowcode.common.const import DataColumns
+from ads.opctl.operator.lowcode.forecast.const import BACKTEST_REPORT_NAME
 from .model.forecast_datasets import ForecastDatasets
 from .operator_config import ForecastOperatorConfig
 from ads.opctl.operator.lowcode.forecast.model.factory import SupportedModels
@@ -156,8 +157,8 @@ class ModelEvaluator:
         best_model = min(avg_backtests_metric, key=avg_backtests_metric.get)
         logger.info(f"Among models {self.models}, {best_model} model shows better performance during backtesting.")
         backtest_stats = pd.DataFrame(nonempty_metrics).rename_axis('backtest')
+        backtest_stats["metric"] = operator_config.spec.metric
         backtest_stats.reset_index(inplace=True)
         output_dir = operator_config.spec.output_directory.url
-        backtest_report_name = "backtest_stats.csv"
-        backtest_stats.to_csv(f"{output_dir}/{backtest_report_name}", index=False)
+        backtest_stats.to_csv(f"{output_dir}/{BACKTEST_REPORT_NAME}", index=False)
         return best_model
ads/opctl/operator/lowcode/forecast/schema.yaml

@@ -311,7 +311,7 @@ spec:
     missing_value_imputation:
       type: boolean
       required: false
-      default: false
+      default: true
     outlier_treatment:
       type: boolean
       required: false
ads/opctl/operator/lowcode/forecast/utils.py

@@ -261,10 +261,11 @@ def _add_unit(num, unit):

 def get_auto_select_plot(backtest_results):
     fig = go.Figure()
-    columns = backtest_results.columns.tolist()
+    back_test_csv_columns = backtest_results.columns.tolist()
     back_test_column = "backtest"
-    columns.remove(back_test_column)
-    for column in columns:
+    metric_column = "metric"
+    models = [x for x in back_test_csv_columns if x not in [back_test_column, metric_column]]
+    for i, column in enumerate(models):
         fig.add_trace(
             go.Scatter(
                 x=backtest_results[back_test_column],
ads/opctl/operator/lowcode/pii/model/factory.py

@@ -1,7 +1,6 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*--

-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

 import uuid
@@ -18,7 +17,7 @@ class UnSupportedDetectorError(Exception):
     def __init__(self, dtype: str):
         super().__init__(
             f"Detector: `{dtype}` "
-            f"is not supported. Supported models: {SupportedDetector.values}"
+            f"is not supported. Supported models: {SupportedDetector.values()}"
         )


@@ -42,7 +41,10 @@ class SpacyDetector(PiiBaseDetector):
     @runtime_dependency(module="scrubadub", install_from=OptionalDependency.PII)
     @runtime_dependency(module="scrubadub_spacy", install_from=OptionalDependency.PII)
     def construct(cls, entity, model, **kwargs):
-        spacy_entity_detector = scrubadub_spacy.detectors.spacy.SpacyEntityDetector(
+        from scrubadub.filth import Filth
+        from scrubadub_spacy.detectors.spacy import SpacyEntityDetector
+
+        spacy_entity_detector = SpacyEntityDetector(
             named_entities=[entity],
             name=f"spacy_{uuid.uuid4()}",
             model=model,
@@ -50,7 +52,7 @@ class SpacyDetector(PiiBaseDetector):
         if entity.upper() not in cls.DEFAULT_SPACY_NAMED_ENTITIES:
             filth_cls = type(
                 construct_filth_cls_name(entity),
-                (scrubadub.filth.Filth,),
+                (Filth,),
                 {"type": entity.upper()},
             )
             spacy_entity_detector.filth_cls_map[entity.upper()] = filth_cls
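
Beyond the reflow, these two hunks replace attribute-path lookups with function-local imports, keeping scrubadub optional at module import time. The dynamic-class trick itself is unchanged: type(name, bases, namespace) mints a Filth subclass at runtime for entities scrubadub does not ship. A sketch, assuming scrubadub is installed and using a hypothetical custom entity:

    from scrubadub.filth import Filth

    entity = "project_code"  # hypothetical entity, not in the spaCy defaults
    filth_cls = type("ProjectCodeFilth", (Filth,), {"type": entity.upper()})
    assert issubclass(filth_cls, Filth) and filth_cls.type == "PROJECT_CODE"
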
ads/opctl/operator/lowcode/recommender/model/base_model.py

@@ -61,8 +61,9 @@ class RecommenderOperatorBaseModel(ABC):
         header_section = rc.Block(
             rc.Heading("Recommender Report", level=1),
             rc.Text(
-                f"The recommendations was generated using {SupportedModels.SVD.upper()}. {model_description}"
+                f"The recommendations was generated using {SupportedModels.SVD.upper()}."
             ),
+            model_description,
             rc.Group(
                 rc.Metric(
                     heading="Recommendations was generated in ",
ads/opctl/operator/lowcode/recommender/model/factory.py

@@ -1,7 +1,6 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*--

-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

 from ..constant import SupportedModels
@@ -10,11 +9,12 @@ from .base_model import RecommenderOperatorBaseModel
 from .recommender_dataset import RecommenderDatasets
 from .svd import SVDOperatorModel

+
 class UnSupportedModelError(Exception):
     def __init__(self, model_type: str):
         super().__init__(
             f"Model: `{model_type}` "
-            f"is not supported. Supported models: {SupportedModels.values}"
+            f"is not supported. Supported models: {SupportedModels.values()}"
         )


@@ -23,9 +23,7 @@ class RecommenderOperatorModelFactory:
     The factory class helps to instantiate proper model operator based on the model type.
     """

-    _MAP = {
-        SupportedModels.SVD: SVDOperatorModel
-    }
+    _MAP = {SupportedModels.SVD: SVDOperatorModel}

     @classmethod
     def get_model(
ads/opctl/operator/lowcode/recommender/model/svd.py

@@ -78,11 +78,11 @@ class SVDOperatorModel(RecommenderOperatorBaseModel):
         return recommendations_df, metric

     def _generate_report(self):
-        model_description = """
-            Singular Value Decomposition (SVD) is a matrix factorization technique used in recommendation systems to
-            decompose a user-item interaction matrix into three constituent matrices. These matrices capture the
-            latent factors that explain the observed interactions.
-        """
+        model_description = rc.Text(
+            "Singular Value Decomposition (SVD) is a matrix factorization technique used in recommendation systems to \
+        decompose a user-item interaction matrix into three constituent matrices. These matrices capture the \
+        latent factors that explain the observed interactions."
+        )
         new_user_recommendations = self._get_recommendations(
             "__new_user__", self.spec.top_k
         )
{oracle_ads-2.12.7.dist-info → oracle_ads-2.12.9.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: oracle_ads
-Version: 2.12.7
+Version: 2.12.9
 Summary: Oracle Accelerated Data Science SDK
 Keywords: Oracle Cloud Infrastructure,OCI,Machine Learning,ML,Artificial Intelligence,AI,Data Science,Cloud,Oracle
 Author: Oracle Data Science
@@ -30,7 +30,7 @@ Requires-Dist: pandas>=2.2.0; python_version>='3.9'
 Requires-Dist: psutil>=5.7.2
 Requires-Dist: python_jsonschema_objects>=0.3.13
 Requires-Dist: requests
-Requires-Dist: scikit-learn>=1.0
+Requires-Dist: scikit-learn>=1.0,<1.6.0
 Requires-Dist: tabulate>=0.8.9
 Requires-Dist: tqdm>=4.59.0
 Requires-Dist: pydantic>=2.6.3
@@ -39,7 +39,7 @@ Requires-Dist: autots ; extra == "anomaly"
 Requires-Dist: oracledb ; extra == "anomaly"
 Requires-Dist: report-creator==1.0.28 ; extra == "anomaly"
 Requires-Dist: rrcf==0.4.4 ; extra == "anomaly"
-Requires-Dist: scikit-learn ; extra == "anomaly"
+Requires-Dist: scikit-learn<1.6.0 ; extra == "anomaly"
 Requires-Dist: salesforce-merlion[all]==2.0.4 ; extra == "anomaly"
 Requires-Dist: jupyter_server ; extra == "aqua"
 Requires-Dist: hdfs[kerberos] ; extra == "bds"
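
Both the core requirement and the anomaly extra now cap scikit-learn below 1.6.0. A quick sketch for checking whether an existing environment satisfies the new pin (assumes the packaging library is installed):

    from importlib.metadata import version
    from packaging.specifiers import SpecifierSet

    # The new constraint from METADATA; fails loudly if the installed
    # scikit-learn falls outside it.
    installed = version("scikit-learn")
    assert installed in SpecifierSet(">=1.0,<1.6.0"), f"scikit-learn {installed} conflicts with oracle-ads 2.12.9"
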