oracle-ads 2.13.0__py3-none-any.whl → 2.13.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ads/aqua/__init__.py +7 -1
- ads/aqua/app.py +24 -23
- ads/aqua/client/client.py +48 -11
- ads/aqua/common/entities.py +28 -1
- ads/aqua/common/enums.py +13 -7
- ads/aqua/common/utils.py +8 -13
- ads/aqua/config/container_config.py +203 -0
- ads/aqua/config/evaluation/evaluation_service_config.py +5 -181
- ads/aqua/constants.py +0 -1
- ads/aqua/evaluation/evaluation.py +4 -4
- ads/aqua/extension/base_handler.py +4 -0
- ads/aqua/extension/model_handler.py +19 -28
- ads/aqua/finetuning/finetuning.py +2 -3
- ads/aqua/model/entities.py +2 -3
- ads/aqua/model/model.py +25 -30
- ads/aqua/modeldeployment/deployment.py +6 -14
- ads/aqua/modeldeployment/entities.py +2 -2
- ads/aqua/server/__init__.py +4 -0
- ads/aqua/server/__main__.py +24 -0
- ads/aqua/server/app.py +47 -0
- ads/aqua/server/aqua_spec.yml +1291 -0
- ads/aqua/ui.py +5 -199
- ads/common/auth.py +20 -11
- ads/common/utils.py +91 -11
- ads/config.py +3 -0
- ads/llm/__init__.py +1 -0
- ads/llm/langchain/plugins/llms/oci_data_science_model_deployment_endpoint.py +32 -23
- ads/model/artifact_downloader.py +4 -1
- ads/model/common/utils.py +15 -3
- ads/model/datascience_model.py +339 -8
- ads/model/model_metadata.py +54 -14
- ads/model/model_version_set.py +5 -3
- ads/model/service/oci_datascience_model.py +477 -5
- ads/opctl/anomaly_detection.py +11 -0
- ads/opctl/forecast.py +11 -0
- ads/opctl/operator/common/utils.py +16 -0
- ads/opctl/operator/lowcode/common/data.py +5 -2
- ads/opctl/operator/lowcode/common/transformations.py +2 -12
- ads/opctl/operator/lowcode/forecast/__main__.py +5 -5
- ads/opctl/operator/lowcode/forecast/model/arima.py +6 -3
- ads/opctl/operator/lowcode/forecast/model/automlx.py +61 -31
- ads/opctl/operator/lowcode/forecast/model/base_model.py +66 -40
- ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +79 -13
- ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +5 -2
- ads/opctl/operator/lowcode/forecast/model/prophet.py +28 -15
- ads/opctl/operator/lowcode/forecast/model_evaluator.py +13 -15
- ads/opctl/operator/lowcode/forecast/schema.yaml +1 -1
- ads/opctl/operator/lowcode/forecast/whatifserve/deployment_manager.py +7 -0
- ads/opctl/operator/lowcode/forecast/whatifserve/score.py +19 -11
- {oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info}/METADATA +18 -15
- {oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info}/RECORD +54 -48
- {oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info}/WHEEL +1 -1
- ads/aqua/config/evaluation/evaluation_service_model_config.py +0 -8
- {oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info}/entry_points.txt +0 -0
- {oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info/licenses}/LICENSE.txt +0 -0
ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py

@@ -1,8 +1,10 @@
 #!/usr/bin/env python
 
-# Copyright (c) 2023,
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
+from typing import Dict, List
+
 import pandas as pd
 
 from ads.opctl import logger
@@ -21,8 +23,8 @@ from ..operator_config import ForecastOperatorConfig
 
 
 class HistoricalData(AbstractData):
-    def __init__(self, spec):
-        super().__init__(spec=spec, name="historical_data")
+    def __init__(self, spec, historical_data = None):
+        super().__init__(spec=spec, name="historical_data", data=historical_data)
 
     def _ingest_data(self, spec):
         try:
@@ -50,8 +52,11 @@ class HistoricalData(AbstractData):
 
 
 class AdditionalData(AbstractData):
-    def __init__(self, spec, historical_data):
-        if spec.additional_data is not None:
+    def __init__(self, spec, historical_data, additional_data=None):
+        if additional_data is not None:
+            super().__init__(spec=spec, name="additional_data", data=additional_data)
+            self.additional_regressors = list(self.data.columns)
+        elif spec.additional_data is not None:
             super().__init__(spec=spec, name="additional_data")
             add_dates = self.data.index.get_level_values(0).unique().tolist()
             add_dates.sort()
@@ -108,14 +113,15 @@ class AdditionalData(AbstractData):
 
 
 class TestData(AbstractData):
-    def __init__(self, spec):
-
+    def __init__(self, spec, test_data):
+        if test_data is not None or spec.test_data is not None:
+            super().__init__(spec=spec, name="test_data", data=test_data)
         self.dt_column_name = spec.datetime_column.name
         self.target_name = spec.target_column
 
 
 class ForecastDatasets:
-    def __init__(self, config: ForecastOperatorConfig):
+    def __init__(self, config: ForecastOperatorConfig, historical_data=None, additional_data=None, test_data=None):
         """Instantiates the DataIO instance.
 
         Properties
@@ -125,11 +131,15 @@ class ForecastDatasets:
         """
         self.historical_data: HistoricalData = None
         self.additional_data: AdditionalData = None
-
         self._horizon = config.spec.horizon
         self._datetime_column_name = config.spec.datetime_column.name
         self._target_col = config.spec.target_column
-
+        if historical_data is not None:
+            self.historical_data = HistoricalData(config.spec, historical_data)
+            self.additional_data = AdditionalData(config.spec, self.historical_data, additional_data)
+        else:
+            self._load_data(config.spec)
+        self.test_data = TestData(config.spec, test_data)
 
     def _load_data(self, spec):
         """Loads forecasting input data."""
@@ -167,7 +177,7 @@ class ForecastDatasets:
                 self.historical_data.data,
                 self.additional_data.data,
             ],
-            axis=1
+            axis=1,
         )
 
     def get_data_by_series(self, include_horizon=True):
@@ -198,7 +208,7 @@ class ForecastDatasets:
         return self.get_data_at_series(s_id)[-self._horizon :]
 
     def has_artificial_series(self):
-        return self.historical_data.
+        return bool(self.historical_data.spec.target_category_columns)
 
     def get_earliest_timestamp(self):
         return self.historical_data.get_min_time()
@@ -249,7 +259,7 @@ class ForecastOutput:
         target_column: str,
         dt_column: str,
     ):
-        """Forecast Output contains all
+        """Forecast Output contains all the details required to generate the forecast.csv output file.
 
         init
         -------
@@ -416,3 +426,59 @@ class ForecastOutput:
         for df in self.series_id_map.values():
             output = pd.concat([output, df])
         return output.reset_index(drop=True)
+
+
+class ForecastResults:
+    """
+    Forecast Results contains all outputs from the forecast run.
+    This class is returned to users who use the Forecast's `operate` method.
+
+    """
+
+    def set_forecast(self, df: pd.DataFrame):
+        self.forecast = df
+
+    def get_forecast(self):
+        return getattr(self, "forecast", None)
+
+    def set_metrics(self, df: pd.DataFrame):
+        self.metrics = df
+
+    def get_metrics(self):
+        return getattr(self, "metrics", None)
+
+    def set_test_metrics(self, df: pd.DataFrame):
+        self.test_metrics = df
+
+    def get_test_metrics(self):
+        return getattr(self, "test_metrics", None)
+
+    def set_local_explanations(self, df: pd.DataFrame):
+        self.local_explanations = df
+
+    def get_local_explanations(self):
+        return getattr(self, "local_explanations", None)
+
+    def set_global_explanations(self, df: pd.DataFrame):
+        self.global_explanations = df
+
+    def get_global_explanations(self):
+        return getattr(self, "global_explanations", None)
+
+    def set_model_parameters(self, df: pd.DataFrame):
+        self.model_parameters = df
+
+    def get_model_parameters(self):
+        return getattr(self, "model_parameters", None)
+
+    def set_models(self, models: List):
+        self.models = models
+
+    def get_models(self):
+        return getattr(self, "models", None)
+
+    def set_errors_dict(self, errors_dict: Dict):
+        self.errors_dict = errors_dict
+
+    def get_errors_dict(self):
+        return getattr(self, "errors_dict", None)
ads/opctl/operator/lowcode/forecast/model/neuralprophet.py

@@ -172,8 +172,10 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
             ).values,
         )
 
-        self.models[s_id] = model
         self.trainers[s_id] = model.trainer
+        self.models[s_id] = {}
+        self.models[s_id]["model"] = model
+        self.models[s_id]["le"] = self.le[s_id]
 
         self.model_parameters[s_id] = {
             "framework": SupportedModels.NeuralProphet,
@@ -355,7 +357,8 @@ class NeuralProphetOperatorModel(ForecastOperatorBaseModel):
 
         sec5_text = rc.Heading("Neural Prophet Model Parameters", level=2)
         model_states = []
-        for s_id, m in self.models.items():
+        for s_id, artifacts in self.models.items():
+            m = artifacts["model"]
             model_states.append(
                 pd.Series(
                     m.state_dict(),
ads/opctl/operator/lowcode/forecast/model/prophet.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (c) 2024 Oracle and/or its affiliates.
+# Copyright (c) 2024, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import logging
@@ -43,7 +43,11 @@ def _add_unit(num, unit):
 def _fit_model(data, params, additional_regressors):
     from prophet import Prophet
 
+    monthly_seasonality = params.pop("monthly_seasonality", False)
     model = Prophet(**params)
+    if monthly_seasonality:
+        model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
+        params["monthly_seasonality"] = monthly_seasonality
     for add_reg in additional_regressors:
         model.add_regressor(add_reg)
     model.fit(data)
@@ -108,7 +112,10 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
             upper_bound=self.get_horizon(forecast["yhat_upper"]).values,
             lower_bound=self.get_horizon(forecast["yhat_lower"]).values,
         )
-
+
+        self.models[series_id] = {}
+        self.models[series_id]["model"] = model
+        self.models[series_id]["le"] = self.le[series_id]
 
         params = vars(model).copy()
         for param in ["history", "history_dates", "stan_fit"]:
@@ -252,11 +259,11 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
         all_sections = []
         if len(series_ids) > 0:
             sec1 = _select_plot_list(
-                lambda s_id: self.models[s_id].plot(
+                lambda s_id: self.models[s_id]["model"].plot(
                     self.outputs[s_id], include_legend=True
                 ),
                 series_ids=series_ids,
-                target_category_column=self.target_cat_col
+                target_category_column=self.target_cat_col,
             )
             section_1 = rc.Block(
                 rc.Heading("Forecast Overview", level=2),
@@ -267,25 +274,25 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
             )
 
             sec2 = _select_plot_list(
-                lambda s_id: self.models[s_id].plot_components(self.outputs[s_id]),
+                lambda s_id: self.models[s_id]["model"].plot_components(self.outputs[s_id]),
                 series_ids=series_ids,
-                target_category_column=self.target_cat_col
+                target_category_column=self.target_cat_col,
             )
             section_2 = rc.Block(
                 rc.Heading("Forecast Broken Down by Trend Component", level=2), sec2
             )
 
             sec3_figs = {
-                s_id: self.models[s_id].plot(self.outputs[s_id]) for s_id in series_ids
+                s_id: self.models[s_id]["model"].plot(self.outputs[s_id]) for s_id in series_ids
             }
             for s_id in series_ids:
                 add_changepoints_to_plot(
-                    sec3_figs[s_id].gca(), self.models[s_id], self.outputs[s_id]
+                    sec3_figs[s_id].gca(), self.models[s_id]["model"], self.outputs[s_id]
                 )
             sec3 = _select_plot_list(
                 lambda s_id: sec3_figs[s_id],
                 series_ids=series_ids,
-                target_category_column=self.target_cat_col
+                target_category_column=self.target_cat_col,
             )
             section_3 = rc.Block(rc.Heading("Forecast Changepoints", level=2), sec3)
 
@@ -294,12 +301,14 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
         sec5_text = rc.Heading("Prophet Model Seasonality Components", level=2)
         model_states = []
         for s_id in series_ids:
-            m = self.models[s_id]
+            m = self.models[s_id]["model"]
             model_states.append(
                 pd.Series(
                     m.seasonalities,
                     index=pd.Index(m.seasonalities.keys(), dtype="object"),
-                    name=s_id
+                    name=s_id
+                    if self.target_cat_col
+                    else self.original_target_column,
                     dtype="object",
                 )
             )
@@ -330,11 +339,15 @@ class ProphetOperatorModel(ForecastOperatorBaseModel):
         self.formatted_local_explanation = aggregate_local_explanations
 
         if not self.target_cat_col:
-            self.formatted_global_explanation =
-
-
+            self.formatted_global_explanation = (
+                self.formatted_global_explanation.rename(
+                    {"Series 1": self.original_target_column},
+                    axis=1,
+                )
+            )
+            self.formatted_local_explanation.drop(
+                "Series", axis=1, inplace=True
             )
-            self.formatted_local_explanation.drop("Series", axis=1, inplace=True)
 
         # Create a markdown section for the global explainability
         global_explanation_section = rc.Block(
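
The monthly-seasonality handling added to _fit_model reduces to the pattern below. This is a standalone sketch against the public Prophet API rather than the operator code itself; the sample series is synthetic.

import pandas as pd
from prophet import Prophet

# "monthly_seasonality" is not a native Prophet constructor argument, so it is
# popped from the kwargs before Prophet(**params) and registered afterwards.
params = {"yearly_seasonality": True, "monthly_seasonality": True}
monthly_seasonality = params.pop("monthly_seasonality", False)

model = Prophet(**params)
if monthly_seasonality:
    model.add_seasonality(name="monthly", period=30.5, fourier_order=5)

df = pd.DataFrame({
    "ds": pd.date_range("2024-01-01", periods=120, freq="D"),
    "y": range(120),
})
model.fit(df)
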
ads/opctl/operator/lowcode/forecast/model_evaluator.py

@@ -91,19 +91,13 @@ class ModelEvaluator:
         output_dir = operator_config.spec.output_directory.url
         output_file_path = f'{output_dir}/back_testing/{model}/{backtest}'
         Path(output_file_path).mkdir(parents=True, exist_ok=True)
-        historical_data_url = f'{output_file_path}/historical.csv'
-        additional_data_url = f'{output_file_path}/additional.csv'
-        test_data_url = f'{output_file_path}/test.csv'
-        historical_data.to_csv(historical_data_url, index=False)
-        additional_data.to_csv(additional_data_url, index=False)
-        test_data.to_csv(test_data_url, index=False)
         backtest_op_config_draft = operator_config.to_dict()
         backtest_spec = backtest_op_config_draft["spec"]
-        backtest_spec["
-
-
-        backtest_spec
-        backtest_spec["
+        backtest_spec["datetime_column"]["format"] = None
+        backtest_spec.pop("test_data")
+        backtest_spec.pop("additional_data")
+        backtest_spec.pop("historical_data")
+        backtest_spec["generate_report"] = False
         backtest_spec["model"] = model
         backtest_spec['model_kwargs'] = None
         backtest_spec["output_directory"] = {"url": output_file_path}
@@ -118,19 +112,23 @@ class ModelEvaluator:
     def run_all_models(self, datasets: ForecastDatasets, operator_config: ForecastOperatorConfig):
         cut_offs, train_sets, additional_data, test_sets = self.generate_k_fold_data(datasets, operator_config)
         metrics = {}
+        date_col = operator_config.spec.datetime_column.name
         for model in self.models:
             from .model.factory import ForecastOperatorModelFactory
             metrics[model] = {}
             for i in range(len(cut_offs)):
                 try:
-                    backtest_historical_data = train_sets[i]
-                    backtest_additional_data = additional_data[i]
-                    backtest_test_data = test_sets[i]
+                    backtest_historical_data = train_sets[i].set_index([date_col, DataColumns.Series])
+                    backtest_additional_data = additional_data[i].set_index([date_col, DataColumns.Series])
+                    backtest_test_data = test_sets[i].set_index([date_col, DataColumns.Series])
                     backtest_operator_config = self.create_operator_config(operator_config, i, model,
                                                                            backtest_historical_data,
                                                                            backtest_additional_data,
                                                                            backtest_test_data)
-                    datasets = ForecastDatasets(backtest_operator_config)
+                    datasets = ForecastDatasets(backtest_operator_config,
+                                                backtest_historical_data,
+                                                backtest_additional_data,
+                                                backtest_test_data)
                     ForecastOperatorModelFactory.get_model(
                         backtest_operator_config, datasets
                     ).generate_report()
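
The backtest loop now hands each fold to ForecastDatasets in memory instead of round-tripping it through CSV files, after re-indexing the fold on the datetime column plus the series column. A small pandas-only illustration of that re-indexing, with made-up data and the literal "Series" standing in for DataColumns.Series:

import pandas as pd

date_col = "Date"  # corresponds to operator_config.spec.datetime_column.name
train_fold = pd.DataFrame({
    "Date": pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-01", "2024-01-02"]),
    "Series": ["A", "A", "B", "B"],
    "Sales": [10, 12, 7, 9],
})

# Same shape of call as train_sets[i].set_index([date_col, DataColumns.Series]) above.
backtest_historical_data = train_fold.set_index([date_col, "Series"])
print(backtest_historical_data.index.names)  # ['Date', 'Series']
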
ads/opctl/operator/lowcode/forecast/whatifserve/deployment_manager.py

@@ -15,6 +15,7 @@ import cloudpickle
 
 from ads.opctl import logger
 from ads.common.model_export_util import prepare_generic_model
+from ads.opctl.operator.common.utils import create_log_in_log_group
 from ads.opctl.operator.lowcode.common.utils import write_data, write_simple_json
 from ads.opctl.operator.lowcode.common.utils import default_signer
 from ..model.forecast_datasets import AdditionalData
@@ -184,6 +185,10 @@ class ModelDeploymentManager:
 
         log_group = self.spec.what_if_analysis.model_deployment.log_group
         log_id = self.spec.what_if_analysis.model_deployment.log_id
+        if not log_id and not self.test_mode:
+            signer = oci.auth.signers.get_resource_principals_signer()
+            auth = {"signer": signer, "config": {}}
+            log_id = create_log_in_log_group(self.compartment_id, log_group, auth)
 
         logs_configuration_details_object = CategoryLogDetails(
             access=LogDetails(log_group_id=log_group,
@@ -211,8 +216,10 @@ class ModelDeploymentManager:
         logger.info(f"deployment metadata :{model_deployment.data}")
         md = data_science.get_model_deployment(model_deployment_id=model_deployment.data.resources[0].identifier)
         self.deployment_info['model_deployment_ocid'] = md.data.id
+        self.deployment_info['status'] = md.data.lifecycle_state
         endpoint_url = md.data.model_deployment_url
         self.deployment_info['model_deployment_endpoint'] = f"{endpoint_url}/predict"
+        self.deployment_info['log_id'] = log_id
 
     def save_deployment_info(self):
         output_dir = self.spec.output_directory.url
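
When no log OCID is configured, the manager now falls back to a resource-principal signer and the new create_log_in_log_group helper. The sketch below mirrors that fallback; it only runs inside an OCI resource with resource principals enabled, and the compartment and log-group OCIDs are placeholders.

import oci

from ads.opctl.operator.common.utils import create_log_in_log_group

# Placeholders; real values come from the operator spec at runtime.
compartment_id = "ocid1.compartment.oc1..example"
log_group = "ocid1.loggroup.oc1..example"

signer = oci.auth.signers.get_resource_principals_signer()
auth = {"signer": signer, "config": {}}
log_id = create_log_in_log_group(compartment_id, log_group, auth)
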
ads/opctl/operator/lowcode/forecast/whatifserve/score.py

@@ -151,34 +151,42 @@ def get_forecast(future_df, model_name, series_id, model_object, date_col, targe
         pred_obj = model_object.predict(future_regressor=future_reg)
         return pred_obj.forecast[series_id].tolist()
     elif model_name == SupportedModels.Prophet and series_id in model_object:
-        model = model_object[series_id]
+        model = model_object[series_id]['model']
+        label_encoder = model_object[series_id]['le']
         processed = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
-
+        encoded_df = label_encoder.transform(processed)
+        forecast = model.predict(encoded_df)
         return forecast['yhat'].tolist()
     elif model_name == SupportedModels.NeuralProphet and series_id in model_object:
-        model = model_object[series_id]
+        model = model_object[series_id]['model']
+        label_encoder = model_object[series_id]['le']
         model.restore_trainer()
         accepted_regressors = list(model.config_regressors.regressors.keys())
         data = future_df.rename(columns={date_col_name: 'ds', target_column: 'y'})
-
+        encoded_df = label_encoder.transform(data)
+        future = encoded_df[accepted_regressors + ["ds"]].reset_index(drop=True)
         future["y"] = None
         forecast = model.predict(future)
         return forecast['yhat1'].tolist()
     elif model_name == SupportedModels.Arima and series_id in model_object:
-        model = model_object[series_id]
-
-
+        model = model_object[series_id]['model']
+        label_encoder = model_object[series_id]['le']
+        predict_cols = model_object[series_id]["predict_component_cols"]
+        encoded_df = label_encoder.transform(future_df)
+        x_pred = encoded_df.set_index(date_col_name)
+        x_pred = x_pred.drop(target_cat_col, axis=1)
         yhat, conf_int = model.predict(
             n_periods=horizon,
-            X=x_pred,
+            X=x_pred[predict_cols],
             return_conf_int=True
         )
         yhat_clean = pd.DataFrame(yhat, index=yhat.index, columns=["yhat"])
         return yhat_clean['yhat'].tolist()
     elif model_name == SupportedModels.AutoMLX and series_id in model_object:
-
-
-
+        model = model_object[series_id]['model']
+        label_encoder = model_object[series_id]['le']
+        encoded_df = label_encoder.transform(future_df)
+        x_pred = encoded_df.drop(target_cat_col, axis=1)
         x_pred = x_pred.set_index(date_col_name)
         forecast = model.forecast(
             X=x_pred,
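
score.py now expects each serialized series entry to be a dictionary of artifacts rather than a bare model. A sketch of that layout as implied by the hunks above; the series id and placeholder values are illustrative, and "predict_component_cols" is only read on the ARIMA branch:

fitted_model = None        # would be a fitted Prophet/NeuralProphet/ARIMA/AutoMLX model
label_encoder = None       # would be the label encoder fitted during training
exog_columns = ["temp"]    # ARIMA only: exogenous columns sliced before model.predict()

model_object = {
    "Series 1": {
        "model": fitted_model,
        "le": label_encoder,
        "predict_component_cols": exog_columns,
    }
}
print(sorted(model_object["Series 1"]))  # ['le', 'model', 'predict_component_cols']
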
{oracle_ads-2.13.0.dist-info → oracle_ads-2.13.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: oracle_ads
-Version: 2.13.0
+Version: 2.13.1
 Summary: Oracle Accelerated Data Science SDK
 Keywords: Oracle Cloud Infrastructure,OCI,Machine Learning,ML,Artificial Intelligence,AI,Data Science,Cloud,Oracle
 Author: Oracle Data Science
@@ -10,10 +10,10 @@ Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Universal Permissive License (UPL)
 Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+License-File: LICENSE.txt
 Requires-Dist: PyYAML>=6
 Requires-Dist: asteval>=0.9.25
 Requires-Dist: cerberus>=1.3.4
@@ -23,10 +23,9 @@ Requires-Dist: gitpython>=3.1.2
 Requires-Dist: jinja2>=2.11.2
 Requires-Dist: matplotlib>=3.1.3,<=3.8.4
 Requires-Dist: numpy>=1.19.2,<2.0.0
-Requires-Dist: oci>=2.
+Requires-Dist: oci>=2.148.0
 Requires-Dist: ocifs>=1.1.3
-Requires-Dist: pandas
-Requires-Dist: pandas>=2.2.0; python_version>='3.9'
+Requires-Dist: pandas>=2.2.0
 Requires-Dist: psutil>=5.7.2
 Requires-Dist: python_jsonschema_objects>=0.3.13
 Requires-Dist: requests
@@ -39,11 +38,17 @@ Requires-Dist: httpx
 Requires-Dist: oracle_ads[opctl] ; extra == "anomaly"
 Requires-Dist: autots ; extra == "anomaly"
 Requires-Dist: oracledb ; extra == "anomaly"
-Requires-Dist: report-creator==1.0.
+Requires-Dist: report-creator==1.0.37 ; extra == "anomaly"
 Requires-Dist: rrcf==0.4.4 ; extra == "anomaly"
 Requires-Dist: scikit-learn<1.6.0 ; extra == "anomaly"
 Requires-Dist: salesforce-merlion[all]==2.0.4 ; extra == "anomaly"
 Requires-Dist: jupyter_server ; extra == "aqua"
+Requires-Dist: tornado ; extra == "aqua"
+Requires-Dist: notebook>=6.4,<=6.6 ; extra == "aqua"
+Requires-Dist: fire ; extra == "aqua"
+Requires-Dist: cachetools ; extra == "aqua"
+Requires-Dist: huggingface_hub ; extra == "aqua"
+Requires-Dist: python-dotenv ; extra == "aqua"
 Requires-Dist: hdfs[kerberos] ; extra == "bds"
 Requires-Dist: ibis-framework[impala] ; extra == "bds"
 Requires-Dist: sqlalchemy ; extra == "bds"
@@ -78,7 +83,7 @@ Requires-Dist: sktime ; extra == "forecast"
 Requires-Dist: statsmodels ; extra == "forecast"
 Requires-Dist: plotly ; extra == "forecast"
 Requires-Dist: oracledb ; extra == "forecast"
-Requires-Dist: report-creator==1.0.
+Requires-Dist: report-creator==1.0.37 ; extra == "forecast"
 Requires-Dist: geopandas<1.0.0 ; extra == "geo"
 Requires-Dist: fiona<=1.9.6 ; extra == "geo"
 Requires-Dist: oracle_ads[viz] ; extra == "geo"
@@ -122,11 +127,11 @@ Requires-Dist: scrubadub==2.0.1 ; extra == "pii"
 Requires-Dist: scrubadub_spacy ; extra == "pii"
 Requires-Dist: spacy-transformers==1.2.5 ; extra == "pii"
 Requires-Dist: spacy==3.6.1 ; extra == "pii"
-Requires-Dist: report-creator
+Requires-Dist: report-creator>=1.0.37 ; extra == "pii"
 Requires-Dist: oracle_ads[opctl] ; extra == "recommender"
 Requires-Dist: scikit-surprise ; extra == "recommender"
 Requires-Dist: plotly ; extra == "recommender"
-Requires-Dist: report-creator==1.0.
+Requires-Dist: report-creator==1.0.37 ; extra == "recommender"
 Requires-Dist: pyspark>=3.0.0 ; extra == "spark"
 Requires-Dist: oracle_ads[viz] ; extra == "tensorflow"
 Requires-Dist: tensorflow<=2.15.1 ; extra == "tensorflow"
@@ -145,17 +150,15 @@ Requires-Dist: opensearch-py ; extra == "testsuite"
 Requires-Dist: pdfplumber ; extra == "testsuite"
 Requires-Dist: py4j ; extra == "testsuite"
 Requires-Dist: pyarrow>=15.0.0 ; extra == "testsuite"
-Requires-Dist: statsmodels ; extra == "testsuite"
-Requires-Dist:
-Requires-Dist: tables ; extra == "testsuite"
-Requires-Dist: tables>3.9.0 ; extra == "testsuite" and ( python_version>='3.9')
+Requires-Dist: statsmodels>=0.14.1 ; extra == "testsuite"
+Requires-Dist: tables>3.9.0 ; extra == "testsuite"
 Requires-Dist: xlrd>=1.2.0 ; extra == "testsuite"
 Requires-Dist: spacy>=3.4.2,<3.8 ; extra == "text"
 Requires-Dist: wordcloud>=1.8.1 ; extra == "text"
 Requires-Dist: oracle_ads[viz] ; extra == "torch"
 Requires-Dist: torch ; extra == "torch"
 Requires-Dist: torchvision ; extra == "torch"
-Requires-Dist: bokeh
+Requires-Dist: bokeh ; extra == "viz"
 Requires-Dist: folium>=0.12.1 ; extra == "viz"
 Requires-Dist: graphviz<0.17 ; extra == "viz"
 Requires-Dist: scipy>=1.5.4 ; extra == "viz"