oracle-ads 2.11.7__py3-none-any.whl → 2.11.9__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- ads/aqua/__init__.py +24 -14
- ads/aqua/base.py +0 -2
- ads/aqua/cli.py +50 -2
- ads/aqua/decorator.py +8 -0
- ads/aqua/deployment.py +37 -34
- ads/aqua/evaluation.py +106 -49
- ads/aqua/extension/base_handler.py +18 -10
- ads/aqua/extension/common_handler.py +21 -2
- ads/aqua/extension/deployment_handler.py +1 -4
- ads/aqua/extension/evaluation_handler.py +1 -2
- ads/aqua/extension/finetune_handler.py +0 -1
- ads/aqua/extension/ui_handler.py +1 -12
- ads/aqua/extension/utils.py +4 -4
- ads/aqua/finetune.py +24 -11
- ads/aqua/model.py +2 -4
- ads/aqua/utils.py +40 -23
- ads/cli.py +19 -1
- ads/common/serializer.py +5 -4
- ads/common/utils.py +6 -2
- ads/config.py +1 -0
- ads/llm/serializers/runnable_parallel.py +7 -1
- ads/model/datascience_model.py +20 -0
- ads/opctl/operator/lowcode/anomaly/README.md +1 -1
- ads/opctl/operator/lowcode/anomaly/environment.yaml +1 -1
- ads/opctl/operator/lowcode/anomaly/model/automlx.py +15 -10
- ads/opctl/operator/lowcode/anomaly/model/autots.py +9 -10
- ads/opctl/operator/lowcode/anomaly/model/base_model.py +34 -37
- ads/opctl/operator/lowcode/anomaly/model/tods.py +4 -4
- ads/opctl/operator/lowcode/anomaly/schema.yaml +1 -1
- ads/opctl/operator/lowcode/forecast/README.md +1 -1
- ads/opctl/operator/lowcode/forecast/environment.yaml +4 -4
- ads/opctl/operator/lowcode/forecast/model/arima.py +19 -21
- ads/opctl/operator/lowcode/forecast/model/automlx.py +36 -42
- ads/opctl/operator/lowcode/forecast/model/autots.py +41 -25
- ads/opctl/operator/lowcode/forecast/model/base_model.py +93 -107
- ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +51 -45
- ads/opctl/operator/lowcode/forecast/model/prophet.py +32 -27
- ads/opctl/operator/lowcode/forecast/schema.yaml +2 -2
- ads/opctl/operator/lowcode/forecast/utils.py +4 -4
- ads/opctl/operator/lowcode/pii/README.md +1 -1
- ads/opctl/operator/lowcode/pii/environment.yaml +1 -1
- ads/opctl/operator/lowcode/pii/model/report.py +71 -70
- {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/METADATA +5 -5
- {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/RECORD +47 -47
- {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/LICENSE.txt +0 -0
- {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/WHEEL +0 -0
- {oracle_ads-2.11.7.dist-info → oracle_ads-2.11.9.dist-info}/entry_points.txt +0 -0
ads/llm/serializers/runnable_parallel.py
CHANGED
@@ -1,3 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*--
+
+# Copyright (c) 2024 Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
 from langchain.schema.runnable import RunnableParallel
 from langchain.load.dump import dumpd
 from langchain.load.load import load
@@ -10,7 +16,7 @@ class RunnableParallelSerializer:
 
     @staticmethod
     def load(config: dict, **kwargs):
-        steps = config
+        steps = config.get("kwargs", dict()).get("steps", dict())
        steps = {k: load(v, **kwargs) for k, v in steps.items()}
        return RunnableParallel(**steps)
 
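The updated `load` no longer treats the whole config dict as the steps mapping; it now reads the steps from the nested `kwargs.steps` entry that langchain's `dumpd` serialization produces. A minimal sketch of the two shapes, using placeholder payloads rather than real serialized runnables:

```python
# Placeholder payloads; real entries would be langchain-serialized runnables.
flat_config = {"double": {"lc": 1}, "square": {"lc": 1}}
nested_config = {"kwargs": {"steps": {"double": {"lc": 1}, "square": {"lc": 1}}}}

# 2.11.7 behavior: the config itself was assumed to be the steps mapping.
steps_old = flat_config

# 2.11.9 behavior: steps are looked up under config["kwargs"]["steps"], with safe defaults.
steps_new = nested_config.get("kwargs", dict()).get("steps", dict())

assert steps_old == steps_new
```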
ads/model/datascience_model.py
CHANGED
@@ -115,6 +115,8 @@ class DataScienceModel(Builder):
         Model version set ID
     version_label: str
         Model version label
+    version_id: str
+        Model version id
     model_file_description: dict
         Contains object path details for models created by reference.
 
@@ -169,6 +171,8 @@ class DataScienceModel(Builder):
         Sets the model version set ID.
     with_version_label(self, version_label: str):
         Sets the model version label.
+    with_version_id(self, version_id: str):
+        Sets the model version id.
     with_model_file_description: dict
         Sets path details for models created by reference. Input can be either a dict, string or json file and
         the schema is dictated by model_file_description_schema.json
@@ -209,6 +213,7 @@ class DataScienceModel(Builder):
     CONST_MODEL_VERSION_SET_ID = "modelVersionSetId"
     CONST_MODEL_VERSION_SET_NAME = "modelVersionSetName"
     CONST_MODEL_VERSION_LABEL = "versionLabel"
+    CONST_MODEL_VERSION_ID = "versionId"
     CONST_TIME_CREATED = "timeCreated"
     CONST_LIFECYCLE_STATE = "lifecycleState"
     CONST_MODEL_FILE_DESCRIPTION = "modelDescription"
@@ -230,6 +235,7 @@ class DataScienceModel(Builder):
         CONST_MODEL_VERSION_SET_ID: "model_version_set_id",
         CONST_MODEL_VERSION_SET_NAME: "model_version_set_name",
         CONST_MODEL_VERSION_LABEL: "version_label",
+        CONST_MODEL_VERSION_ID: "version_id",
         CONST_TIME_CREATED: "time_created",
         CONST_LIFECYCLE_STATE: "lifecycle_state",
         CONST_MODEL_FILE_DESCRIPTION: "model_file_description",
@@ -612,6 +618,20 @@ class DataScienceModel(Builder):
         """
         return self.set_spec(self.CONST_MODEL_VERSION_LABEL, version_label)
 
+    @property
+    def version_id(self) -> str:
+        return self.get_spec(self.CONST_MODEL_VERSION_ID)
+
+    def with_version_id(self, version_id: str):
+        """Sets the model version id.
+
+        Parameters
+        ----------
+        version_id: str
+            The model version id.
+        """
+        return self.set_spec(self.CONST_MODEL_VERSION_ID, version_id)
+
     @property
     def model_file_description(self) -> dict:
         return self.get_spec(self.CONST_MODEL_FILE_DESCRIPTION)
ads/opctl/operator/lowcode/anomaly/README.md
CHANGED
@@ -35,7 +35,7 @@ All generated configurations should be ready to use without the need for any add
 To run anomaly detection locally, create and activate a new conda environment (`ads-anomaly`). Install all the required libraries listed in the `environment.yaml` file.
 
 ```yaml
--
+- report-creator
 - cerberus
 - oracle-automlx==23.4.1
 - oracle-automlx[classic]==23.4.1
ads/opctl/operator/lowcode/anomaly/model/automlx.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--
 
-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import pandas as pd
@@ -27,8 +27,13 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
     def _build_model(self) -> pd.DataFrame:
         from automlx import init
         import logging
+
         try:
-            init(
+            init(
+                engine="ray",
+                engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
+                loglevel=logging.CRITICAL,
+            )
         except Exception as e:
             logger.info("Ray already initialized")
         date_column = self.spec.datetime_column.name
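The `init` call now pins the Ray engine, redirects Ray's temp directory, and silences automlx logging, and it stays inside `try/except` so a repeat call in an already-initialized process is ignored. A standalone sketch of that guard, mirroring the hunk above (the temp directory value comes from the diff, not from a requirement):

```python
import logging

from automlx import init

try:
    init(
        engine="ray",
        engine_opts={"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},
        loglevel=logging.CRITICAL,
    )
except Exception:
    # automlx/Ray was already initialized in this process; continue with the existing engine.
    pass
```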
@@ -68,21 +73,21 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
         return anomaly_output
 
     def _generate_report(self):
-        import
+        import report_creator as rc
 
         """The method that needs to be implemented on the particular model level."""
-
-
-
-
-
+        other_sections = [
+            rc.Heading("Selected Models Overview", level=2),
+            rc.Text(
+                "The following tables provide information regarding the chosen model."
+            ),
+        ]
 
-        model_description =
+        model_description = rc.Text(
             "The automlx model automatically pre-processes, selects and engineers "
             "high-quality features in your dataset, which then given to an automatically "
             "chosen and optimized machine learning model.."
         )
-        other_sections = all_sections
 
         return (
             model_description,
ads/opctl/operator/lowcode/anomaly/model/autots.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--
 
-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import pandas as pd
@@ -81,21 +81,20 @@ class AutoTSOperatorModel(AnomalyOperatorBaseModel):
         return anomaly_output
 
     def _generate_report(self):
-        import
+        import report_creator as rc
 
         """The method that needs to be implemented on the particular model level."""
-
-
-
-
-
-
-        model_description =
+        other_sections = [
+            rc.Heading("Selected Models Overview", level=2),
+            rc.Text(
+                "The following tables provide information regarding the chosen model."
+            ),
+        ]
+        model_description = rc.Text(
             "The automlx model automatically pre-processes, selects and engineers "
             "high-quality features in your dataset, which then given to an automatically "
             "chosen and optimized machine learning model.."
         )
-        other_sections = all_sections
 
         return (
             model_description,
ads/opctl/operator/lowcode/anomaly/model/base_model.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--
 
-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import os
@@ -57,7 +57,7 @@ class AnomalyOperatorBaseModel(ABC):
 
     def generate_report(self):
         """Generates the report."""
-        import
+        import report_creator as rc
         import matplotlib.pyplot as plt
 
         start_time = time.time()
@@ -79,12 +79,10 @@ class AnomalyOperatorBaseModel(ABC):
             anomaly_output, test_data, elapsed_time
         )
         table_blocks = [
-
+            rc.DataTable(df, label=col, index=True)
             for col, df in self.datasets.full_data_dict.items()
         ]
-        data_table = (
-            dp.Select(blocks=table_blocks) if len(table_blocks) > 1 else table_blocks[0]
-        )
+        data_table = rc.Select(blocks=table_blocks)
         date_column = self.spec.datetime_column.name
 
         blocks = []
@@ -106,44 +104,42 @@ class AnomalyOperatorBaseModel(ABC):
                 plt.xlabel(date_column)
                 plt.ylabel(col)
                 plt.title(f"`{col}` with reference to anomalies")
-                figure_blocks.append(ax)
-            blocks.append(
-        plots =
+                figure_blocks.append(rc.Widget(ax))
+            blocks.append(rc.Group(*figure_blocks, label=target))
+        plots = rc.Select(blocks)
 
         report_sections = []
-        title_text =
-
-        yaml_appendix_title =
-        yaml_appendix =
-        summary =
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-        ]
+        title_text = rc.Heading("Anomaly Detection Report", level=1)
+
+        yaml_appendix_title = rc.Heading("Reference: YAML File", level=2)
+        yaml_appendix = rc.Yaml(self.config.to_dict())
+        summary = rc.Block(
+            rc.Group(
+                rc.Text(f"You selected the **`{self.spec.model}`** model."),
+                rc.Text(
+                    "Based on your dataset, you could have also selected "
+                    f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
+                ),
+                rc.Metric(
+                    heading="Analysis was completed in ",
+                    value=human_time_friendly(elapsed_time),
+                ),
+                label="Summary",
+            )
         )
-        sec_text =
-        sec =
+        sec_text = rc.Heading("Train Evaluation Metrics", level=2)
+        sec = rc.DataTable(self._evaluation_metrics(anomaly_output), index=True)
         evaluation_metrics_sec = [sec_text, sec]
 
         test_metrics_sections = []
         if total_metrics is not None and not total_metrics.empty:
-            sec_text =
-            sec =
+            sec_text = rc.Heading("Test Data Evaluation Metrics", level=2)
+            sec = rc.DataTable(total_metrics, index=True)
             test_metrics_sections = test_metrics_sections + [sec_text, sec]
 
         if summary_metrics is not None and not summary_metrics.empty:
-            sec_text =
-            sec =
+            sec_text = rc.Heading("Test Data Summary Metrics", level=2)
+            sec = rc.DataTable(summary_metrics, index=True)
             test_metrics_sections = test_metrics_sections + [sec_text, sec]
 
         report_sections = (
@@ -248,7 +244,7 @@ class AnomalyOperatorBaseModel(ABC):
         test_metrics: pd.DataFrame,
     ):
         """Saves resulting reports to the given folder."""
-        import
+        import report_creator as rc
 
         unique_output_dir = find_output_dirname(self.spec.output_directory)
 
@@ -257,11 +253,12 @@ class AnomalyOperatorBaseModel(ABC):
         else:
             storage_options = dict()
 
-        #
+        # report-creator html report
         with tempfile.TemporaryDirectory() as temp_dir:
             report_local_path = os.path.join(temp_dir, "___report.html")
             disable_print()
-
+            with rc.ReportCreator("My Report") as report:
+                report.save(rc.Block(*report_sections), report_local_path)
             enable_print()
             with open(report_local_path) as f1:
                 with fsspec.open(
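Taken together, these hunks replace the old reporting dependency with `report_creator`: sections are built as `rc` objects and written inside a `ReportCreator` context. A minimal, self-contained sketch using only calls that appear in this diff (the title and section contents are illustrative):

```python
import report_creator as rc

# Illustrative sections; the operator assembles these from model output and config.
report_sections = [
    rc.Heading("Anomaly Detection Report", level=1),
    rc.Text("Example summary text."),
]

with rc.ReportCreator("My Report") as report:
    report.save(rc.Block(*report_sections), "report.html")
```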
ads/opctl/operator/lowcode/anomaly/model/tods.py
CHANGED
@@ -1,7 +1,7 @@
 # #!/usr/bin/env python
 # # -*- coding: utf-8 -*--
 
-# # Copyright (c) 2023 Oracle and/or its affiliates.
+# # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 # import importlib
@@ -97,16 +97,16 @@
 #         return anomaly_output
 
 #     def _generate_report(self):
-#         import
+#         import report_creator as rc
 
 #         """The method that needs to be implemented on the particular model level."""
-#         selected_models_text =
+#         selected_models_text = rc.Text(
 #             f"## Selected Models Overview \n "
 #             "The following tables provide information regarding the chosen model."
 #         )
 #         all_sections = [selected_models_text]
 
-#         model_description =
+#         model_description = rc.Text(
 #             "The tods model is a full-stack automated machine learning system for outlier detection "
 #             "on univariate / multivariate time-series data. It provides exhaustive modules for building "
 #             "machine learning-based outlier detection systems and wide range of algorithms."
ads/opctl/operator/lowcode/forecast/environment.yaml
CHANGED
@@ -10,12 +10,12 @@ dependencies:
 - neuralprophet
 - pmdarima
 - statsmodels
--
+- report-creator
 - cerberus
 - sktime
 - shap
 - autots[additional]
-- optuna
-- oracle-automlx
-- oracle-automlx[forecasting]
+- optuna
+- oracle-automlx>=23.4.1
+- oracle-automlx[forecasting]>=23.4.1
 - fire
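The forecasting environment swaps the removed reporting dependency for `report-creator` and pins `oracle-automlx` to 23.4.1 or newer. A hypothetical way to confirm the resolved versions from inside the activated environment:

```python
from importlib.metadata import PackageNotFoundError, version

# Hypothetical check; names match the distributions listed in environment.yaml.
for pkg in ("report-creator", "oracle-automlx", "optuna"):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not installed")
```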
ads/opctl/operator/lowcode/forecast/model/arima.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--
 
-# Copyright (c) 2023 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import pandas as pd
@@ -66,7 +66,7 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
         target = self.original_target_column
         self.forecast_output.init_series_output(series_id=s_id, data_at_series=df)
         # If trend is constant, remove constant columns
-        if
+        if "trend" not in model_kwargs or model_kwargs["trend"] == "c":
             self.constant_cols[s_id] = df.columns[df.nunique() == 1]
             df = df.drop(columns=self.constant_cols[s_id])
 
@@ -147,29 +147,25 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
 
     def _generate_report(self):
         """The method that needs to be implemented on the particular model level."""
-        import
+        import report_creator as rc
+
         all_sections = []
         if len(self.models) > 0:
-            sec5_text =
+            sec5_text = rc.Heading("ARIMA Model Parameters", level=2)
             blocks = [
-
+                rc.Html(
                     m.summary().as_html(),
                     label=s_id,
                 )
                 for i, (s_id, m) in enumerate(self.models.items())
             ]
-            sec5 =
+            sec5 = rc.Select(blocks=blocks)
             all_sections = [sec5_text, sec5]
 
         if self.spec.generate_explanations:
             try:
                 # If the key is present, call the "explain_model" method
                 self.explain_model()
-                # Create a markdown text block for the global explanation section
-                global_explanation_text = dp.Text(
-                    f"## Global Explanation of Models \n "
-                    "The following tables provide the feature attribution for the global explainability."
-                )
 
                 # Convert the global explanation data to a DataFrame
                 global_explanation_df = pd.DataFrame(self.global_explanation)
@@ -185,9 +181,12 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
                 )
 
                 # Create a markdown section for the global explainability
-                global_explanation_section =
-                    "
-
+                global_explanation_section = rc.Block(
+                    rc.Heading("Global Explanation of Models", level=2),
+                    rc.Text(
+                        "The following tables provide the feature attribution for the global explainability."
+                    ),
+                    rc.DataTable(self.formatted_global_explanation, index=True),
                 )
 
                 aggregate_local_explanations = pd.DataFrame()
@@ -199,30 +198,29 @@ class ArimaOperatorModel(ForecastOperatorBaseModel):
                 )
                 self.formatted_local_explanation = aggregate_local_explanations
 
-                local_explanation_text = dp.Text(f"## Local Explanation of Models \n ")
                 blocks = [
-
+                    rc.DataTable(
                         local_ex_df.div(local_ex_df.abs().sum(axis=1), axis=0) * 100,
                         label=s_id,
+                        index=True,
                     )
                     for s_id, local_ex_df in self.local_explanation.items()
                 ]
-                local_explanation_section = (
-
+                local_explanation_section = rc.Block(
+                    rc.Heading("Local Explanation of Models", level=2),
+                    rc.Select(blocks=blocks),
                 )
 
                 # Append the global explanation text and section to the "all_sections" list
                 all_sections = all_sections + [
-                    global_explanation_text,
                     global_explanation_section,
-                    local_explanation_text,
                     local_explanation_section,
                 ]
             except Exception as e:
                 logger.warn(f"Failed to generate Explanations with error: {e}.")
                 logger.debug(f"Full Traceback: {traceback.format_exc()}")
 
-        model_description =
+        model_description = rc.Text(
             "An autoregressive integrated moving average, or ARIMA, is a statistical "
             "analysis model that uses time series data to either better understand the "
             "data set or to predict future trends. A statistical model is autoregressive if "