google-cloud-pipeline-components 2.16.0__py3-none-any.whl → 2.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
- google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py +14 -0
- google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py +208 -0
- google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py +3 -0
- google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py +2 -4
- google_cloud_pipeline_components/_implementation/model_evaluation/version.py +1 -1
- google_cloud_pipeline_components/container/preview/custom_job/remote_runner.py +24 -15
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py +1 -1
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py +2 -2
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py +2 -2
- google_cloud_pipeline_components/preview/automl/forecasting/learn_to_learn_forecasting_pipeline.yaml +38 -38
- google_cloud_pipeline_components/preview/automl/forecasting/sequence_to_sequence_forecasting_pipeline.yaml +38 -38
- google_cloud_pipeline_components/preview/automl/forecasting/temporal_fusion_transformer_forecasting_pipeline.yaml +38 -38
- google_cloud_pipeline_components/preview/automl/forecasting/time_series_dense_encoder_forecasting_pipeline.yaml +38 -38
- google_cloud_pipeline_components/preview/automl/tabular/auto_feature_engineering.py +1 -1
- google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_feature_selection_pipeline.yaml +45 -45
- google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_v2_pipeline.yaml +47 -47
- google_cloud_pipeline_components/preview/automl/tabular/distillation_stage_feature_transform_engine.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/feature_selection.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/feature_selection_pipeline.yaml +4 -4
- google_cloud_pipeline_components/preview/automl/tabular/feature_transform_engine.py +3 -3
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job_pipeline.yaml +15 -15
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer_pipeline.yaml +13 -13
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job_pipeline.yaml +14 -14
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer_pipeline.yaml +13 -13
- google_cloud_pipeline_components/preview/automl/tabular/xgboost_hyperparameter_tuning_job_pipeline.yaml +14 -14
- google_cloud_pipeline_components/preview/automl/tabular/xgboost_trainer_pipeline.yaml +13 -13
- google_cloud_pipeline_components/preview/custom_job/utils.py +24 -14
- google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_classification_pipeline.py +180 -0
- google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_text_generation_pipeline.py +178 -0
- google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/__init__.py +20 -0
- google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py +13 -0
- google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py +109 -0
- google_cloud_pipeline_components/proto/preflight_validations_pb2.py +58 -0
- google_cloud_pipeline_components/proto/template_metadata_pb2.py +21 -17
- google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_predict_pipeline.yaml +10 -10
- google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_train_pipeline.yaml +31 -31
- google_cloud_pipeline_components/v1/automl/forecasting/prophet_predict_pipeline.yaml +13 -13
- google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py +3 -3
- google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml +14 -14
- google_cloud_pipeline_components/v1/automl/tabular/automl_tabular_pipeline.yaml +43 -43
- google_cloud_pipeline_components/v1/automl/tabular/cv_trainer.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/ensemble.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/finalizer.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/infra_validator.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/split_materialized_data.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/stage_1_tuner.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/stats_and_example_gen.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/training_configurator_and_validator.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/transform.py +2 -2
- google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py +2 -18
- google_cloud_pipeline_components/version.py +1 -1
- {google_cloud_pipeline_components-2.16.0.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/METADATA +20 -17
- {google_cloud_pipeline_components-2.16.0.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/RECORD +61 -53
- {google_cloud_pipeline_components-2.16.0.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/WHEEL +1 -1
- {google_cloud_pipeline_components-2.16.0.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/LICENSE +0 -0
- {google_cloud_pipeline_components-2.16.0.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/top_level.txt +0 -0
google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py
ADDED
@@ -0,0 +1,14 @@
+# Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Google Cloud Pipeline Evaluation Import Evaluation Component."""
google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py
ADDED
@@ -0,0 +1,208 @@
+# Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional
+
+from google_cloud_pipeline_components import _image
+from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
+from google_cloud_pipeline_components.types.artifact_types import ForecastingMetrics
+from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
+from google_cloud_pipeline_components.types.artifact_types import VertexModel
+from kfp import dsl
+from kfp.dsl import Input
+from kfp.dsl import Metrics
+
+
+@dsl.container_component
+def model_evaluation_import(
+    model: Input[VertexModel],
+    gcp_resources: dsl.OutputPath(str),
+    evaluation_resource_name: dsl.OutputPath(str),
+    metrics: Optional[Input[Metrics]] = None,
+    row_based_metrics: Optional[Input[Metrics]] = None,
+    problem_type: Optional[str] = None,
+    classification_metrics: Optional[Input[ClassificationMetrics]] = None,
+    forecasting_metrics: Optional[Input[ForecastingMetrics]] = None,
+    regression_metrics: Optional[Input[RegressionMetrics]] = None,
+    text_generation_metrics: Optional[Input[Metrics]] = None,
+    question_answering_metrics: Optional[Input[Metrics]] = None,
+    summarization_metrics: Optional[Input[Metrics]] = None,
+    explanation: Optional[Input[Metrics]] = None,
+    feature_attributions: Optional[Input[Metrics]] = None,
+    embedding_metrics: Optional[Input[Metrics]] = None,
+    display_name: str = "",
+    dataset_path: str = "",
+    dataset_paths: List[str] = [],
+    dataset_type: str = "",
+):
+  # fmt: off
+  """Imports a model evaluation artifact to an existing Vertex model with
+  ModelService.ImportModelEvaluation.
+
+  For more details, see
+  https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models.evaluations
+  One of the four metrics inputs must be provided, metrics & problem_type,
+  classification_metrics, regression_metrics, or forecasting_metrics.
+
+  Args:
+    model: Vertex model resource that will be the parent resource of the
+      uploaded evaluation.
+    metrics: Path of metrics generated from an evaluation component.
+    row_based_metrics:
+      Path of row_based_metrics generated from an evaluation component.
+    problem_type: The problem type of the metrics being imported to the
+      VertexModel. `classification`, `regression`, `forecasting`,
+      `text-generation`, `question-answering`, and `summarization` are the
+      currently supported problem types. Must be provided when `metrics` is
+      provided.
+    classification_metrics: google.ClassificationMetrics artifact generated from
+      the ModelEvaluationClassificationOp component.
+    forecasting_metrics: google.ForecastingMetrics artifact generated from
+      the ModelEvaluationForecastingOp component.
+    regression_metrics: google.ClassificationMetrics artifact generated from
+      the ModelEvaluationRegressionOp component.
+    text_generation_metrics: system.Metrics artifact generated from
+      the LLMEvaluationTextGenerationOp component. Subject to change to
+      google.TextGenerationMetrics.
+    question_answering_metrics: system.Metrics artifact generated from
+      the LLMEvaluationTextGenerationOp component. Subject to change to
+      google.QuestionAnsweringMetrics.
+    summarization_metrics: system.Metrics artifact generated from
+      the LLMEvaluationTextGenerationOp component. Subject to change to
+      google.SummarizationMetrics.
+    explanation: Path for model explanation metrics generated from an evaluation
+      component.
+    feature_attributions: The feature attributions metrics artifact generated
+      from the feature attribution component.
+    embedding_metrics: The embedding metrics artifact generated from the
+      embedding retrieval metrics component.
+    display_name: The display name for the uploaded model evaluation resource.
+  """
+  # fmt: on
+  return dsl.ContainerSpec(
+      image=_image.GCPC_IMAGE_TAG,
+      command=[
+          "python3",
+          "-u",
+          "-m",
+          "google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation",
+      ],
+      args=[
+          dsl.IfPresentPlaceholder(
+              input_name="metrics",
+              then=[
+                  "--metrics",
+                  metrics.uri,
+                  "--metrics_explanation",
+                  metrics.metadata["explanation_gcs_path"],
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="row_based_metrics",
+              then=[
+                  "--row_based_metrics",
+                  row_based_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="explanation",
+              then=[
+                  "--explanation",
+                  explanation.metadata["explanation_gcs_path"],
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="classification_metrics",
+              then=[
+                  "--classification_metrics",
+                  classification_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="forecasting_metrics",
+              then=[
+                  "--forecasting_metrics",
+                  forecasting_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="regression_metrics",
+              then=[
+                  "--regression_metrics",
+                  regression_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="text_generation_metrics",
+              then=[
+                  "--text_generation_metrics",
+                  text_generation_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="question_answering_metrics",
+              then=[
+                  "--question_answering_metrics",
+                  question_answering_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="summarization_metrics",
+              then=[
+                  "--summarization_metrics",
+                  summarization_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="feature_attributions",
+              then=[
+                  "--feature_attributions",
+                  feature_attributions.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="embedding_metrics",
+              then=[
+                  "--embedding_metrics",
+                  embedding_metrics.uri,
+              ],
+          ),
+          dsl.IfPresentPlaceholder(
+              input_name="problem_type",
+              then=[
+                  "--problem_type",
+                  problem_type,
+              ],
+          ),
+          "--display_name",
+          display_name,
+          "--dataset_path",
+          dataset_path,
+          "--dataset_paths",
+          dataset_paths,
+          "--dataset_type",
+          dataset_type,
+          "--pipeline_job_id",
+          dsl.PIPELINE_JOB_ID_PLACEHOLDER,
+          "--pipeline_job_resource_name",
+          dsl.PIPELINE_JOB_RESOURCE_NAME_PLACEHOLDER,
+          "--model_name",
+          model.metadata["resourceName"],
+          "--gcp_resources",
+          gcp_resources,
+          "--evaluation_resource_name",
+          evaluation_resource_name,
+      ],
+  )
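For orientation, a minimal wiring sketch follows; it is not part of the package diff. The `make_metrics` component and the model resource name are made-up stand-ins for a real upstream evaluation step, and the component is imported from the implementation path shown above. The `dsl.importer` step materializes the Vertex model as a `VertexModel` artifact carrying the `resourceName` metadata the component reads.

# Illustrative sketch only -- the metrics producer and resource name are fabricated.
from google_cloud_pipeline_components._implementation.model_evaluation.import_evaluation.component import model_evaluation_import
from google_cloud_pipeline_components.types import artifact_types
from kfp import compiler, dsl

MODEL_RESOURCE_NAME = 'projects/123/locations/us-central1/models/456'  # placeholder


@dsl.component
def make_metrics(metrics: dsl.Output[dsl.Metrics]):
  # Hypothetical upstream step producing a system.Metrics artifact.
  metrics.log_metric('accuracy', 0.92)


@dsl.pipeline(name='import-model-evaluation-sketch')
def import_eval_pipeline():
  # Materialize the target Vertex model as a VertexModel artifact.
  get_model = dsl.importer(
      artifact_uri=MODEL_RESOURCE_NAME,
      artifact_class=artifact_types.VertexModel,
      metadata={'resourceName': MODEL_RESOURCE_NAME},
  )
  eval_metrics = make_metrics()
  # Per the docstring, the generic `metrics` input is paired with `problem_type`.
  model_evaluation_import(
      model=get_model.output,
      metrics=eval_metrics.outputs['metrics'],
      problem_type='classification',
      display_name='imported-eval',
  )


compiler.Compiler().compile(import_eval_pipeline, 'import_eval_pipeline.yaml')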
google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py
CHANGED
@@ -32,6 +32,7 @@ def model_evaluation_text_generation(
     row_based_metrics: Output[Metrics],
     project: str,
     location: str,
+    model_name: str,
     evaluation_task: str = 'text-generation',
     target_field_name: str = 'instance.output_text',
     prediction_field_name: str = 'predictions.content',
@@ -55,6 +56,7 @@ def model_evaluation_text_generation(
   Args:
     project: The GCP project that runs the pipeline component.
     location: The GCP region that runs the pipeline component.
+    model_name: The name of the model to be evaluated.
     evaluation_task: The task that the large language model will be evaluated
       on. The evaluation component computes a set of metrics relevant to that
       specific task. Currently supported tasks are: `summarization`,
@@ -124,6 +126,7 @@ def model_evaluation_text_generation(
       machine_type=machine_type,
       image_uri=version.LLM_EVAL_IMAGE_TAG,
       args=[
+          f'--model_name={model_name}',
           f'--evaluation_task={evaluation_task}',
           f'--target_field_name={target_field_name}',
           f'--prediction_field_name={prediction_field_name}',
google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py
CHANGED
@@ -101,10 +101,8 @@ def evaluation_dataset_preprocessor_internal(
           f'--gcs_source_uris={gcs_source_uris}',
           f'--input_field_name={input_field_name}',
           f'--role_field_name={role_field_name}',
-
-
-          f'--model_name={model_name}'
-          ),
+          f'--target_field_name={target_field_name}',
+          f'--model_name={model_name}',
           f'--output_dirs={output_dirs}',
           '--executor_input={{$.json_escape[1]}}',
       ],
google_cloud_pipeline_components/_implementation/model_evaluation/version.py
CHANGED
@@ -14,7 +14,7 @@
 """Version constants for model evaluation components."""
 
 _EVAL_VERSION = 'v0.9.4'
-_LLM_EVAL_VERSION = 'v0.
+_LLM_EVAL_VERSION = 'v0.7'
 
 _EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/model-evaluation'
 _LLM_EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/llm-model-evaluation'
google_cloud_pipeline_components/container/preview/custom_job/remote_runner.py
CHANGED
@@ -32,23 +32,32 @@ def insert_system_labels_into_payload(payload):
   return json.dumps(job_spec)
 
 
-def
-
+def is_json(test_string: str) -> bool:
+  try:
+    json.loads(test_string)
+  except ValueError:
+    return False
+  return True
+
+
+def parse_nested_json_strings(payload):
+  """Parse nested json strings in the payload."""
 
   job_spec = json.loads(payload)
-  # TODO(b/353577594):
-  #
-
-  if (
-      'accelerator_count'
-
+  # TODO(b/353577594): Nested placeholder fields inside worker_pool_specs are
+  # not parsed correctly in backend. Can remove when fix backend logic.
+  worker_pool_spec = job_spec['job_spec']['worker_pool_specs'][0]
+  if is_json(
+      worker_pool_spec.get('machine_spec', {}).get('accelerator_count', '')
+  ):
+    worker_pool_spec['machine_spec']['accelerator_count'] = json.loads(
+        worker_pool_spec['machine_spec']['accelerator_count']
+    )
+  if is_json(
+      worker_pool_spec.get('disk_spec', {}).get('boot_disk_size_gb', '')
   ):
-
-      '
-      ] = int(
-          job_spec['job_spec']['worker_pool_specs'][0]['machine_spec'][
-              'accelerator_count'
-          ]
+    worker_pool_spec['disk_spec']['boot_disk_size_gb'] = json.loads(
+        worker_pool_spec['disk_spec']['boot_disk_size_gb']
     )
   return json.dumps(job_spec)
 
@@ -107,7 +116,7 @@ def create_custom_job(
   # Create custom job if it does not exist
   job_name = remote_runner.check_if_job_exists()
   if job_name is None:
-    payload =
+    payload = parse_nested_json_strings(payload)
     job_name = remote_runner.create_job(
         create_custom_job_with_client,
         insert_system_labels_into_payload(payload),
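To see what the new helpers do, the snippet below copies `is_json` and `parse_nested_json_strings` from the hunk above and runs them on a fabricated payload: the string-valued `accelerator_count` and `boot_disk_size_gb` fields come back as JSON numbers, which is the form `create_custom_job` now passes along before submitting the job.

# Standalone demo of the helpers introduced above; the sample payload is made up.
import json


def is_json(test_string: str) -> bool:
  try:
    json.loads(test_string)
  except ValueError:
    return False
  return True


def parse_nested_json_strings(payload):
  """Parse nested json strings in the payload."""
  job_spec = json.loads(payload)
  worker_pool_spec = job_spec['job_spec']['worker_pool_specs'][0]
  if is_json(
      worker_pool_spec.get('machine_spec', {}).get('accelerator_count', '')
  ):
    worker_pool_spec['machine_spec']['accelerator_count'] = json.loads(
        worker_pool_spec['machine_spec']['accelerator_count']
    )
  if is_json(
      worker_pool_spec.get('disk_spec', {}).get('boot_disk_size_gb', '')
  ):
    worker_pool_spec['disk_spec']['boot_disk_size_gb'] = json.loads(
        worker_pool_spec['disk_spec']['boot_disk_size_gb']
    )
  return json.dumps(job_spec)


payload = json.dumps({
    'job_spec': {
        'worker_pool_specs': [{
            'machine_spec': {
                'machine_type': 'n1-standard-8',
                'accelerator_count': '2',  # arrives as a string after placeholder substitution
            },
            'disk_spec': {'boot_disk_size_gb': '100'},
        }]
    }
})
print(parse_nested_json_strings(payload))
# accelerator_count and boot_disk_size_gb are now the numbers 2 and 100.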
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py
CHANGED
@@ -72,7 +72,7 @@ def automl_forecasting_ensemble(
   # fmt: on
   job_id = dsl.PIPELINE_JOB_ID_PLACEHOLDER
   task_id = dsl.PIPELINE_TASK_ID_PLACEHOLDER
-  image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+  image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625'
   display_name = f'automl-forecasting-ensemble-{job_id}-{task_id}'
 
   error_file_path = f'{root_dir}/{job_id}/{task_id}/error.pb'
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py
CHANGED
@@ -99,14 +99,14 @@ def automl_forecasting_stage_1_tuner(
               ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
               ' "container_spec": {"image_uri":"'
           ),
-          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
           '", "args": ["forecasting_mp_l2l_stage_1_tuner',
           '", "--region=',
           location,
           '", "--transform_output_path=',
           transform_output.uri,
           '", "--training_docker_uri=',
-          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
           '", "--reduce_search_space_mode=',
           reduce_search_space_mode,
           f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py
CHANGED
@@ -97,14 +97,14 @@ def automl_forecasting_stage_2_tuner(
               ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
               ' "container_spec": {"image_uri":"'
           ),
-          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
           '", "args": ["forecasting_mp_l2l_stage_2_tuner',
           '", "--region=',
           location,
           '", "--transform_output_path=',
           transform_output.uri,
           '", "--training_docker_uri=',
-          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+          'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
           f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
           '", "--training_base_dir=',
           root_dir,