google-cloud-pipeline-components 2.13.1__py3-none-any.whl → 2.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of google-cloud-pipeline-components might be problematic.
- google_cloud_pipeline_components/__init__.py +5 -6
- google_cloud_pipeline_components/_implementation/llm/deployment_graph.py +4 -10
- google_cloud_pipeline_components/_implementation/llm/env.py +1 -1
- google_cloud_pipeline_components/_implementation/llm/function_based.py +14 -48
- google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
- google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py +27 -36
- google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py +26 -41
- google_cloud_pipeline_components/_implementation/llm/rlhf_preprocessor.py +60 -0
- google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py +11 -0
- google_cloud_pipeline_components/_placeholders.py +30 -1
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py +1 -1
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py +2 -2
- google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py +2 -2
- google_cloud_pipeline_components/preview/automl/forecasting/learn_to_learn_forecasting_pipeline.yaml +34 -34
- google_cloud_pipeline_components/preview/automl/forecasting/sequence_to_sequence_forecasting_pipeline.yaml +34 -34
- google_cloud_pipeline_components/preview/automl/forecasting/temporal_fusion_transformer_forecasting_pipeline.yaml +34 -34
- google_cloud_pipeline_components/preview/automl/forecasting/time_series_dense_encoder_forecasting_pipeline.yaml +34 -34
- google_cloud_pipeline_components/preview/automl/tabular/auto_feature_engineering.py +1 -1
- google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_feature_selection_pipeline.yaml +39 -39
- google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_v2_pipeline.yaml +41 -41
- google_cloud_pipeline_components/preview/automl/tabular/distillation_stage_feature_transform_engine.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/feature_selection.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/feature_selection_pipeline.yaml +4 -4
- google_cloud_pipeline_components/preview/automl/tabular/feature_transform_engine.py +3 -3
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job_pipeline.yaml +17 -17
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer_pipeline.yaml +15 -15
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job_pipeline.yaml +16 -16
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer.py +2 -2
- google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer_pipeline.yaml +15 -15
- google_cloud_pipeline_components/preview/automl/tabular/xgboost_hyperparameter_tuning_job_pipeline.yaml +14 -14
- google_cloud_pipeline_components/preview/automl/tabular/xgboost_trainer_pipeline.yaml +13 -13
- google_cloud_pipeline_components/preview/automl/vision/data_converter.py +3 -1
- google_cloud_pipeline_components/preview/custom_job/component.py +2 -2
- google_cloud_pipeline_components/preview/custom_job/utils.py +3 -2
- google_cloud_pipeline_components/preview/llm/rlhf/component.py +60 -8
- google_cloud_pipeline_components/preview/model_evaluation/__init__.py +1 -1
- google_cloud_pipeline_components/proto/template_metadata_pb2.py +22 -15
- google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_predict_pipeline.yaml +10 -10
- google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_train_pipeline.yaml +31 -31
- google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py +3 -3
- google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml +14 -14
- google_cloud_pipeline_components/v1/automl/tabular/automl_tabular_pipeline.yaml +37 -37
- google_cloud_pipeline_components/v1/automl/tabular/cv_trainer.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/ensemble.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/finalizer.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/infra_validator.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/split_materialized_data.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/stage_1_tuner.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/stats_and_example_gen.py +2 -2
- google_cloud_pipeline_components/v1/automl/tabular/training_configurator_and_validator.py +1 -1
- google_cloud_pipeline_components/v1/automl/tabular/transform.py +2 -2
- google_cloud_pipeline_components/v1/model_evaluation/__init__.py +3 -1
- google_cloud_pipeline_components/v1/model_evaluation/classification_component.py +2 -2
- google_cloud_pipeline_components/{preview → v1}/model_evaluation/model_based_llm_evaluation/__init__.py +2 -2
- google_cloud_pipeline_components/version.py +1 -1
- {google_cloud_pipeline_components-2.13.1.dist-info → google_cloud_pipeline_components-2.14.0.dist-info}/METADATA +18 -19
- {google_cloud_pipeline_components-2.13.1.dist-info → google_cloud_pipeline_components-2.14.0.dist-info}/RECORD +65 -66
- {google_cloud_pipeline_components-2.13.1.dist-info → google_cloud_pipeline_components-2.14.0.dist-info}/WHEEL +1 -1
- google_cloud_pipeline_components/proto/preflight_validations_pb2.py +0 -47
- /google_cloud_pipeline_components/{preview → v1}/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py +0 -0
- /google_cloud_pipeline_components/{preview → v1}/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py +0 -0
- {google_cloud_pipeline_components-2.13.1.dist-info → google_cloud_pipeline_components-2.14.0.dist-info}/LICENSE +0 -0
- {google_cloud_pipeline_components-2.13.1.dist-info → google_cloud_pipeline_components-2.14.0.dist-info}/top_level.txt +0 -0
google_cloud_pipeline_components/__init__.py

@@ -17,14 +17,13 @@ import warnings

 from google_cloud_pipeline_components.version import __version__

-if sys.version_info < (3,
+if sys.version_info < (3, 9):
   warnings.warn(
       (
-          '
-          '
-          '
-          '
-          ' more details.'
+          ' Google Cloud Pipeline Components will drop support for Python 3.8'
+          ' on Oct 1, 2024. To use new versions of the GCPC SDK after that'
+          ' date, you will need to upgrade to Python >= 3.9. See'
+          ' https://devguide.python.org/versions/ for more details.'
       ),
       FutureWarning,
       stacklevel=2,
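The new guard is self-contained; a standalone sketch mirroring the added lines, runnable on any Python interpreter:

import sys
import warnings

# Same check and message as the 2.14.0 __init__.py shown above.
if sys.version_info < (3, 9):
  warnings.warn(
      (
          ' Google Cloud Pipeline Components will drop support for Python 3.8'
          ' on Oct 1, 2024. To use new versions of the GCPC SDK after that'
          ' date, you will need to upgrade to Python >= 3.9. See'
          ' https://devguide.python.org/versions/ for more details.'
      ),
      FutureWarning,
      stacklevel=2,
  )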
google_cloud_pipeline_components/_implementation/llm/deployment_graph.py

@@ -34,6 +34,7 @@ PipelineOutput = NamedTuple(
 def pipeline(
     output_adapter_path: str,
     large_model_reference: str,
+    policy_model_reference: str,
     model_display_name: Optional[str] = None,
     deploy_model: bool = True,
     encryption_spec_key_name: str = '',

@@ -45,6 +46,7 @@ def pipeline(
   Args:
     output_adapter_path: Path to the trained model adapter if LoRA tuning was used.
     large_model_reference: Name of the base model. Supported values are `text-bison@001`, `t5-small`, `t5-large`, `t5-xl` and `t5-xxl`. `text-bison@001` and `t5-small` are supported in `us-central1` and `europe-west4`. `t5-large`, `t5-xl` and `t5-xxl` are only supported in `europe-west4`.
+    policy_model_reference: The name of the model for deployment. The name should be in capitalized snake case format.
     model_display_name: Name of the fine-tuned model shown in the Model Registry. If not provided, a default name will be created.
     deploy_model: Whether to deploy the model to an endpoint in `us-central1`. Default is True.
     encryption_spec_key_name: Customer-managed encryption key. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. Note that this is not supported for TPU at the moment.

@@ -68,14 +70,8 @@ def pipeline(
       .set_display_name('Resolve Model Display Name')
   )

-  reference_model_metadata = function_based.resolve_reference_model_metadata(
-      large_model_reference=large_model_reference,
-  ).set_display_name('Resolve Model Metadata')
-
   upload_model = function_based.resolve_upload_model(
-      large_model_reference=reference_model_metadata.outputs[
-          'large_model_reference'
-      ]
+      large_model_reference=policy_model_reference,
   ).set_display_name('Resolve Upload Model')
   upload_task = upload_llm_model.refined_upload_llm_model(
       project=_placeholders.PROJECT_ID_PLACEHOLDER,

@@ -90,9 +86,7 @@ def pipeline(
   ).set_display_name('Upload Model')
   deploy_model = function_based.resolve_deploy_model(
       deploy_model=deploy_model,
-      large_model_reference=reference_model_metadata.outputs[
-          'large_model_reference'
-      ],
+      large_model_reference=policy_model_reference,
   ).set_display_name('Resolve Deploy Model')
   deploy_task = deploy_llm_model.deploy_llm_model(
       project=_placeholders.PROJECT_ID_PLACEHOLDER,
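With the Resolve Model Metadata task gone, the upload and deploy resolvers receive the reference as a plain pipeline argument. A stand-in sketch of the call-shape change (the helper body below is a placeholder with the same argument shape, not GCPC's logic):

def resolve_deploy_model(deploy_model: bool, large_model_reference: str) -> bool:
  # Illustrative decision only; the real component applies its own rules.
  return deploy_model and large_model_reference == 'BISON'

# 2.13.1 wired reference_model_metadata.outputs['large_model_reference'] here;
# 2.14.0 passes the new pipeline argument directly.
policy_model_reference = 'BISON'  # illustrative capitalized-snake-case value
print(resolve_deploy_model(deploy_model=True, large_model_reference=policy_model_reference))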
google_cloud_pipeline_components/_implementation/llm/env.py

@@ -19,7 +19,7 @@ from google_cloud_pipeline_components._implementation.llm.generated import refined_image_versions


 def get_private_image_tag() -> str:
-  return os.getenv('PRIVATE_IMAGE_TAG') or
+  return os.getenv('PRIVATE_IMAGE_TAG') or refined_image_versions.IMAGE_TAG


 def get_autosxs_image_tag() -> str:
google_cloud_pipeline_components/_implementation/llm/function_based.py

@@ -231,8 +231,8 @@ def resolve_reference_model_metadata(
             'gs://vertex-llm-restricted/cloud-llm-restricted/checkpoints/'
             'safe_flan_t5/xxl/v1/checkpoint_1190000/'
         ),
-        reward_model_reference='
-        reward_model_path='gs://t5-data/pretrained_models/t5x/
+        reward_model_reference='T5_XXL',
+        reward_model_path='gs://t5-data/pretrained_models/t5x/t5_1_1_xxl',
         is_supported=True,
     ),
     'palm-tiny': reference_model_metadata(

@@ -265,8 +265,10 @@ def resolve_reference_model_metadata(
         reference_model_path=(
             'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
         ),
-        reward_model_reference='
-        reward_model_path=
+        reward_model_reference='BISON',
+        reward_model_path=(
+            'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
+        ),
         is_supported=False,  # Deprecated: Use text-bision@001 instead.
     ),
     'text-bison@001': reference_model_metadata(

@@ -274,8 +276,10 @@ def resolve_reference_model_metadata(
         reference_model_path=(
             'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
         ),
-        reward_model_reference='
-        reward_model_path=
+        reward_model_reference='BISON',
+        reward_model_path=(
+            'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
+        ),
         is_supported=True,
     ),
     'text-bison@002': reference_model_metadata(

@@ -292,8 +296,10 @@ def resolve_reference_model_metadata(
         reference_model_path=(
             'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
         ),
-        reward_model_reference='
-        reward_model_path=
+        reward_model_reference='BISON',
+        reward_model_path=(
+            'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/'
+        ),
         is_supported=True,
     ),
     'elephant': reference_model_metadata(
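Schematically, the entries above now resolve a base model to a concrete reward-model reference and checkpoint path. A sketch of that mapping (the real resolve_reference_model_metadata returns a fuller reference_model_metadata record; only two entries are shown):

# Reward-model reference and checkpoint path per base model, as set in this diff.
REWARD_MODEL_INFO = {
    't5-xxl': ('T5_XXL', 'gs://t5-data/pretrained_models/t5x/t5_1_1_xxl'),
    'text-bison@001': (
        'BISON',
        'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison/',
    ),
}

reference, path = REWARD_MODEL_INFO['text-bison@001']
print(reference, path)  # -> BISON gs://vertex-rlhf-restricted/...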
@@ -372,46 +378,6 @@ def convert_to_delimited_string(items: List[str], delimiter: str = ',') -> str:
   return delimiter.join(items)


-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def generate_default_instruction(
-    task: str,
-    target_sequence_length: int,
-    instruction_override: str = '',
-) -> str:
-  """Generates a default instruction if no override is provided."""
-  if instruction_override:
-    return instruction_override
-  task = task.lower()
-  if task == 'summarization':
-    return f'Summarize in less than {target_sequence_length} words.'
-
-  elif task == 'question_answer':
-    return f'Answer the question in less than {target_sequence_length} words.'
-
-  else:
-    raise ValueError(
-        f'Task not recognized: {task}. Supported tasks are: "summarization",'
-        ' "question_answer".'
-    )
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def resolve_upload_location(upload_location: Optional[str] = None) -> str:
-  """Gets the region to upload the model.
-
-  Args:
-    upload_location: User-specified region to upload the model to.
-
-  Returns:
-    Where to upload the model. If no location is specified, the model will be
-    uploaded to the region where the pipeline is running.
-  """
-  # pylint: disable=g-import-not-at-top
-  import os
-  # pylint: enable=g-import-not-at-top
-  return upload_location or os.environ['CLOUD_ML_REGION']
-
-
 @dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
 def resolve_regional_endpoint(upload_location: str) -> str:
   """Gets the regional endpoint used to upload a model to the registry.
google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py

@@ -41,6 +41,14 @@ def pipeline(
     input_reward_adapter_path: str,
     input_preference_dataset_path: str,
     large_model_reference: str,
+    reward_model_reference: str,
+    policy_model_reference: str,
+    policy_model_path: str,
+    machine_type: str,
+    tuning_location: str,
+    accelerator_type: str,
+    accelerator_count: int,
+    rl_image_uri: str,
     prompt_sequence_length: int = 512,
     target_sequence_length: int = 64,
     lora_dim: int = 1,

@@ -51,10 +59,10 @@ def pipeline(
     kl_coeff: float = 0.1,
     instruction: Optional[str] = None,
     project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
-    accelerator_type: str = 'GPU',
     location: str = _placeholders.LOCATION_PLACEHOLDER,
     tensorboard_resource_id: str = '',
     encryption_spec_key_name: str = '',
+    num_microbatches: int = 0,
 ) -> PipelineOutput:
   # fmt: off
   """Trains a reward model.

@@ -64,6 +72,14 @@ def pipeline(
     input_reward_adapter_path: Path to the reward LoRA adapter to use during reinforcement learning.
     input_preference_dataset_path: Path to preference dataset used by the reward model.
     large_model_reference: Name of the base model. Supported values are `text-bison@001`, `t5-small`, `t5-large`, `t5-xl` and `t5-xxl`. `text-bison@001` and `t5-small` are supported in `us-central1` and `europe-west4`. `t5-large`, `t5-xl` and `t5-xxl` are only supported in `europe-west4`.
+    reward_model_reference: Name of the reward model. The name should be in capitalized snake case format.
+    policy_model_reference: Name of the policy model. The name should be in capitalized snake case format.
+    policy_model_path: The model checkpoint path to the reinforcer model.
+    machine_type: The type of the machine to provision for the custom job. Must be a valid GCE instance type and compatible with the accelerator type.
+    tuning_location: The GCP region to run the custom job.
+    accelerator_type: Specific accelerator type for the custom job.
+    accelerator_count: The number of accelerator.
+    rl_image_uri: Docker image URI to use for the reinforcement learning training job.
     prompt_sequence_length: Maximum tokenized sequence length for input text. Higher values increase memory overhead. This value should be at most 8192. Default value is 512.
     target_sequence_length: Maximum tokenized sequence length for target text. Higher values increase memory overhead. This value should be at most 1024. Default value is 64.
     lora_dim: The rank of the LoRA adapter. If >0, then use LoRA-tuning. If =0, then use full-tuning. Default is 1.

@@ -74,7 +90,6 @@ def pipeline(
     kl_coeff: Coefficient for KL penalty. This regularizes the policy model and penalizes if it diverges from its initial distribution. If set to 0, the reference language model is not loaded into memory. Default value is 0.1.
     instruction: This field lets the model know what task it needs to perform. Base models have been trained over a large set of varied instructions. You can give a simple and intuitive description of the task and the model will follow it, e.g. "Classify this movie review as positive or negative" or "Translate this sentence to Danish". Do not specify this if your dataset already prepends the instruction to the inputs field.
     project: Project used to run custom jobs. If not specified the project used to run the pipeline will be used.
-    accelerator_type: One of 'TPU' or 'GPU'. If 'TPU' is specified, tuning components run in europe-west4. Otherwise tuning components run in us-central1 on GPUs. Default is 'GPU'.
     location: Location used to run non-tuning components, i.e. components that do not require accelerators. If not specified the location used to run the pipeline will be used.
     tensorboard_resource_id: Optional tensorboard resource id in format `projects/{project_number}/locations/{location}/tensorboards/{tensorboard_id}`. If provided, tensorboard metrics will be uploaded to this location.
     encryption_spec_key_name: Customer-managed encryption key. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. Note that this is not supported for TPU at the moment.

@@ -85,14 +100,6 @@ def pipeline(
   """
   # fmt: on
   prompt_column = 'input_text'
-  machine_spec = function_based.resolve_machine_spec(
-      accelerator_type=accelerator_type,
-      use_test_spec=env.get_use_test_machine_spec(),
-  ).set_display_name('Resolve Machine Spec')
-
-  reference_model_metadata = function_based.resolve_reference_model_metadata(
-      large_model_reference=large_model_reference,
-  ).set_display_name('Resolve Model Metadata')

   processed_dataset = preprocess_chat_dataset.preprocess_chat_dataset(
       large_model_reference=large_model_reference,

@@ -109,30 +116,18 @@ def pipeline(
       # Target field name does not matter because this field is not used.
       targets_field_name='non_existent_targets_field_name',
       output_split_name=env.TRAIN_SPLIT,
-      large_model_reference=reference_model_metadata.outputs[
-          'large_model_reference'
-      ],
+      large_model_reference=policy_model_reference,
       instruction=instruction,
       encryption_spec_key_name=encryption_spec_key_name,
       )
       .set_display_name('Import Prompt Dataset')
       .set_caching_options(False)
   )
-  rl_image_uri = function_based.resolve_private_refined_image_uri(
-      accelerator_type=machine_spec.outputs['accelerator_type'],
-  ).set_display_name('Resolve Reinforcer Image URI')
-  num_microbatches = function_based.resolve_num_microbatches(
-      large_model_reference=reference_model_metadata.outputs[
-          'large_model_reference'
-      ]
-  ).set_display_name('Resolve Number of Microbatches')
   rl_model = (
       reinforcer.reinforcer(
           project=project,
-          location=machine_spec.outputs['tuning_location'],
-          input_reference_model_path=reference_model_metadata.outputs[
-              'reference_model_path'
-          ],
+          location=tuning_location,
+          input_reference_model_path=policy_model_path,
           input_reward_model_path=input_reward_model_path,
           input_reward_adapter_path=input_reward_adapter_path,
           input_dataset_path=prompt_dataset_importer.outputs[

@@ -140,16 +135,12 @@ def pipeline(
           ],
           input_preference_dataset_path=input_preference_dataset_path,
           train_steps=reinforcement_learning_train_steps,
-          accelerator_type=machine_spec.outputs['accelerator_type'],
-          accelerator_count=machine_spec.outputs['accelerator_count'],
-          large_model_reference=reference_model_metadata.outputs[
-              'large_model_reference'
-          ],
-          reward_model_reference=reference_model_metadata.outputs[
-              'reward_model_reference'
-          ],
-          machine_type=machine_spec.outputs['machine_type'],
-          image_uri=rl_image_uri.output,
+          accelerator_type=accelerator_type,
+          accelerator_count=accelerator_count,
+          large_model_reference=policy_model_reference,
+          reward_model_reference=reward_model_reference,
+          machine_type=machine_type,
+          image_uri=rl_image_uri,
           inputs_sequence_length=prompt_sequence_length,
           targets_sequence_length=target_sequence_length,
           batch_size=batch_size,

@@ -157,7 +148,7 @@ def pipeline(
           kl_coeff=kl_coeff,
           lora_dim=lora_dim,
           reward_lora_dim=reward_lora_dim,
-          num_microbatches=num_microbatches.output,
+          num_microbatches=num_microbatches,
           encryption_spec_key_name=encryption_spec_key_name,
           tensorboard_resource_id=tensorboard_resource_id,
       )
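Both tuning graphs now receive their machine spec, model metadata, image URI, and microbatch count as explicit pipeline arguments instead of resolving them with in-pipeline function_based tasks; in 2.14.0 these values come from the rlhf_preprocessor component. A schematic of the argument bundle a caller assembles (every value below is illustrative, not taken from this diff):

# Keyword arguments a caller would now pass to reinforcement_learning_graph.pipeline;
# the reward-model graph takes the analogous reward_model_* bundle.
rl_graph_kwargs = dict(
    reward_model_reference='BISON',
    policy_model_reference='BISON',
    policy_model_path='gs://bucket/path/to/policy/checkpoint',  # assumed path
    machine_type='a2-ultragpu-8g',          # assumed machine type
    tuning_location='us-central1',          # assumed region
    accelerator_type='NVIDIA_TESLA_A100',   # assumed accelerator
    accelerator_count=8,                    # assumed count
    rl_image_uri='us-docker.pkg.dev/example/rl-image:tag',  # assumed URI
    num_microbatches=2,                     # assumed value
)
print(sorted(rl_graph_kwargs))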
google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py

@@ -26,7 +26,6 @@ import kfp

 PipelineOutput = NamedTuple(
     'Outputs',
-    reward_model_base_path=str,
     reward_model_adapter_path=str,
     reward_dataset_path=str,
 )

@@ -39,6 +38,13 @@ PipelineOutput = NamedTuple(
 def pipeline(
     preference_dataset: str,
     large_model_reference: str,
+    reward_model_reference: str,
+    reward_model_path: str,
+    machine_type: str,
+    tuning_location: str,
+    accelerator_type: str,
+    accelerator_count: int,
+    reward_model_image_uri: str,
     prompt_sequence_length: int = 512,
     target_sequence_length: int = 64,
     batch_size: int = 64,

@@ -48,10 +54,10 @@ def pipeline(
     eval_dataset: Optional[str] = None,
     instruction: Optional[str] = None,
     project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
-    accelerator_type: str = 'GPU',
     location: str = _placeholders.LOCATION_PLACEHOLDER,
     tensorboard_resource_id: str = '',
     encryption_spec_key_name: str = '',
+    num_microbatches: int = 0,
 ) -> PipelineOutput:
   # fmt: off
   """Trains a reward model.

@@ -59,6 +65,13 @@ def pipeline(
   Args:
     preference_dataset: Cloud storage path to a human preference JSONL dataset used to train a reward model. Each example in a preference dataset must contain `candidate_0` and `candidate_1` fields that contain candidate responses, `choice` that specifies the preferred candidate and either `input_text` (if tuning a text model) or `messages` (if tuning a chat model). Chat datasets must contain at least 1 message in a `messages` field. Each message must be valid JSON that contains `author` and `content` fields, where valid `author` values are `user` and `assistant` and `content` must be non-empty. Each row may contain multiple messages, but the first and last author must be the `user`. An optional `context` field may be provided for each example in a chat dataset. If provided, the `context` will preprended to the message `content`. The `instruction` serves as the default context. (Useful if most messages use the same system-level context.) Any context provided in the example will override the default value.
     large_model_reference: Name of the base model. Supported values are `text-bison@001`, `t5-small`, `t5-large`, `t5-xl` and `t5-xxl`. `text-bison@001` and `t5-small` are supported in `us-central1` and `europe-west4`. `t5-large`, `t5-xl` and `t5-xxl` are only supported in `europe-west4`.
+    reward_model_reference: Name of the base model. The name should be in capitalized snake case format.
+    reward_model_path: The model checkpoint path for the reward model.
+    machine_type: The type of the machine to provision for the custom job. Must be a valid GCE instance type and compatible with the accelerator type.
+    tuning_location: The GCP region to run the custom job.
+    accelerator_type: Specific accelerator type for the custom job.
+    accelerator_count: The number of accelerator.
+    reward_model_image_uri: Docker image URI to use for the reward model training job.
     prompt_sequence_length: Maximum tokenized sequence length for input text. Higher values increase memory overhead. This value should be at most 8192. Default value is 512.
     target_sequence_length: Maximum tokenized sequence length for target text. Higher values increase memory overhead. This value should be at most 1024. Default value is 64.
     batch_size: Number of examples in each finetuning step. Default is 64.

@@ -67,13 +80,12 @@ def pipeline(
     reward_model_train_steps: Number of steps to use when training a reward model. Default value is 1000.
     instruction: This field lets the model know what task it needs to perform. Base models have been trained over a large set of varied instructions. You can give a simple and intuitive description of the task and the model will follow it, e.g. "Classify this movie review as positive or negative" or "Translate this sentence to Danish". Do not specify this if your dataset already prepends the instruction to the inputs field.
     project: Project used to run custom jobs. If not specified the project used to run the pipeline will be used.
-    accelerator_type: One of 'TPU' or 'GPU'. If 'TPU' is specified, tuning components run in europe-west4. Otherwise tuning components run in us-central1 on GPUs. Default is 'GPU'.
     location: Location used to run non-tuning components, i.e. components that do not require accelerators. If not specified the location used to run the pipeline will be used.
     tensorboard_resource_id: Optional tensorboard resource id in format `projects/{project_number}/locations/{location}/tensorboards/{tensorboard_id}`. If provided, tensorboard metrics will be uploaded to this location.
     encryption_spec_key_name: Customer-managed encryption key. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. Note that this is not supported for TPU at the moment.
+    num_microbatches: The number of microbatches to break the total batch size into during training.

   Returns:
-    reward_model_base_path: Path to the base model used by the reward model.
     reward_model_adapter_path: Path to the output LoRA adapter.
     reward_dataset_path: Preference dataset use for tuning the reward model.
   """

@@ -81,14 +93,6 @@ def pipeline(
   prompt_column = 'input_text'
   candidate_columns = ['candidate_0', 'candidate_1']
   choice_column = 'choice'
-  machine_spec = function_based.resolve_machine_spec(
-      accelerator_type=accelerator_type,
-      use_test_spec=env.get_use_test_machine_spec(),
-  ).set_display_name('Resolve Machine Spec')
-
-  reference_model_metadata = function_based.resolve_reference_model_metadata(
-      large_model_reference=large_model_reference,
-  ).set_display_name('Resolve Model Metadata')

   processed_preference_dataset = (
       preprocess_chat_dataset.preprocess_chat_dataset(

@@ -113,9 +117,7 @@ def pipeline(
       comma_separated_candidates_field_names=comma_separated_candidates_field_names.output,
       choice_field_name=choice_column,
       split=env.TRAIN_SPLIT,
-      large_model_reference=reference_model_metadata.outputs[
-          'reward_model_reference'
-      ],
+      large_model_reference=reward_model_reference,
       instruction=instruction,
       encryption_spec_key_name=encryption_spec_key_name,
       )

@@ -132,9 +134,7 @@ def pipeline(
       comma_separated_candidates_field_names=comma_separated_candidates_field_names.output,
       choice_field_name=choice_column,
       split=env.TRAIN_SPLIT,
-      large_model_reference=reference_model_metadata.outputs[
-          'reward_model_reference'
-      ],
+      large_model_reference=reward_model_reference,
       instruction=instruction,
       encryption_spec_key_name=encryption_spec_key_name,
       )

@@ -142,21 +142,11 @@ def pipeline(
       .set_caching_options(False)
   )

-  reward_model_image_uri = function_based.resolve_private_refined_image_uri(
-      accelerator_type=machine_spec.outputs['accelerator_type'],
-  ).set_display_name('Resolve Reward Model Image URI')
-  num_microbatches = function_based.resolve_num_microbatches(
-      large_model_reference=reference_model_metadata.outputs[
-          'reward_model_reference'
-      ]
-  ).set_display_name('Resolve Number of Microbatches')
   reward_model = (
       reward_model_trainer.reward_model_trainer(
           project=project,
-          location=machine_spec.outputs['tuning_location'],
-          input_model_path=reference_model_metadata.outputs[
-              'reward_model_path'
-          ],
+          location=tuning_location,
+          input_model_path=reward_model_path,
           input_dataset_path=preference_dataset_importer.outputs[
               'output_dataset_path'
           ],

@@ -164,19 +154,17 @@ def pipeline(
               'output_dataset_path'
           ],
           train_steps=reward_model_train_steps,
-          accelerator_type=machine_spec.outputs['accelerator_type'],
-          accelerator_count=machine_spec.outputs['accelerator_count'],
-          large_model_reference=reference_model_metadata.outputs[
-              'reward_model_reference'
-          ],
-          machine_type=machine_spec.outputs['machine_type'],
-          image_uri=reward_model_image_uri.output,
+          accelerator_type=accelerator_type,
+          accelerator_count=accelerator_count,
+          large_model_reference=reward_model_reference,
+          machine_type=machine_type,
+          image_uri=reward_model_image_uri,
           inputs_sequence_length=prompt_sequence_length,
           targets_sequence_length=target_sequence_length,
           batch_size=batch_size,
           learning_rate_multiplier=reward_model_learning_rate_multiplier,
           lora_dim=lora_dim,
-          num_microbatches=num_microbatches.output,
+          num_microbatches=num_microbatches,
           encryption_spec_key_name=encryption_spec_key_name,
           tensorboard_resource_id=tensorboard_resource_id,
       )

@@ -185,9 +173,6 @@ def pipeline(
   )

   return PipelineOutput(
-      reward_model_base_path=reference_model_metadata.outputs[
-          'reward_model_path'
-      ],
       reward_model_adapter_path=reward_model.outputs['output_adapter_path'],
       reward_dataset_path=preference_dataset_importer.outputs[
           'output_dataset_path'
google_cloud_pipeline_components/_implementation/llm/rlhf_preprocessor.py

@@ -23,24 +23,65 @@ from kfp import dsl

 @dsl.container_component
 def rlhf_preprocessor(
+    large_model_reference: str,
+    accelerator_type: str,
+    use_test_spec: bool,
+    project: str,
+    location: str,
+    artifact_registry: str,
+    tag: str,
     gcp_resources: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
     has_tensorboard_id: dsl.OutputPath(bool),  # pytype: disable=invalid-annotation
     has_inference_dataset: dsl.OutputPath(bool),  # pytype: disable=invalid-annotation
+    metadata_large_model_reference: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_reference_model_path: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_reward_model_reference: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_reward_model_path: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_machine_type: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_tuning_location: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_accelerator_type: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_accelerator_count: dsl.OutputPath(int),  # pytype: disable=invalid-annotation
+    metadata_refined_image_uri: dsl.OutputPath(str),  # pytype: disable=invalid-annotation
+    metadata_num_microbatches: dsl.OutputPath(int),  # pytype: disable=invalid-annotation
+    use_experimental_image: bool = False,
     evaluation_dataset: str = '',
     tensorboard_resource_id: str = '',
+    input_reference_model_path: str = '',
     image_uri: str = utils.get_default_image_uri('refined_cpu', ''),
 ) -> dsl.ContainerSpec:  # pylint: disable=g-doc-args
+  # fmt: off
   """Preprocess RLHF pipeline inputs.

   Args:
+    large_model_reference: The model for fine tuning.
+    accelerator_type: Specific accelerator type for the job.
+    use_test_spec: Whether to use a lower resource machine for testing.
+    project: Project that contains the artifact registry.
+    location: Region that contains the artifact registry.
+    artifact_registry: Registry that contains Docker images.
+    tag: Image tag.
+    use_experimental_image: Whether to use refined experimental image.
     evaluation_dataset: Path to evaluation data.
     tensorboard_resource_id: TensorBoard resource id.
+    metadata_large_model_reference: The base model for fine tuning. The name should be in capitalized snake case format.
+    metadata_reference_model_path: The model checkpoint path for the reinforcer model
+    metadata_reward_model_reference: The base model for training reward model. The name should be in capitalized snake case format.
+    metadata_reward_model_path: The model checkpoint path for the reward model.
+    image_uri: Docker image URI to use for the custom job.

   Returns:
     gcp_resources: GCP resources that can be used to track the custom job.
     has_tensorboard_id: Whether a tensorboard id is provided.
     has_inference_dataset: Whether inference data are provided.
+    metadata_machine_type: The type of the machine to provision for the custom job.
+    metadata_tuning_location: The GCP region to run the custom job.
+    metadata_accelerator_type: Specific accelerator type for the custom job.
+    metadata_accelerator_count: The number of accelerator.
+    metadata_refined_image_uri: Docker image URI to use for the custom job.
+    metadata_num_microbatches: Number of microbatches to break the total batch
+      size into during training.
   """
+  # fmt: on
   return gcpc_utils.build_serverless_customjob_container_spec(
       project=_placeholders.PROJECT_ID_PLACEHOLDER,
       location=_placeholders.LOCATION_PLACEHOLDER,

@@ -52,8 +93,27 @@ def rlhf_preprocessor(
               '--app_name=rlhf_preprocessor',
               f'--evaluation_dataset={evaluation_dataset}',
               f'--tensorboard_resource_id={tensorboard_resource_id}',
+              f'--large_model_reference={large_model_reference}',
+              f'--input_reference_model_path={input_reference_model_path}',
+              f'--accelerator_type={accelerator_type}',
+              f'--use_test_spec={use_test_spec}',
+              f'--project={project}',
+              f'--location={location}',
+              f'--artifact_registry={artifact_registry}',
+              f'--tag={tag}',
+              f'--use_experimental_image={use_experimental_image}',
               f'--has_tensorboard_id_path={has_tensorboard_id}',
               f'--has_inference_dataset_path={has_inference_dataset}',
+              f'--metadata_large_model_reference_path={metadata_large_model_reference}',
+              f'--metadata_reference_model_path_path={metadata_reference_model_path}',
+              f'--metadata_reward_model_reference_path={metadata_reward_model_reference}',
+              f'--metadata_reward_model_path_path={metadata_reward_model_path}',
+              f'--metadata_machine_type_path={metadata_machine_type}',
+              f'--metadata_tuning_location_path={metadata_tuning_location}',
+              f'--metadata_accelerator_type_path={metadata_accelerator_type}',
+              f'--metadata_accelerator_count_path={metadata_accelerator_count}',
+              f'--metadata_refined_image_uri_path={metadata_refined_image_uri}',
+              f'--metadata_num_microbatches_path={metadata_num_microbatches}',
           ],
       ),
       gcp_resources=gcp_resources,
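Every new input is forwarded to the preprocessor binary as a --flag=value argument, and each metadata_* output gets a matching *_path flag pointing at the file the container writes. A standalone sketch of that rendering (the values and the output path are stand-ins):

# dsl.OutputPath parameters resolve to file paths at runtime; a stand-in
# string represents one here.
large_model_reference = 'text-bison@001'
use_test_spec = False
metadata_machine_type = '/gcs/outputs/metadata_machine_type'  # stand-in path

args = [
    f'--large_model_reference={large_model_reference}',
    f'--use_test_spec={use_test_spec}',
    f'--metadata_machine_type_path={metadata_machine_type}',
]
print(args)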
google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py

@@ -79,8 +79,19 @@ def validate_pipeline(
   # ]
   # [ Check CMEK
   supported_pipeline_regions = {
+      'asia-northeast1',
+      'asia-northeast3',
+      'asia-southeast1',
+      'europe-west1',
+      'europe-west2',
+      'europe-west3',
       'europe-west4',
+      'europe-west9',
+      'northamerica-northeast1',
       'us-central1',
+      'us-east4',
+      'us-west1',
+      'us-west4',
   }
   if location not in supported_pipeline_regions:
     raise ValueError(
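The region gate itself is simple set membership; a standalone mirror of the expanded check (the error message is illustrative, not the component's exact text):

# CMEK-supported pipeline regions after this release.
SUPPORTED_PIPELINE_REGIONS = {
    'asia-northeast1', 'asia-northeast3', 'asia-southeast1',
    'europe-west1', 'europe-west2', 'europe-west3', 'europe-west4',
    'europe-west9', 'northamerica-northeast1',
    'us-central1', 'us-east4', 'us-west1', 'us-west4',
}

def check_region(location: str) -> None:
  if location not in SUPPORTED_PIPELINE_REGIONS:
    raise ValueError(f'Region {location!r} does not support CMEK pipelines.')

check_region('us-east4')  # accepted in 2.14.0; 2.13.1 only allowed europe-west4 and us-central1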
google_cloud_pipeline_components/_placeholders.py

@@ -13,9 +13,38 @@
 # limitations under the License.
 """Placeholders for use in component authoring."""

-# prefer not using PIPELINE_TASK_ prefix like KFP does for reduced
+# prefer not using PIPELINE_TASK_ or PIPELINE_ prefix like KFP does for reduced
+# verbosity
 PROJECT_ID_PLACEHOLDER = "{{$.pipeline_google_cloud_project_id}}"
+"""A placeholder used to obtain Google Cloud project id where the pipeline
+executes. The placeholder value is set at pipeline runtime.
+"""
 LOCATION_PLACEHOLDER = "{{$.pipeline_google_cloud_location}}"
+"""A placeholder used to obtain Google Cloud location where the pipeline
+executes. The placeholder value is set at pipeline runtime.
+"""
+SERVICE_ACCOUNT_PLACEHOLDER = "{{$.pipeline_service_account}}"
+"""A placeholder used to obtain service account that is defined in [PipelineJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs).
+If PipelineJob doesn't have a service account set, this placeholder will be resolved to default service account.
+The placeholder value is set at pipeline runtime.
+"""
+NETWORK_PLACEHOLDER = "{{$.pipeline_network}}"
+"""A placeholder used to obtain network that is defined in [PipelineJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs).
+If PipelineJob doesn't have a network set, this placeholder will be empty. The
+placeholder value is set at pipeline runtime.
+"""
+PERSISTENT_RESOURCE_ID_PLACEHOLDER = "{{$.pipeline_persistent_resource_id}}"
+"""A placeholder used to obtain persistent resource id that is defined in
+PipelineJob [RuntimeConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs#PipelineJob.RuntimeConfig).
+If PipelineJob doesn't have a persistent resource id, this placeholder will be
+empty. The placeholder value is set at pipeline runtime.
+"""
+ENCRYPTION_SPEC_KMS_KEY_NAME_PLACEHOLDER = "{{$.pipeline_encryption_key_name}}"
+"""A placeholder used to obtain kmsKeyName that is defined in
+PipelineJob's [EncryptionSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/EncryptionSpec).
+If PipelineJob doesn't have a encryption key name, this placeholder will be
+empty. The placeholder value is set at pipeline runtime.
+"""


 # omit placeholder type annotation to avoid dependency on KFP SDK internals
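These placeholders are plain strings that the Vertex Pipelines backend substitutes at runtime, so they can be passed anywhere a string parameter is accepted. A minimal sketch (constants copied from the diff above; the print just shows the literal values):

SERVICE_ACCOUNT_PLACEHOLDER = "{{$.pipeline_service_account}}"
NETWORK_PLACEHOLDER = "{{$.pipeline_network}}"
PERSISTENT_RESOURCE_ID_PLACEHOLDER = "{{$.pipeline_persistent_resource_id}}"
ENCRYPTION_SPEC_KMS_KEY_NAME_PLACEHOLDER = "{{$.pipeline_encryption_key_name}}"

# e.g. a custom-job component could accept service_account=SERVICE_ACCOUNT_PLACEHOLDER
# and let the backend fill in the PipelineJob's service account.
print(SERVICE_ACCOUNT_PLACEHOLDER, ENCRYPTION_SPEC_KMS_KEY_NAME_PLACEHOLDER)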
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py

@@ -72,7 +72,7 @@ def automl_forecasting_ensemble(
   # fmt: on
   job_id = dsl.PIPELINE_JOB_ID_PLACEHOLDER
   task_id = dsl.PIPELINE_TASK_ID_PLACEHOLDER
-  image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+  image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240419_0625'
   display_name = f'automl-forecasting-ensemble-{job_id}-{task_id}'

   error_file_path = f'{root_dir}/{job_id}/{task_id}/error.pb'
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py

@@ -99,14 +99,14 @@ def automl_forecasting_stage_1_tuner(
           ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
           ' "container_spec": {"image_uri":"'
       ),
-      'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+      'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240419_0625',
       '", "args": ["forecasting_mp_l2l_stage_1_tuner',
       '", "--region=',
       location,
       '", "--transform_output_path=',
       transform_output.uri,
       '", "--training_docker_uri=',
-      'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:
+      'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240419_0625',
       '", "--reduce_search_space_mode=',
       reduce_search_space_mode,
       f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}',