google-cloud-pipeline-components 2.14.1__py3-none-any.whl → 2.16.0__py3-none-any.whl

This diff covers the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

This release of google-cloud-pipeline-components has been flagged as potentially problematic.

Files changed (88)
  1. google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
  2. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py +24 -0
  3. google_cloud_pipeline_components/_implementation/starry_net/__init__.py +41 -0
  4. google_cloud_pipeline_components/_implementation/{model_evaluation/import_evaluation → starry_net/dataprep}/__init__.py +1 -2
  5. google_cloud_pipeline_components/_implementation/starry_net/dataprep/component.py +173 -0
  6. google_cloud_pipeline_components/_implementation/starry_net/evaluation/__init__.py +13 -0
  7. google_cloud_pipeline_components/_implementation/starry_net/evaluation/component.py +23 -0
  8. google_cloud_pipeline_components/_implementation/starry_net/evaluation/evaluation.yaml +197 -0
  9. google_cloud_pipeline_components/_implementation/starry_net/get_training_artifacts/__init__.py +13 -0
  10. google_cloud_pipeline_components/_implementation/starry_net/get_training_artifacts/component.py +62 -0
  11. google_cloud_pipeline_components/_implementation/starry_net/maybe_set_tfrecord_args/__init__.py +13 -0
  12. google_cloud_pipeline_components/_implementation/starry_net/maybe_set_tfrecord_args/component.py +77 -0
  13. google_cloud_pipeline_components/_implementation/starry_net/set_dataprep_args/__init__.py +13 -0
  14. google_cloud_pipeline_components/_implementation/starry_net/set_dataprep_args/component.py +97 -0
  15. google_cloud_pipeline_components/_implementation/starry_net/set_eval_args/__init__.py +13 -0
  16. google_cloud_pipeline_components/_implementation/starry_net/set_eval_args/component.py +76 -0
  17. google_cloud_pipeline_components/_implementation/starry_net/set_test_set/__init__.py +13 -0
  18. google_cloud_pipeline_components/_implementation/starry_net/set_test_set/component.py +48 -0
  19. google_cloud_pipeline_components/_implementation/starry_net/set_tfrecord_args/__init__.py +13 -0
  20. google_cloud_pipeline_components/_implementation/starry_net/set_tfrecord_args/component.py +70 -0
  21. google_cloud_pipeline_components/_implementation/starry_net/set_train_args/__init__.py +13 -0
  22. google_cloud_pipeline_components/_implementation/starry_net/set_train_args/component.py +90 -0
  23. google_cloud_pipeline_components/_implementation/starry_net/train/__init__.py +13 -0
  24. google_cloud_pipeline_components/_implementation/starry_net/train/component.py +220 -0
  25. google_cloud_pipeline_components/_implementation/starry_net/upload_decomposition_plots/__init__.py +13 -0
  26. google_cloud_pipeline_components/_implementation/starry_net/upload_decomposition_plots/component.py +64 -0
  27. google_cloud_pipeline_components/_implementation/starry_net/upload_model/__init__.py +13 -0
  28. google_cloud_pipeline_components/_implementation/starry_net/upload_model/component.py +23 -0
  29. google_cloud_pipeline_components/_implementation/starry_net/upload_model/upload_model.yaml +37 -0
  30. google_cloud_pipeline_components/_implementation/starry_net/version.py +18 -0
  31. google_cloud_pipeline_components/container/preview/custom_job/remote_runner.py +22 -0
  32. google_cloud_pipeline_components/container/utils/error_surfacing.py +45 -0
  33. google_cloud_pipeline_components/container/v1/model/get_model/remote_runner.py +36 -7
  34. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py +1 -1
  35. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py +2 -2
  36. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py +2 -2
  37. google_cloud_pipeline_components/preview/automl/forecasting/learn_to_learn_forecasting_pipeline.yaml +38 -34
  38. google_cloud_pipeline_components/preview/automl/forecasting/sequence_to_sequence_forecasting_pipeline.yaml +38 -34
  39. google_cloud_pipeline_components/preview/automl/forecasting/temporal_fusion_transformer_forecasting_pipeline.yaml +38 -34
  40. google_cloud_pipeline_components/preview/automl/forecasting/time_series_dense_encoder_forecasting_pipeline.yaml +38 -34
  41. google_cloud_pipeline_components/preview/automl/forecasting/utils.py +49 -7
  42. google_cloud_pipeline_components/preview/automl/tabular/auto_feature_engineering.py +1 -1
  43. google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_feature_selection_pipeline.yaml +39 -39
  44. google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_v2_pipeline.yaml +41 -41
  45. google_cloud_pipeline_components/preview/automl/tabular/distillation_stage_feature_transform_engine.py +2 -2
  46. google_cloud_pipeline_components/preview/automl/tabular/feature_selection.py +2 -2
  47. google_cloud_pipeline_components/preview/automl/tabular/feature_selection_pipeline.yaml +4 -4
  48. google_cloud_pipeline_components/preview/automl/tabular/feature_transform_engine.py +3 -3
  49. google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job.py +2 -2
  50. google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job_pipeline.yaml +15 -15
  51. google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer.py +2 -2
  52. google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer_pipeline.yaml +13 -13
  53. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job.py +2 -2
  54. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job_pipeline.yaml +14 -14
  55. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer.py +2 -2
  56. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer_pipeline.yaml +13 -13
  57. google_cloud_pipeline_components/preview/automl/tabular/xgboost_hyperparameter_tuning_job_pipeline.yaml +14 -14
  58. google_cloud_pipeline_components/preview/automl/tabular/xgboost_trainer_pipeline.yaml +13 -13
  59. google_cloud_pipeline_components/preview/custom_job/utils.py +45 -6
  60. google_cloud_pipeline_components/preview/llm/rlhf/component.py +3 -6
  61. google_cloud_pipeline_components/preview/starry_net/__init__.py +19 -0
  62. google_cloud_pipeline_components/preview/starry_net/component.py +469 -0
  63. google_cloud_pipeline_components/proto/task_error_pb2.py +0 -1
  64. google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_predict_pipeline.yaml +10 -10
  65. google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_train_pipeline.yaml +31 -31
  66. google_cloud_pipeline_components/v1/automl/forecasting/prophet_predict_pipeline.yaml +13 -13
  67. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py +3 -3
  68. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml +14 -14
  69. google_cloud_pipeline_components/v1/automl/tabular/automl_tabular_pipeline.yaml +37 -37
  70. google_cloud_pipeline_components/v1/automl/tabular/cv_trainer.py +2 -2
  71. google_cloud_pipeline_components/v1/automl/tabular/ensemble.py +2 -2
  72. google_cloud_pipeline_components/v1/automl/tabular/finalizer.py +1 -1
  73. google_cloud_pipeline_components/v1/automl/tabular/infra_validator.py +1 -1
  74. google_cloud_pipeline_components/v1/automl/tabular/split_materialized_data.py +1 -1
  75. google_cloud_pipeline_components/v1/automl/tabular/stage_1_tuner.py +2 -2
  76. google_cloud_pipeline_components/v1/automl/tabular/stats_and_example_gen.py +2 -2
  77. google_cloud_pipeline_components/v1/automl/tabular/training_configurator_and_validator.py +1 -1
  78. google_cloud_pipeline_components/v1/automl/tabular/transform.py +2 -2
  79. google_cloud_pipeline_components/v1/custom_job/component.py +3 -0
  80. google_cloud_pipeline_components/v1/custom_job/utils.py +4 -0
  81. google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py +21 -0
  82. google_cloud_pipeline_components/version.py +1 -1
  83. {google_cloud_pipeline_components-2.14.1.dist-info → google_cloud_pipeline_components-2.16.0.dist-info}/METADATA +17 -20
  84. {google_cloud_pipeline_components-2.14.1.dist-info → google_cloud_pipeline_components-2.16.0.dist-info}/RECORD +87 -58
  85. {google_cloud_pipeline_components-2.14.1.dist-info → google_cloud_pipeline_components-2.16.0.dist-info}/WHEEL +1 -1
  86. google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py +0 -208
  87. {google_cloud_pipeline_components-2.14.1.dist-info → google_cloud_pipeline_components-2.16.0.dist-info}/LICENSE +0 -0
  88. {google_cloud_pipeline_components-2.14.1.dist-info → google_cloud_pipeline_components-2.16.0.dist-info}/top_level.txt +0 -0
@@ -8420,9 +8420,9 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"l2l_cv_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
- "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625",
+ "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625",
  "\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
  \"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
@@ -8463,9 +8463,9 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"l2l_cv_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
- "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625",
+ "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625",
  "\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
  \"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
@@ -8506,7 +8506,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
  "\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
  \"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
@@ -8518,7 +8518,7 @@ deploymentSpec:
  "\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
  "\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
  "\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
- \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625",
+ \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625",
  "\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
  "{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
  "{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
@@ -8547,7 +8547,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
  "\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
  \"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
@@ -8559,7 +8559,7 @@ deploymentSpec:
  "\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
  "\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
  "\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
- \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625",
+ \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625",
  "\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
  "{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
  "{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
@@ -8588,7 +8588,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
  "\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
  \"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
@@ -8600,7 +8600,7 @@ deploymentSpec:
  "\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
  "\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
  "\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
- \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625",
+ \"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625",
  "\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
  "{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
  "{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
@@ -8629,7 +8629,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"cancel_l2l_tuner\", \"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}",
  "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--cleanup_lro_job_infos=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\"]}}]}}"]}'
@@ -8644,7 +8644,7 @@ deploymentSpec:
  args:
  - --executor_input
  - '{{$}}'
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625
  resources:
  cpuLimit: 8.0
  memoryLimit: 52.0
@@ -8653,7 +8653,7 @@ deploymentSpec:
  args:
  - --executor_input
  - '{{$}}'
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625
  resources:
  cpuLimit: 8.0
  memoryLimit: 52.0
@@ -8662,7 +8662,7 @@ deploymentSpec:
  args:
  - --executor_input
  - '{{$}}'
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625
  resources:
  cpuLimit: 8.0
  memoryLimit: 52.0
@@ -8682,9 +8682,9 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
- "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625",
+ "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625",
  "\", \"--feature_selection_result_path=", "{{$.inputs.artifacts[''feature_ranking''].uri}}",
  "\", \"--disable_early_stopping=", "{{$.inputs.parameters[''disable_early_stopping'']}}",
  "\", \"--tune_feature_selection_rate=", "{{$.inputs.parameters[''tune_feature_selection_rate'']}}",
@@ -8729,9 +8729,9 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
- "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625",
+ "\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625",
  "\", \"--feature_selection_result_path=", "{{$.inputs.artifacts[''feature_ranking''].uri}}",
  "\", \"--disable_early_stopping=", "{{$.inputs.parameters[''disable_early_stopping'']}}",
  "\", \"--tune_feature_selection_rate=", "{{$.inputs.parameters[''tune_feature_selection_rate'']}}",
@@ -8776,7 +8776,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"transform\", \"--is_mp=true\", \"--transform_output_artifact_path=",
  "{{$.outputs.artifacts[''transform_output''].uri}}", "\", \"--transform_output_path=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\",
@@ -8797,7 +8797,7 @@ deploymentSpec:
  \"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
  \"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
  "\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
- "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625",
+ "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625",
  "\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
  "\", \"--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
  "\", \"--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
@@ -8828,7 +8828,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"transform\", \"--is_mp=true\", \"--transform_output_artifact_path=",
  "{{$.outputs.artifacts[''transform_output''].uri}}", "\", \"--transform_output_path=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\",
@@ -8849,7 +8849,7 @@ deploymentSpec:
  \"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
  \"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
  "\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
- "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625",
+ "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625",
  "\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
  "\", \"--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
  "\", \"--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
@@ -8885,7 +8885,7 @@ deploymentSpec:
  \ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
  \ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
  \ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-bool-identity-2:
  container:
  args:
@@ -8907,7 +8907,7 @@ deploymentSpec:
  \ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
  \ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
  \ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-bool-identity-3:
  container:
  args:
@@ -8929,7 +8929,7 @@ deploymentSpec:
  \ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
  \ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
  \ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-calculate-training-parameters:
  container:
  args:
@@ -9021,7 +9021,7 @@ deploymentSpec:
  \ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
  \ stage_2_single_run_max_secs,\n distill_stage_1_deadline_hours,\n\
  \ reduce_search_space_mode,\n )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-calculate-training-parameters-2:
  container:
  args:
@@ -9113,7 +9113,7 @@ deploymentSpec:
  \ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
  \ stage_2_single_run_max_secs,\n distill_stage_1_deadline_hours,\n\
  \ reduce_search_space_mode,\n )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-feature-attribution:
  container:
  args:
@@ -9299,7 +9299,7 @@ deploymentSpec:
  \n return collections.namedtuple(\n 'Outputs',\n [\n \
  \ 'model_display_name',\n ],\n )(\n model_display_name,\n )\n\
  \n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-importer:
  importer:
  artifactUri:
@@ -9333,7 +9333,7 @@ deploymentSpec:
  \ 'r') as f:\n split_0_content = f.read()\n with open(split_1, 'r')\
  \ as f:\n split_1_content = f.read()\n with open(splits, 'w') as f:\n\
  \ f.write(','.join([split_0_content, split_1_content]))\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-model-batch-explanation:
  container:
  args:
@@ -10158,7 +10158,7 @@ deploymentSpec:
  \ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
  \ with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n\
  \ return data_source['tf_record_data_source']['file_patterns']\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-read-input-uri-2:
  container:
  args:
@@ -10186,7 +10186,7 @@ deploymentSpec:
  \ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
  \ with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n\
  \ return data_source['tf_record_data_source']['file_patterns']\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-set-optional-inputs:
  container:
  args:
@@ -10234,7 +10234,7 @@ deploymentSpec:
  \ 'data_source_csv_filenames',\n 'data_source_bigquery_table_path',\n\
  \ ],\n )(\n data_source_csv_filenames,\n data_source_bigquery_table_path,\n\
  \ )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-string-not-empty:
  container:
  args:
@@ -10258,7 +10258,7 @@ deploymentSpec:
  \n Returns:\n Boolean value. -> 'true' if empty, 'false' if not empty.\
  \ We need to use str\n instead of bool due to a limitation in KFP compiler.\n\
  \ \"\"\"\n return 'true' if value else 'false'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-tabular-stats-and-example-gen:
  container:
  args:
@@ -10275,7 +10275,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625", "\",
  \"args\": [\"stats_generator\",", "\"--train_spec={\\\"prediction_type\\\":
  \\\"", "{{$.inputs.parameters[''prediction_type'']}}", "\\\", \\\"target_column\\\":
  \\\"", "{{$.inputs.parameters[''target_column_name'']}}", "\\\", \\\"optimization_objective\\\":
@@ -10308,7 +10308,7 @@ deploymentSpec:
  \"--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\",
  \"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
  \"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
- "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625",
+ "\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625",
  "\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
  "\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
  "\", \"--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
@@ -10363,7 +10363,7 @@ deploymentSpec:
  \ f'{directory}/prediction.results-*',\n ],\n 'coder':\
  \ 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\
  \n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  exec-write-bp-result-path-2:
  container:
  args:
@@ -10393,7 +10393,7 @@ deploymentSpec:
  \ f'{directory}/prediction.results-*',\n ],\n 'coder':\
  \ 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\
  \n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240419_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240710_0625
  pipelineInfo:
  description: 'Complete AutoML Tables pipeline.

@@ -99,11 +99,11 @@ def automl_tabular_cv_trainer(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "args": ["l2l_cv_tuner", "--transform_output_path=',
  transform_output.uri,
  '", "--training_docker_uri=',
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  (
  f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
  ' "--training_base_dir='
@@ -106,7 +106,7 @@ def automl_tabular_ensemble(
  ' 1, "machine_spec": {"machine_type": "n1-highmem-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "args": ["ensemble", "--transform_output_path=',
  transform_output.uri,
  '", "--model_output_path=',
@@ -137,7 +137,7 @@ def automl_tabular_ensemble(
  '", "--warmup_data=',
  warmup_data.uri,
  '", "--prediction_docker_uri=',
- 'us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625',
  '", "--model_path=',
  model.uri,
  '", "--custom_model_path=',
@@ -72,7 +72,7 @@ def automl_tabular_finalizer(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "args": ["cancel_l2l_tuner", "--error_file_path=',
  root_dir,
  (
@@ -32,7 +32,7 @@ def automl_tabular_infra_validator(
  # fmt: on

  return dsl.ContainerSpec(
- image='us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240419_0625',
+ image='us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240710_0625',
  command=[],
  args=['--executor_input', '{{$}}'],
  )
@@ -52,7 +52,7 @@ def split_materialized_data(
  # fmt: on

  return dsl.ContainerSpec(
- image='us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625',
+ image='us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625',
  command=[
  'sh',
  '-ec',
@@ -109,11 +109,11 @@ def automl_tabular_stage_1_tuner(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "args": ["l2l_stage_1_tuner", "--transform_output_path=',
  transform_output.uri,
  '", "--training_docker_uri=',
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "--feature_selection_result_path=',
  feature_ranking.uri,
  '", "--disable_early_stopping=',
@@ -136,7 +136,7 @@ def tabular_stats_and_example_gen(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  '", "args": ["stats_generator",',
  '"--train_spec={\\"prediction_type\\": \\"',
  prediction_type,
@@ -215,7 +215,7 @@ def tabular_stats_and_example_gen(
  ),
  dataflow_max_num_workers,
  '", "--dataflow_worker_container_image=',
- 'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625',
  '", "--dataflow_machine_type=',
  dataflow_machine_type,
  '", "--dataflow_disk_size_gb=',
@@ -95,7 +95,7 @@ def training_configurator_and_validator(
  # fmt: on

  return dsl.ContainerSpec(
- image='us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240419_0625',
+ image='us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240710_0625',
  command=[],
  args=[
  'training_configurator_and_validator',
@@ -108,7 +108,7 @@ def automl_tabular_transform(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240710_0625',
  (
  '", "args": ["transform", "--is_mp=true",'
  ' "--transform_output_artifact_path='
@@ -167,7 +167,7 @@ def automl_tabular_transform(
  '", "--dataflow_machine_type=',
  dataflow_machine_type,
  '", "--dataflow_worker_container_image=',
- 'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240419_0625',
+ 'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240710_0625',
  '", "--dataflow_disk_size_gb=',
  dataflow_disk_size_gb,
  '", "--dataflow_subnetwork_fully_qualified=',
@@ -36,6 +36,7 @@ def custom_training_job(
  base_output_directory: str = '',
  labels: Dict[str, str] = {},
  encryption_spec_key_name: str = '',
+ persistent_resource_id: str = _placeholders.PERSISTENT_RESOURCE_ID_PLACEHOLDER,
  project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
  ):
  # fmt: off
@@ -55,6 +56,7 @@ def custom_training_job(
  base_output_directory: The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. See [more information ](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GcsDestination).
  labels: The labels with user-defined metadata to organize the CustomJob. See [more information](https://goo.gl/xmQnxf).
  encryption_spec_key_name: Customer-managed encryption key options for the CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key.
+ persistent_resource_id: The ID of the PersistentResource in the same Project and Location which to run. The default value is a placeholder that will be resolved to the PipelineJob [RuntimeConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs#PipelineJob.RuntimeConfig)'s persistent resource id at runtime. However, if the PipelineJob doesn't set Persistent Resource as the job level runtime, the placedholder will be resolved to an empty string and the custom job will be run on demand. If the value is set explicitly, the custom job will runs in the specified persistent resource, in this case, please note the network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
  project: Project to create the custom training job in. Defaults to the project in which the PipelineJob is run.

  Returns:
@@ -82,6 +84,7 @@ def custom_training_job(
  'base_output_directory': {
  'output_uri_prefix': base_output_directory
  },
+ 'persistent_resource_id': persistent_resource_id,
  },
  'labels': labels,
  'encryption_spec': {'kms_key_name': encryption_spec_key_name},
@@ -18,6 +18,7 @@ import textwrap
  from typing import Callable, Dict, List, Optional
  import warnings

+ from google_cloud_pipeline_components import _placeholders
  from google_cloud_pipeline_components.v1.custom_job import component
  from kfp import components
  import yaml
@@ -68,6 +69,7 @@ def create_custom_training_job_from_component(
  nfs_mounts: Optional[List[Dict[str, str]]] = None,
  base_output_directory: str = '',
  labels: Optional[Dict[str, str]] = None,
+ persistent_resource_id: str = _placeholders.PERSISTENT_RESOURCE_ID_PLACEHOLDER,
  env: Optional[List[Dict[str, str]]] = None,
  ) -> Callable:
  # fmt: off
@@ -95,6 +97,7 @@ def create_custom_training_job_from_component(
  nfs_mounts: A list of [NfsMount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#NfsMount) resource specs in Json dict format. For more details about mounting NFS for CustomJob, see [Mount an NFS share for custom training](https://cloud.google.com/vertex-ai/docs/training/train-nfs-share).
  base_output_directory: The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. See [more information](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GcsDestination).
  labels: The labels with user-defined metadata to organize the CustomJob. See [more information](https://goo.gl/xmQnxf).
+ persistent_resource_id: The ID of the PersistentResource in the same Project and Location which to run. The default value is a placeholder that will be resolved to the PipelineJob [RuntimeConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs#PipelineJob.RuntimeConfig)'s persistent resource id at runtime. However, if the PipelineJob doesn't set Persistent Resource as the job level runtime, the placedholder will be resolved to an empty string and the custom job will be run on demand. If the value is set explicitly, the custom job will runs in the specified persistent resource, in this case, please note the network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
  env: Environment variables to be passed to the container. Takes the form `[{'name': '...', 'value': '...'}]`. Maximum limit is 100.

  Returns:
@@ -199,6 +202,7 @@ def create_custom_training_job_from_component(
  'base_output_directory': base_output_directory,
  'labels': labels or {},
  'encryption_spec_key_name': encryption_spec_key_name,
+ 'persistent_resource_id': persistent_resource_id,
  }

  for param_name, default_value in custom_job_param_defaults.items():
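
The hunks above only add the persistent_resource_id parameter and forward it into the CustomJob payload. A minimal sketch of how a caller might target a pre-created persistent resource through the updated wrapper, assuming an illustrative toy component and resource ID (neither appears in this diff):

# Hedged sketch (not from this diff): wrap a toy KFP component as a Vertex AI
# CustomJob that runs on an existing persistent resource. The component body,
# display name, and the resource ID 'my-persistent-cluster' are assumptions.
from kfp import dsl
from google_cloud_pipeline_components.v1.custom_job import utils


@dsl.component
def train_step(epochs: int) -> str:
    return f'trained for {epochs} epochs'


# Leaving persistent_resource_id at its default keeps the new placeholder, which
# resolves to the PipelineJob-level persistent resource (or to '' for on-demand).
train_on_persistent_resource = utils.create_custom_training_job_from_component(
    train_step,
    display_name='train-step-on-persistent-resource',
    machine_type='n1-standard-8',
    persistent_resource_id='my-persistent-cluster',  # assumed, pre-created resource
)


@dsl.pipeline(name='persistent-resource-demo')
def demo_pipeline():
    train_on_persistent_resource(epochs=3)

Because the same placeholder default appears in both the component signature and the wrapper, callers that never mention the parameter keep the current on-demand behavior unless the PipelineJob itself supplies a persistent resource.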
@@ -15,10 +15,12 @@

  from typing import Dict, List, NamedTuple

+ from google_cloud_pipeline_components import google_template_metadata
  from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
  from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationTextGenerationOp
  from google_cloud_pipeline_components._implementation.model_evaluation import ModelNamePreprocessorOp
  from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
+ from google_cloud_pipeline_components.proto import template_metadata_pb2
  from google_cloud_pipeline_components.types.artifact_types import VertexModel
  from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
  from kfp import dsl
@@ -29,7 +31,21 @@ from kfp import dsl

  _PIPELINE_NAME = 'evaluation-llm-text-generation-pipeline'

+ output_gcs_validation = template_metadata_pb2.GoogleCloudStorageValidation(
+ gcs_uri='{{$.parameter.batch_predict_gcs_destination_output_uri}}',
+ is_input=False,
+ default_service_account='{{$.pipeline_google_cloud_project_number}}-compute@developer.gserviceaccount.com',
+ override_placeholder='{{$.parameter.service_account}}',
+ )

+
+ @google_template_metadata.set_template_metadata(
+ template_metadata=template_metadata_pb2.TemplateMetadata(
+ preflight_validations=template_metadata_pb2.ValidationItems(
+ gcs_validations=[output_gcs_validation]
+ )
+ )
+ )
  @dsl.pipeline(name=_PIPELINE_NAME)
  def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-default-value
  project: str,
@@ -38,6 +54,7 @@ def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-defaul
  batch_predict_gcs_destination_output_uri: str,
  model_name: str = 'publishers/google/models/text-bison@002',
  evaluation_task: str = 'text-generation',
+ role_field_name: str = 'role',
  input_field_name: str = 'input_text',
  target_field_name: str = 'output_text',
  batch_predict_instances_format: str = 'jsonl',
@@ -76,6 +93,7 @@ def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-defaul
  batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the eval pipeline output is to be written to.
  model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
  evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. Currently supported tasks are: `summarization`, `question-answering`, `text-generation`.
+ role_field_name: The field name of the role for input eval dataset instances that contains the input prompts to the LLM.
  input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.
  target_field_name: The field name of the eval dataset instance that contains an example reference text response. Alternatively referred to as the ground truth (or ground_truth_column) field. If not set, defaulted to `output_text`.
  batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. Only "jsonl" is currently supported. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
@@ -124,6 +142,9 @@ def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-defaul
  location=location,
  gcs_source_uris=batch_predict_gcs_source_uris,
  input_field_name=input_field_name,
+ role_field_name=role_field_name,
+ target_field_name=target_field_name,
+ model_name=model_name,
  machine_type=machine_type,
  service_account=service_account,
  network=network,
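
For context, a sketch of how the updated eval pipeline might be compiled and parameterized with the new role_field_name input. Parameter names are taken from the signature and docstring shown above; the project ID and bucket paths are placeholders, not values from this diff:

# Hedged sketch: compile the updated text-generation eval pipeline and pass the
# new role_field_name alongside the existing dataset field names.
from kfp import compiler
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_text_generation_pipeline import (
    evaluation_llm_text_generation_pipeline,
)

compiler.Compiler().compile(
    pipeline_func=evaluation_llm_text_generation_pipeline,
    package_path='evaluation_llm_text_generation_pipeline.json',
)

# Runtime parameters for a PipelineJob submission; role_field_name defaults to
# 'role' per the signature above. All values below are illustrative.
parameter_values = {
    'project': 'my-project',
    'location': 'us-central1',
    'batch_predict_gcs_source_uris': ['gs://my-bucket/eval/dataset.jsonl'],
    'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval/output',
    'role_field_name': 'role',
    'input_field_name': 'input_text',
    'target_field_name': 'output_text',
}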
@@ -13,4 +13,4 @@
  # limitations under the License.
  """Google Cloud Pipeline Components version."""

- __version__ = "2.14.1"
+ __version__ = "2.16.0"
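
Since version.py is the single source of the reported version, code that depends on the additions in this release (persistent_resource_id, role_field_name) could gate on it at import time. A minimal sketch; the plain tuple comparison is only adequate for simple X.Y.Z strings like the one defined above:

# Hedged sketch: fail fast if an older google-cloud-pipeline-components is installed.
from google_cloud_pipeline_components.version import __version__

if tuple(int(part) for part in __version__.split('.')) < (2, 16, 0):
    raise RuntimeError(
        f'google-cloud-pipeline-components>=2.16.0 required, found {__version__}'
    )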