google-cloud-pipeline-components 2.16.1__py3-none-any.whl → 2.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of google-cloud-pipeline-components might be problematic.

Files changed (64)
  1. google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
  2. google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py +14 -0
  3. google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py +208 -0
  4. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py +3 -0
  5. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py +2 -4
  6. google_cloud_pipeline_components/_implementation/model_evaluation/version.py +1 -1
  7. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py +1 -1
  8. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py +2 -2
  9. google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py +2 -2
  10. google_cloud_pipeline_components/preview/automl/forecasting/learn_to_learn_forecasting_pipeline.yaml +34 -34
  11. google_cloud_pipeline_components/preview/automl/forecasting/sequence_to_sequence_forecasting_pipeline.yaml +34 -34
  12. google_cloud_pipeline_components/preview/automl/forecasting/temporal_fusion_transformer_forecasting_pipeline.yaml +34 -34
  13. google_cloud_pipeline_components/preview/automl/forecasting/time_series_dense_encoder_forecasting_pipeline.yaml +34 -34
  14. google_cloud_pipeline_components/preview/automl/tabular/auto_feature_engineering.py +1 -1
  15. google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_feature_selection_pipeline.yaml +39 -39
  16. google_cloud_pipeline_components/preview/automl/tabular/automl_tabular_v2_pipeline.yaml +41 -41
  17. google_cloud_pipeline_components/preview/automl/tabular/distillation_stage_feature_transform_engine.py +2 -2
  18. google_cloud_pipeline_components/preview/automl/tabular/feature_selection.py +2 -2
  19. google_cloud_pipeline_components/preview/automl/tabular/feature_selection_pipeline.yaml +4 -4
  20. google_cloud_pipeline_components/preview/automl/tabular/feature_transform_engine.py +3 -3
  21. google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job.py +2 -2
  22. google_cloud_pipeline_components/preview/automl/tabular/tabnet_hyperparameter_tuning_job_pipeline.yaml +15 -15
  23. google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer.py +2 -2
  24. google_cloud_pipeline_components/preview/automl/tabular/tabnet_trainer_pipeline.yaml +13 -13
  25. google_cloud_pipeline_components/preview/automl/tabular/utils.py +1 -1
  26. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job.py +2 -2
  27. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_hyperparameter_tuning_job_pipeline.yaml +14 -14
  28. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer.py +2 -2
  29. google_cloud_pipeline_components/preview/automl/tabular/wide_and_deep_trainer_pipeline.yaml +13 -13
  30. google_cloud_pipeline_components/preview/automl/tabular/xgboost_hyperparameter_tuning_job_pipeline.yaml +14 -14
  31. google_cloud_pipeline_components/preview/automl/tabular/xgboost_trainer_pipeline.yaml +13 -13
  32. google_cloud_pipeline_components/preview/custom_job/__init__.py +9 -0
  33. google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_classification_pipeline.py +180 -0
  34. google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_text_generation_pipeline.py +178 -0
  35. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/__init__.py +20 -0
  36. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py +13 -0
  37. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py +109 -0
  38. google_cloud_pipeline_components/proto/preflight_validations_pb2.py +58 -0
  39. google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_predict_pipeline.yaml +10 -10
  40. google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_train_pipeline.yaml +31 -31
  41. google_cloud_pipeline_components/v1/automl/forecasting/prophet_predict_pipeline.yaml +13 -13
  42. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py +3 -3
  43. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml +14 -14
  44. google_cloud_pipeline_components/v1/automl/tabular/automl_tabular_pipeline.yaml +37 -37
  45. google_cloud_pipeline_components/v1/automl/tabular/cv_trainer.py +2 -2
  46. google_cloud_pipeline_components/v1/automl/tabular/ensemble.py +2 -2
  47. google_cloud_pipeline_components/v1/automl/tabular/finalizer.py +1 -1
  48. google_cloud_pipeline_components/v1/automl/tabular/infra_validator.py +1 -1
  49. google_cloud_pipeline_components/v1/automl/tabular/split_materialized_data.py +1 -1
  50. google_cloud_pipeline_components/v1/automl/tabular/stage_1_tuner.py +2 -2
  51. google_cloud_pipeline_components/v1/automl/tabular/stats_and_example_gen.py +2 -2
  52. google_cloud_pipeline_components/v1/automl/tabular/training_configurator_and_validator.py +1 -1
  53. google_cloud_pipeline_components/v1/automl/tabular/transform.py +2 -2
  54. google_cloud_pipeline_components/v1/automl/tabular/utils.py +1 -1
  55. google_cloud_pipeline_components/v1/custom_job/component.py +5 -2
  56. google_cloud_pipeline_components/v1/custom_job/utils.py +23 -0
  57. google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py +2 -1
  58. google_cloud_pipeline_components/v1/model_evaluation/regression_component.py +1 -1
  59. google_cloud_pipeline_components/version.py +1 -1
  60. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.18.0.dist-info}/METADATA +23 -18
  61. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.18.0.dist-info}/RECORD +64 -56
  62. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.18.0.dist-info}/WHEEL +1 -1
  63. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.18.0.dist-info}/LICENSE +0 -0
  64. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.18.0.dist-info}/top_level.txt +0 -0
google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py
@@ -17,4 +17,4 @@
  DO NOT EDIT - This file is generated, manual changes will be overridden.
  """

- IMAGE_TAG = '20240623_1707'
+ IMAGE_TAG = '20240818_1707'
google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py
@@ -0,0 +1,14 @@
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Google Cloud Pipeline Evaluation Import Evaluation Component."""
google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py
@@ -0,0 +1,208 @@
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import List, Optional
+
+ from google_cloud_pipeline_components import _image
+ from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
+ from google_cloud_pipeline_components.types.artifact_types import ForecastingMetrics
+ from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
+ from google_cloud_pipeline_components.types.artifact_types import VertexModel
+ from kfp import dsl
+ from kfp.dsl import Input
+ from kfp.dsl import Metrics
+
+
+ @dsl.container_component
+ def model_evaluation_import(
+ model: Input[VertexModel],
+ gcp_resources: dsl.OutputPath(str),
+ evaluation_resource_name: dsl.OutputPath(str),
+ metrics: Optional[Input[Metrics]] = None,
+ row_based_metrics: Optional[Input[Metrics]] = None,
+ problem_type: Optional[str] = None,
+ classification_metrics: Optional[Input[ClassificationMetrics]] = None,
+ forecasting_metrics: Optional[Input[ForecastingMetrics]] = None,
+ regression_metrics: Optional[Input[RegressionMetrics]] = None,
+ text_generation_metrics: Optional[Input[Metrics]] = None,
+ question_answering_metrics: Optional[Input[Metrics]] = None,
+ summarization_metrics: Optional[Input[Metrics]] = None,
+ explanation: Optional[Input[Metrics]] = None,
+ feature_attributions: Optional[Input[Metrics]] = None,
+ embedding_metrics: Optional[Input[Metrics]] = None,
+ display_name: str = "",
+ dataset_path: str = "",
+ dataset_paths: List[str] = [],
+ dataset_type: str = "",
+ ):
+ # fmt: off
+ """Imports a model evaluation artifact to an existing Vertex model with
+ ModelService.ImportModelEvaluation.
+
+ For more details, see
+ https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models.evaluations
+ One of the four metrics inputs must be provided, metrics & problem_type,
+ classification_metrics, regression_metrics, or forecasting_metrics.
+
+ Args:
+ model: Vertex model resource that will be the parent resource of the
+ uploaded evaluation.
+ metrics: Path of metrics generated from an evaluation component.
+ row_based_metrics:
+ Path of row_based_metrics generated from an evaluation component.
+ problem_type: The problem type of the metrics being imported to the
+ VertexModel. `classification`, `regression`, `forecasting`,
+ `text-generation`, `question-answering`, and `summarization` are the
+ currently supported problem types. Must be provided when `metrics` is
+ provided.
+ classification_metrics: google.ClassificationMetrics artifact generated from
+ the ModelEvaluationClassificationOp component.
+ forecasting_metrics: google.ForecastingMetrics artifact generated from
+ the ModelEvaluationForecastingOp component.
+ regression_metrics: google.ClassificationMetrics artifact generated from
+ the ModelEvaluationRegressionOp component.
+ text_generation_metrics: system.Metrics artifact generated from
+ the LLMEvaluationTextGenerationOp component. Subject to change to
+ google.TextGenerationMetrics.
+ question_answering_metrics: system.Metrics artifact generated from
+ the LLMEvaluationTextGenerationOp component. Subject to change to
+ google.QuestionAnsweringMetrics.
+ summarization_metrics: system.Metrics artifact generated from
+ the LLMEvaluationTextGenerationOp component. Subject to change to
+ google.SummarizationMetrics.
+ explanation: Path for model explanation metrics generated from an evaluation
+ component.
+ feature_attributions: The feature attributions metrics artifact generated
+ from the feature attribution component.
+ embedding_metrics: The embedding metrics artifact generated from the
+ embedding retrieval metrics component.
+ display_name: The display name for the uploaded model evaluation resource.
+ """
+ # fmt: on
+ return dsl.ContainerSpec(
+ image=_image.GCPC_IMAGE_TAG,
+ command=[
+ "python3",
+ "-u",
+ "-m",
+ "google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation",
+ ],
+ args=[
+ dsl.IfPresentPlaceholder(
+ input_name="metrics",
+ then=[
+ "--metrics",
+ metrics.uri,
+ "--metrics_explanation",
+ metrics.metadata["explanation_gcs_path"],
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="row_based_metrics",
+ then=[
+ "--row_based_metrics",
+ row_based_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="explanation",
+ then=[
+ "--explanation",
+ explanation.metadata["explanation_gcs_path"],
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="classification_metrics",
+ then=[
+ "--classification_metrics",
+ classification_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="forecasting_metrics",
+ then=[
+ "--forecasting_metrics",
+ forecasting_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="regression_metrics",
+ then=[
+ "--regression_metrics",
+ regression_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="text_generation_metrics",
+ then=[
+ "--text_generation_metrics",
+ text_generation_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="question_answering_metrics",
+ then=[
+ "--question_answering_metrics",
+ question_answering_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="summarization_metrics",
+ then=[
+ "--summarization_metrics",
+ summarization_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="feature_attributions",
+ then=[
+ "--feature_attributions",
+ feature_attributions.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="embedding_metrics",
+ then=[
+ "--embedding_metrics",
+ embedding_metrics.uri,
+ ],
+ ),
+ dsl.IfPresentPlaceholder(
+ input_name="problem_type",
+ then=[
+ "--problem_type",
+ problem_type,
+ ],
+ ),
+ "--display_name",
+ display_name,
+ "--dataset_path",
+ dataset_path,
+ "--dataset_paths",
+ dataset_paths,
+ "--dataset_type",
+ dataset_type,
+ "--pipeline_job_id",
+ dsl.PIPELINE_JOB_ID_PLACEHOLDER,
+ "--pipeline_job_resource_name",
+ dsl.PIPELINE_JOB_RESOURCE_NAME_PLACEHOLDER,
+ "--model_name",
+ model.metadata["resourceName"],
+ "--gcp_resources",
+ gcp_resources,
+ "--evaluation_resource_name",
+ evaluation_resource_name,
+ ],
+ )
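The new component only emits a ContainerSpec; callers wire artifacts into it from a KFP pipeline. The following is a minimal sketch (not taken from the package) of how it could be invoked for text-generation metrics, using dsl.importer to rehydrate the existing Vertex model and previously computed metrics. The pipeline name, URIs, and parameter values are illustrative assumptions.

from kfp import dsl
from kfp.dsl import Metrics
from google_cloud_pipeline_components._implementation.model_evaluation.import_evaluation.component import model_evaluation_import
from google_cloud_pipeline_components.types.artifact_types import VertexModel


@dsl.pipeline(name='import-text-generation-eval')
def import_text_generation_eval(
    model_name: str,       # projects/.../locations/.../models/...
    location: str,         # e.g. us-central1
    metrics_gcs_uri: str,  # URI of metrics written by an upstream eval step
):
  # Rehydrate the existing Vertex model as a VertexModel artifact; the
  # component reads metadata['resourceName'] to locate the parent model.
  get_model = dsl.importer(
      artifact_uri=f'https://{location}-aiplatform.googleapis.com/v1/{model_name}',
      artifact_class=VertexModel,
      metadata={'resourceName': model_name},
  )
  # Wrap previously computed metrics as a system.Metrics artifact.
  get_metrics = dsl.importer(
      artifact_uri=metrics_gcs_uri,
      artifact_class=Metrics,
  )
  model_evaluation_import(
      model=get_model.output,
      problem_type='text-generation',
      text_generation_metrics=get_metrics.output,
      display_name='imported-text-generation-eval',
  )

In the released evaluation pipelines this wiring is done for you; the sketch only illustrates the artifact contract (a VertexModel plus one of the metrics inputs).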
google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py
@@ -32,6 +32,7 @@ def model_evaluation_text_generation(
  row_based_metrics: Output[Metrics],
  project: str,
  location: str,
+ model_name: str,
  evaluation_task: str = 'text-generation',
  target_field_name: str = 'instance.output_text',
  prediction_field_name: str = 'predictions.content',
@@ -55,6 +56,7 @@ def model_evaluation_text_generation(
  Args:
  project: The GCP project that runs the pipeline component.
  location: The GCP region that runs the pipeline component.
+ model_name: The name of the model to be evaluated.
  evaluation_task: The task that the large language model will be evaluated
  on. The evaluation component computes a set of metrics relevant to that
  specific task. Currently supported tasks are: `summarization`,
@@ -124,6 +126,7 @@ def model_evaluation_text_generation(
  machine_type=machine_type,
  image_uri=version.LLM_EVAL_IMAGE_TAG,
  args=[
+ f'--model_name={model_name}',
  f'--evaluation_task={evaluation_task}',
  f'--target_field_name={target_field_name}',
  f'--prediction_field_name={prediction_field_name}',
google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py
@@ -101,10 +101,8 @@ def evaluation_dataset_preprocessor_internal(
  f'--gcs_source_uris={gcs_source_uris}',
  f'--input_field_name={input_field_name}',
  f'--role_field_name={role_field_name}',
- (
- f'--target_field_name={target_field_name}'
- f'--model_name={model_name}'
- ),
+ f'--target_field_name={target_field_name}',
+ f'--model_name={model_name}',
  f'--output_dirs={output_dirs}',
  '--executor_input={{$.json_escape[1]}}',
  ],
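The removed form relied on implicit concatenation of adjacent f-strings, which collapses two intended flags into one malformed argument; the replacement passes them as separate list items. A standalone illustration (the values are placeholders, not taken from the package):

target_field_name = 'instance.output_text'
model_name = 'publishers/google/models/text-bison@002'

# Old: adjacent f-strings inside parentheses are implicitly concatenated,
# producing a single argument with no separator between the two flags.
old_arg = (
    f'--target_field_name={target_field_name}'
    f'--model_name={model_name}'
)
# New: two separate, well-formed arguments.
new_args = [
    f'--target_field_name={target_field_name}',
    f'--model_name={model_name}',
]

print(old_arg)   # --target_field_name=instance.output_text--model_name=publishers/google/models/text-bison@002
print(new_args)  # ['--target_field_name=...', '--model_name=...']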
google_cloud_pipeline_components/_implementation/model_evaluation/version.py
@@ -14,7 +14,7 @@
  """Version constants for model evaluation components."""

  _EVAL_VERSION = 'v0.9.4'
- _LLM_EVAL_VERSION = 'v0.6'
+ _LLM_EVAL_VERSION = 'v0.7'

  _EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/model-evaluation'
  _LLM_EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/llm-model-evaluation'
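The diff does not show how these constants are combined, but the LLM evaluation component above consumes version.LLM_EVAL_IMAGE_TAG, so the bump presumably resolves to a new container reference along these lines (a hedged reconstruction, not the actual version.py source):

# Assumed composition: image name plus version tag.
_LLM_EVAL_VERSION = 'v0.7'
_LLM_EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/llm-model-evaluation'
LLM_EVAL_IMAGE_TAG = f'{_LLM_EVAL_IMAGE_NAME}:{_LLM_EVAL_VERSION}'
print(LLM_EVAL_IMAGE_TAG)  # gcr.io/ml-pipeline/llm-model-evaluation:v0.7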
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_ensemble.py
@@ -72,7 +72,7 @@ def automl_forecasting_ensemble(
  # fmt: on
  job_id = dsl.PIPELINE_JOB_ID_PLACEHOLDER
  task_id = dsl.PIPELINE_TASK_ID_PLACEHOLDER
- image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625'
+ image_uri = 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625'
  display_name = f'automl-forecasting-ensemble-{job_id}-{task_id}'

  error_file_path = f'{root_dir}/{job_id}/{task_id}/error.pb'
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_1_tuner.py
@@ -99,14 +99,14 @@ def automl_forecasting_stage_1_tuner(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625',
  '", "args": ["forecasting_mp_l2l_stage_1_tuner',
  '", "--region=',
  location,
  '", "--transform_output_path=',
  transform_output.uri,
  '", "--training_docker_uri=',
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625',
  '", "--reduce_search_space_mode=',
  reduce_search_space_mode,
  f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
google_cloud_pipeline_components/preview/automl/forecasting/forecasting_stage_2_tuner.py
@@ -97,14 +97,14 @@ def automl_forecasting_stage_2_tuner(
  ' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
  ' "container_spec": {"image_uri":"'
  ),
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625',
  '", "args": ["forecasting_mp_l2l_stage_2_tuner',
  '", "--region=',
  location,
  '", "--transform_output_path=',
  transform_output.uri,
  '", "--training_docker_uri=',
- 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625',
+ 'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625',
  f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
  '", "--training_base_dir=',
  root_dir,
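For orientation, the quoted fragments in the two tuner components above are concatenated at runtime into a CustomJob worker pool spec; the only thing this release changes is the image tag. Roughly, the assembled payload has the following shape (a sketch, with placeholder values for everything except the new tag):

# Approximate shape of the worker pool spec built from the string fragments
# above; values other than the image tag are illustrative placeholders.
worker_pool_spec = {
    'replica_count': 1,
    'machine_spec': {'machine_type': 'n1-standard-8'},
    'container_spec': {
        'image_uri': (
            'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/'
            'forecasting-training:20241121_0625'
        ),
        'args': [
            'forecasting_mp_l2l_stage_1_tuner',
            '--region=us-central1',
            '--training_docker_uri='
            'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/'
            'forecasting-training:20241121_0625',
        ],
    },
}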
google_cloud_pipeline_components/preview/automl/forecasting/learn_to_learn_forecasting_pipeline.yaml
@@ -5577,7 +5577,7 @@ deploymentSpec:
  - '{"display_name": "automl-forecasting-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
  "encryption_spec": {"kms_key_name": "{{$.inputs.parameters[''encryption_spec_key_name'']}}"},
  "job_spec": {"worker_pool_specs": [{"replica_count": 1, "machine_spec":
- {"machine_type": "n1-highmem-8"}, "container_spec": {"image_uri": "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ {"machine_type": "n1-highmem-8"}, "container_spec": {"image_uri": "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "args": ["forecasting_mp_ensemble", "--transform_output_path={{$.inputs.artifacts[''transform_output''].uri}}",
  "--error_file_path={{$.inputs.parameters[''root_dir'']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb",
  "--metadata_path={{$.inputs.artifacts[''metadata''].uri}}", "--tuning_result_input_path={{$.inputs.artifacts[''tuning_result_input''].uri}}",
@@ -5611,7 +5611,7 @@ deploymentSpec:
  - '{"display_name": "automl-forecasting-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
  "encryption_spec": {"kms_key_name": "{{$.inputs.parameters[''encryption_spec_key_name'']}}"},
  "job_spec": {"worker_pool_specs": [{"replica_count": 1, "machine_spec":
- {"machine_type": "n1-highmem-8"}, "container_spec": {"image_uri": "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ {"machine_type": "n1-highmem-8"}, "container_spec": {"image_uri": "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "args": ["forecasting_mp_ensemble", "--transform_output_path={{$.inputs.artifacts[''transform_output''].uri}}",
  "--error_file_path={{$.inputs.parameters[''root_dir'']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb",
  "--metadata_path={{$.inputs.artifacts[''metadata''].uri}}", "--tuning_result_input_path={{$.inputs.artifacts[''tuning_result_input''].uri}}",
@@ -5646,11 +5646,11 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "\", \"args\": [\"forecasting_mp_l2l_stage_1_tuner", "\", \"--region=",
  "{{$.inputs.parameters[''location'']}}", "\", \"--transform_output_path=",
  "{{$.inputs.artifacts[''transform_output''].uri}}", "\", \"--training_docker_uri=",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "\", \"--reduce_search_space_mode=", "{{$.inputs.parameters[''reduce_search_space_mode'']}}",
  "\", \"--component_id={{$.pipeline_task_uuid}}", "\", \"--training_base_dir=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train",
@@ -5689,11 +5689,11 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "\", \"args\": [\"forecasting_mp_l2l_stage_2_tuner", "\", \"--region=",
  "{{$.inputs.parameters[''location'']}}", "\", \"--transform_output_path=",
  "{{$.inputs.artifacts[''transform_output''].uri}}", "\", \"--training_docker_uri=",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20240808_0625",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/forecasting-training:20241121_0625",
  "\", \"--component_id={{$.pipeline_task_uuid}}", "\", \"--training_base_dir=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train",
  "\", \"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
@@ -5732,7 +5732,7 @@ deploymentSpec:
  \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
  "\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
  {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
- "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
+ "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20241121_0625", "\",
  \"args\": [\"cancel_l2l_tuner\", \"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}",
  "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--cleanup_lro_job_infos=",
  "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\"]}}]}}"]}'
@@ -5797,7 +5797,7 @@ deploymentSpec:
  \ 'stage_2_single_run_max_secs',\n ],\n )(\n stage_1_deadline_hours,\n\
  \ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
  \ stage_2_single_run_max_secs,\n )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-calculate-training-parameters-2:
  container:
  args:
@@ -5853,7 +5853,7 @@ deploymentSpec:
  \ 'stage_2_single_run_max_secs',\n ],\n )(\n stage_1_deadline_hours,\n\
  \ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
  \ stage_2_single_run_max_secs,\n )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-feature-attribution:
  container:
  args:
@@ -6044,8 +6044,8 @@ deploymentSpec:
  "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp"]}'
  - '{"Concat": ["--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}"]}'
  - '{"Concat": ["--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}"]}'
- - --dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625
- - --feature_transform_engine_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
+ - --dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20241121_0625
+ - --feature_transform_engine_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20241121_0625
  - '{"Concat": ["--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}"]}'
  - '{"Concat": ["--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}"]}'
  - '{"Concat": ["--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}"]}'
@@ -6062,7 +6062,7 @@ deploymentSpec:
  - '{"IfPresent": {"InputName": "group_temporal_total_weight", "Then": {"Concat":
  ["--group_temporal_total_weight=", "{{$.inputs.parameters[''group_temporal_total_weight'']}}"]}}}'
  - '{"Concat": ["--encryption_spec_key_name=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}"]}'
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20241121_0625
  resources:
  cpuLimit: 8.0
  memoryLimit: 30.0
@@ -6093,7 +6093,7 @@ deploymentSpec:
  \ = 'point'\n else:\n forecasting_type = 'quantile'\n\n return collections.namedtuple(\n\
  \ 'Outputs',\n (\n 'forecasting_type',\n 'quantiles',\n\
  \ ),\n )(forecasting_type, quantiles)\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-finalize-eval-quantile-parameters-2:
  container:
  args:
@@ -6121,7 +6121,7 @@ deploymentSpec:
  \ = 'point'\n else:\n forecasting_type = 'quantile'\n\n return collections.namedtuple(\n\
  \ 'Outputs',\n (\n 'forecasting_type',\n 'quantiles',\n\
  \ ),\n )(forecasting_type, quantiles)\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-or-create-model-description:
  container:
  args:
@@ -6150,7 +6150,7 @@ deploymentSpec:
  \ return f'{original_description} From: {pipeline_url}'\n\n # The pipeline\
  \ url contains KFP placeholders injected at runtime.\n return f'Vertex\
  \ forecasting model trained in the pipeline: {pipeline_url}'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-or-create-model-description-2:
  container:
  args:
@@ -6179,7 +6179,7 @@ deploymentSpec:
  \ return f'{original_description} From: {pipeline_url}'\n\n # The pipeline\
  \ url contains KFP placeholders injected at runtime.\n return f'Vertex\
  \ forecasting model trained in the pipeline: {pipeline_url}'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-prediction-image-uri:
  container:
  args:
@@ -6202,14 +6202,14 @@ deploymentSpec:
  Returns the prediction image corresponding to the given model type.\"\"\"\
  \n # Keys come from AutoMlTimeSeriesForecastingTrainSpec.\n # The URIs\
  \ must be hardcoded without any breaks in the code so string\n # replacement\
- \ will work correctly.\n images = {\n 'l2l': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-l2l:20240808_0625',\n\
- \ 'seq2seq': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-seq2seq:20240808_0625',\n\
- \ 'tft': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tft:20240808_0625',\n\
- \ 'tide': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tide:20240808_0625',\n\
+ \ will work correctly.\n images = {\n 'l2l': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-l2l:20241121_0625',\n\
+ \ 'seq2seq': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-seq2seq:20241121_0625',\n\
+ \ 'tft': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tft:20241121_0625',\n\
+ \ 'tide': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tide:20241121_0625',\n\
  \ }\n if model_type not in images:\n raise ValueError(\n f'Invalid\
  \ forecasting model type: {model_type}. Valid options are: '\n f'{images.keys()}.'\n\
  \ )\n return images[model_type]\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-prediction-image-uri-2:
  container:
  args:
@@ -6232,14 +6232,14 @@ deploymentSpec:
  Returns the prediction image corresponding to the given model type.\"\"\"\
  \n # Keys come from AutoMlTimeSeriesForecastingTrainSpec.\n # The URIs\
  \ must be hardcoded without any breaks in the code so string\n # replacement\
- \ will work correctly.\n images = {\n 'l2l': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-l2l:20240808_0625',\n\
- \ 'seq2seq': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-seq2seq:20240808_0625',\n\
- \ 'tft': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tft:20240808_0625',\n\
- \ 'tide': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tide:20240808_0625',\n\
+ \ will work correctly.\n images = {\n 'l2l': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-l2l:20241121_0625',\n\
+ \ 'seq2seq': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-seq2seq:20241121_0625',\n\
+ \ 'tft': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tft:20241121_0625',\n\
+ \ 'tide': 'us-docker.pkg.dev/vertex-ai/automl-tabular/forecasting-prediction-server-tide:20241121_0625',\n\
  \ }\n if model_type not in images:\n raise ValueError(\n f'Invalid\
  \ forecasting model type: {model_type}. Valid options are: '\n f'{images.keys()}.'\n\
  \ )\n return images[model_type]\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-predictions-column:
  container:
  args:
@@ -6262,7 +6262,7 @@ deploymentSpec:
  \ str) -> str:\n \"\"\"Generates the BP output's target column name.\"\"\
  \"\n if forecasting_type == 'quantile':\n return f'predicted_{target_column}.quantile_predictions'\n\
  \ return f'predicted_{target_column}.value'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-get-predictions-column-2:
  container:
  args:
@@ -6285,7 +6285,7 @@ deploymentSpec:
  \ str) -> str:\n \"\"\"Generates the BP output's target column name.\"\"\
  \"\n if forecasting_type == 'quantile':\n return f'predicted_{target_column}.quantile_predictions'\n\
  \ return f'predicted_{target_column}.value'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-importer:
  importer:
  artifactUri:
@@ -6817,7 +6817,7 @@ deploymentSpec:
  \ 'model_display_name',\n 'transformations',\n ],\n\
  \ )(\n data_source_csv_filenames,\n data_source_bigquery_table_path,\n\
  \ model_display_name,\n transformations,\n )\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-split-materialized-data:
  container:
  args:
@@ -6863,7 +6863,7 @@ deploymentSpec:
  \ 'w') as f:\n f.write(file_patterns[0])\n\n with tf.io.gfile.GFile(materialized_eval_split,\
  \ 'w') as f:\n f.write(file_patterns[1])\n\n with tf.io.gfile.GFile(materialized_test_split,\
  \ 'w') as f:\n f.write(file_patterns[2])\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20241121_0625
  exec-string-not-empty:
  container:
  args:
@@ -6887,7 +6887,7 @@ deploymentSpec:
  \n Returns:\n Boolean value. -> 'true' if empty, 'false' if not empty.\
  \ We need to use str\n instead of bool due to a limitation in KFP compiler.\n\
  \ \"\"\"\n return 'true' if value else 'false'\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-table-to-uri:
  container:
  args:
@@ -6917,7 +6917,7 @@ deploymentSpec:
  \ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
  \ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
  \ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-table-to-uri-2:
  container:
  args:
@@ -6947,7 +6947,7 @@ deploymentSpec:
  \ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
  \ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
  \ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
  exec-training-configurator-and-validator:
  container:
  args:
@@ -6992,7 +6992,7 @@ deploymentSpec:
  ["--temporal_total_weight=", "{{$.inputs.parameters[''temporal_total_weight'']}}"]}}}'
  - '{"IfPresent": {"InputName": "group_temporal_total_weight", "Then": {"Concat":
  ["--group_temporal_total_weight=", "{{$.inputs.parameters[''group_temporal_total_weight'']}}"]}}}'
- image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
+ image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20241121_0625
  pipelineInfo:
  description: The AutoML Forecasting pipeline.
  name: learn-to-learn-forecasting
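The remaining YAML hunks in this file, and in the other compiled pipeline specs listed above, are the same mechanical bump from the 20240808_0625 image tag to 20241121_0625. A small standalone script (not part of the package) can be run against any compiled pipeline spec to confirm no stale tags remain:

import re
import sys

EXPECTED_TAG = '20241121_0625'
# Matches AutoML tabular/forecasting image references such as
# us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20241121_0625
IMAGE_RE = re.compile(
    r'us-docker\.pkg\.dev/vertex-ai(?:-restricted)?/automl-tabular/'
    r'[\w.-]+:(\d{8}_\d{4})'
)


def stale_tags(path):
    """Returns the set of image tags in the file that differ from EXPECTED_TAG."""
    with open(path, encoding='utf-8') as f:
        text = f.read()
    return {tag for tag in IMAGE_RE.findall(text) if tag != EXPECTED_TAG}


if __name__ == '__main__':
    leftovers = stale_tags(sys.argv[1])
    print('all image tags up to date' if not leftovers
          else f'stale tags: {sorted(leftovers)}')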