google-cloud-pipeline-components 2.16.1__py3-none-any.whl → 2.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (19) hide show
  1. google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
  2. google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py +14 -0
  3. google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py +208 -0
  4. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py +3 -0
  5. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py +2 -4
  6. google_cloud_pipeline_components/_implementation/model_evaluation/version.py +1 -1
  7. google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_classification_pipeline.py +180 -0
  8. google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_text_generation_pipeline.py +178 -0
  9. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/__init__.py +20 -0
  10. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py +13 -0
  11. google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py +109 -0
  12. google_cloud_pipeline_components/proto/preflight_validations_pb2.py +58 -0
  13. google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py +2 -1
  14. google_cloud_pipeline_components/version.py +1 -1
  15. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/METADATA +20 -17
  16. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/RECORD +19 -11
  17. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/WHEEL +1 -1
  18. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/LICENSE +0 -0
  19. {google_cloud_pipeline_components-2.16.1.dist-info → google_cloud_pipeline_components-2.17.0.dist-info}/top_level.txt +0 -0
@@ -17,4 +17,4 @@
17
17
  DO NOT EDIT - This file is generated, manual changes will be overridden.
18
18
  """
19
19
 
20
- IMAGE_TAG = '20240623_1707'
20
+ IMAGE_TAG = '20240818_1707'
@@ -0,0 +1,14 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Google Cloud Pipeline Evaluation Import Evaluation Component."""
@@ -0,0 +1,208 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import List, Optional
17
+
18
+ from google_cloud_pipeline_components import _image
19
+ from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
20
+ from google_cloud_pipeline_components.types.artifact_types import ForecastingMetrics
21
+ from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
22
+ from google_cloud_pipeline_components.types.artifact_types import VertexModel
23
+ from kfp import dsl
24
+ from kfp.dsl import Input
25
+ from kfp.dsl import Metrics
26
+
27
+
28
+ @dsl.container_component
29
+ def model_evaluation_import(
30
+ model: Input[VertexModel],
31
+ gcp_resources: dsl.OutputPath(str),
32
+ evaluation_resource_name: dsl.OutputPath(str),
33
+ metrics: Optional[Input[Metrics]] = None,
34
+ row_based_metrics: Optional[Input[Metrics]] = None,
35
+ problem_type: Optional[str] = None,
36
+ classification_metrics: Optional[Input[ClassificationMetrics]] = None,
37
+ forecasting_metrics: Optional[Input[ForecastingMetrics]] = None,
38
+ regression_metrics: Optional[Input[RegressionMetrics]] = None,
39
+ text_generation_metrics: Optional[Input[Metrics]] = None,
40
+ question_answering_metrics: Optional[Input[Metrics]] = None,
41
+ summarization_metrics: Optional[Input[Metrics]] = None,
42
+ explanation: Optional[Input[Metrics]] = None,
43
+ feature_attributions: Optional[Input[Metrics]] = None,
44
+ embedding_metrics: Optional[Input[Metrics]] = None,
45
+ display_name: str = "",
46
+ dataset_path: str = "",
47
+ dataset_paths: List[str] = [],
48
+ dataset_type: str = "",
49
+ ):
50
+ # fmt: off
51
+ """Imports a model evaluation artifact to an existing Vertex model with
52
+ ModelService.ImportModelEvaluation.
53
+
54
+ For more details, see
55
+ https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models.evaluations
56
+ One of the four metrics inputs must be provided: metrics & problem_type,
57
+ classification_metrics, regression_metrics, or forecasting_metrics.
58
+
59
+ Args:
60
+ model: Vertex model resource that will be the parent resource of the
61
+ uploaded evaluation.
62
+ metrics: Path of metrics generated from an evaluation component.
63
+ row_based_metrics:
64
+ Path of row_based_metrics generated from an evaluation component.
65
+ problem_type: The problem type of the metrics being imported to the
66
+ VertexModel. `classification`, `regression`, `forecasting`,
67
+ `text-generation`, `question-answering`, and `summarization` are the
68
+ currently supported problem types. Must be provided when `metrics` is
69
+ provided.
70
+ classification_metrics: google.ClassificationMetrics artifact generated from
71
+ the ModelEvaluationClassificationOp component.
72
+ forecasting_metrics: google.ForecastingMetrics artifact generated from
73
+ the ModelEvaluationForecastingOp component.
74
+ regression_metrics: google.RegressionMetrics artifact generated from
75
+ the ModelEvaluationRegressionOp component.
76
+ text_generation_metrics: system.Metrics artifact generated from
77
+ the LLMEvaluationTextGenerationOp component. Subject to change to
78
+ google.TextGenerationMetrics.
79
+ question_answering_metrics: system.Metrics artifact generated from
80
+ the LLMEvaluationTextGenerationOp component. Subject to change to
81
+ google.QuestionAnsweringMetrics.
82
+ summarization_metrics: system.Metrics artifact generated from
83
+ the LLMEvaluationTextGenerationOp component. Subject to change to
84
+ google.SummarizationMetrics.
85
+ explanation: Path for model explanation metrics generated from an evaluation
86
+ component.
87
+ feature_attributions: The feature attributions metrics artifact generated
88
+ from the feature attribution component.
89
+ embedding_metrics: The embedding metrics artifact generated from the
90
+ embedding retrieval metrics component.
91
+ display_name: The display name for the uploaded model evaluation resource.
92
+ """
93
+ # fmt: on
94
+ return dsl.ContainerSpec(
95
+ image=_image.GCPC_IMAGE_TAG,
96
+ command=[
97
+ "python3",
98
+ "-u",
99
+ "-m",
100
+ "google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation",
101
+ ],
102
+ args=[
103
+ dsl.IfPresentPlaceholder(
104
+ input_name="metrics",
105
+ then=[
106
+ "--metrics",
107
+ metrics.uri,
108
+ "--metrics_explanation",
109
+ metrics.metadata["explanation_gcs_path"],
110
+ ],
111
+ ),
112
+ dsl.IfPresentPlaceholder(
113
+ input_name="row_based_metrics",
114
+ then=[
115
+ "--row_based_metrics",
116
+ row_based_metrics.uri,
117
+ ],
118
+ ),
119
+ dsl.IfPresentPlaceholder(
120
+ input_name="explanation",
121
+ then=[
122
+ "--explanation",
123
+ explanation.metadata["explanation_gcs_path"],
124
+ ],
125
+ ),
126
+ dsl.IfPresentPlaceholder(
127
+ input_name="classification_metrics",
128
+ then=[
129
+ "--classification_metrics",
130
+ classification_metrics.uri,
131
+ ],
132
+ ),
133
+ dsl.IfPresentPlaceholder(
134
+ input_name="forecasting_metrics",
135
+ then=[
136
+ "--forecasting_metrics",
137
+ forecasting_metrics.uri,
138
+ ],
139
+ ),
140
+ dsl.IfPresentPlaceholder(
141
+ input_name="regression_metrics",
142
+ then=[
143
+ "--regression_metrics",
144
+ regression_metrics.uri,
145
+ ],
146
+ ),
147
+ dsl.IfPresentPlaceholder(
148
+ input_name="text_generation_metrics",
149
+ then=[
150
+ "--text_generation_metrics",
151
+ text_generation_metrics.uri,
152
+ ],
153
+ ),
154
+ dsl.IfPresentPlaceholder(
155
+ input_name="question_answering_metrics",
156
+ then=[
157
+ "--question_answering_metrics",
158
+ question_answering_metrics.uri,
159
+ ],
160
+ ),
161
+ dsl.IfPresentPlaceholder(
162
+ input_name="summarization_metrics",
163
+ then=[
164
+ "--summarization_metrics",
165
+ summarization_metrics.uri,
166
+ ],
167
+ ),
168
+ dsl.IfPresentPlaceholder(
169
+ input_name="feature_attributions",
170
+ then=[
171
+ "--feature_attributions",
172
+ feature_attributions.uri,
173
+ ],
174
+ ),
175
+ dsl.IfPresentPlaceholder(
176
+ input_name="embedding_metrics",
177
+ then=[
178
+ "--embedding_metrics",
179
+ embedding_metrics.uri,
180
+ ],
181
+ ),
182
+ dsl.IfPresentPlaceholder(
183
+ input_name="problem_type",
184
+ then=[
185
+ "--problem_type",
186
+ problem_type,
187
+ ],
188
+ ),
189
+ "--display_name",
190
+ display_name,
191
+ "--dataset_path",
192
+ dataset_path,
193
+ "--dataset_paths",
194
+ dataset_paths,
195
+ "--dataset_type",
196
+ dataset_type,
197
+ "--pipeline_job_id",
198
+ dsl.PIPELINE_JOB_ID_PLACEHOLDER,
199
+ "--pipeline_job_resource_name",
200
+ dsl.PIPELINE_JOB_RESOURCE_NAME_PLACEHOLDER,
201
+ "--model_name",
202
+ model.metadata["resourceName"],
203
+ "--gcp_resources",
204
+ gcp_resources,
205
+ "--evaluation_resource_name",
206
+ evaluation_resource_name,
207
+ ],
208
+ )
@@ -32,6 +32,7 @@ def model_evaluation_text_generation(
32
32
  row_based_metrics: Output[Metrics],
33
33
  project: str,
34
34
  location: str,
35
+ model_name: str,
35
36
  evaluation_task: str = 'text-generation',
36
37
  target_field_name: str = 'instance.output_text',
37
38
  prediction_field_name: str = 'predictions.content',
@@ -55,6 +56,7 @@ def model_evaluation_text_generation(
55
56
  Args:
56
57
  project: The GCP project that runs the pipeline component.
57
58
  location: The GCP region that runs the pipeline component.
59
+ model_name: The name of the model to be evaluated.
58
60
  evaluation_task: The task that the large language model will be evaluated
59
61
  on. The evaluation component computes a set of metrics relevant to that
60
62
  specific task. Currently supported tasks are: `summarization`,
@@ -124,6 +126,7 @@ def model_evaluation_text_generation(
124
126
  machine_type=machine_type,
125
127
  image_uri=version.LLM_EVAL_IMAGE_TAG,
126
128
  args=[
129
+ f'--model_name={model_name}',
127
130
  f'--evaluation_task={evaluation_task}',
128
131
  f'--target_field_name={target_field_name}',
129
132
  f'--prediction_field_name={prediction_field_name}',
@@ -101,10 +101,8 @@ def evaluation_dataset_preprocessor_internal(
101
101
  f'--gcs_source_uris={gcs_source_uris}',
102
102
  f'--input_field_name={input_field_name}',
103
103
  f'--role_field_name={role_field_name}',
104
- (
105
- f'--target_field_name={target_field_name}'
106
- f'--model_name={model_name}'
107
- ),
104
+ f'--target_field_name={target_field_name}',
105
+ f'--model_name={model_name}',
108
106
  f'--output_dirs={output_dirs}',
109
107
  '--executor_input={{$.json_escape[1]}}',
110
108
  ],
@@ -14,7 +14,7 @@
14
14
  """Version constants for model evaluation components."""
15
15
 
16
16
  _EVAL_VERSION = 'v0.9.4'
17
- _LLM_EVAL_VERSION = 'v0.6'
17
+ _LLM_EVAL_VERSION = 'v0.7'
18
18
 
19
19
  _EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/model-evaluation'
20
20
  _LLM_EVAL_IMAGE_NAME = 'gcr.io/ml-pipeline/llm-model-evaluation'
@@ -0,0 +1,180 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Vertex Gen AI Evaluation for text classification task."""
15
+
16
+ from typing import Dict, List, NamedTuple
17
+
18
+ from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationClassificationPredictionsPostprocessorOp
19
+ from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
20
+ from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluationOp
21
+ from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
22
+ from google_cloud_pipeline_components.types.artifact_types import VertexModel
23
+ from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
24
+ from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
25
+ from kfp import dsl
26
+ # pylint: disable=unused-argument, unexpected-keyword-arg
27
+
28
+ _PIPELINE_NAME = 'evaluation-llm-classification-pipeline'
29
+
30
+
31
+ @dsl.pipeline(name=_PIPELINE_NAME)
32
+ def evaluation_llm_classification_pipeline( # pylint: disable=dangerous-default-value
33
+ project: str,
34
+ location: str,
35
+ target_field_name: str,
36
+ batch_predict_gcs_source_uris: List[str],
37
+ batch_predict_gcs_destination_output_uri: str,
38
+ model_name: str = 'publishers/google/models/text-bison@002',
39
+ evaluation_task: str = 'text-classification',
40
+ evaluation_class_labels: List[str] = [],
41
+ input_field_name: str = 'input_text',
42
+ batch_predict_instances_format: str = 'jsonl',
43
+ batch_predict_predictions_format: str = 'jsonl',
44
+ batch_predict_model_parameters: Dict[str, str] = {},
45
+ machine_type: str = 'e2-highmem-16',
46
+ service_account: str = '',
47
+ network: str = '',
48
+ dataflow_machine_type: str = 'n1-standard-4',
49
+ dataflow_disk_size_gb: int = 50,
50
+ dataflow_max_num_workers: int = 5,
51
+ dataflow_service_account: str = '',
52
+ dataflow_subnetwork: str = '',
53
+ dataflow_use_public_ips: bool = True,
54
+ encryption_spec_key_name: str = '',
55
+ evaluation_display_name: str = 'evaluation-llm-classification-pipeline-{{$.pipeline_job_uuid}}',
56
+ ) -> NamedTuple(
57
+ 'outputs',
58
+ evaluation_metrics=ClassificationMetrics,
59
+ evaluation_resource_name=str,
60
+ ):
61
+ # fmt: off
62
+ """The LLM Text Classification Evaluation pipeline.
63
+
64
+ Args:
65
+ project: Required. The GCP project that runs the pipeline components.
66
+ location: Required. The GCP region that runs the pipeline components.
67
+ target_field_name: Required. The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
68
+ batch_predict_gcs_source_uris: Required. Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
69
+ batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the output is to be written to.
70
+ model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
71
+ evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. The currently supported classification task is: `text-classification`.
72
+ evaluation_class_labels: The JSON array of class names for the target_field, in the same order they appear in the batch predictions input file.
73
+ input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.
74
+ batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
75
+ batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
76
+ batch_predict_model_parameters: A map of parameters that govern the predictions. Some acceptable parameters include: maxOutputTokens, topK, topP, and temperature.
77
+ machine_type: The machine type of the custom jobs in this pipeline. If not set, defaulted to `e2-highmem-16`. More details: https://cloud.google.com/compute/docs/machine-resource
78
+ service_account: Sets the default service account for workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code Service Agent(https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project.
79
+ network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name, as in `myVPC`. To specify this field, you must have already configured VPC Network Peering for Vertex AI (https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If left unspecified, the job is not peered with any network.
80
+ dataflow_machine_type: The Dataflow machine type for evaluation components.
81
+ dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run. If not set, defaulted to `50`.
82
+ dataflow_max_num_workers: The max number of workers executing the evaluation run. If not set, defaulted to `5`.
83
+ dataflow_service_account: Custom service account to run Dataflow jobs.
84
+ dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
85
+ dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
86
+ encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
87
+ evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
88
+
89
+ Returns:
90
+ evaluation_metrics: ClassificationMetrics Artifact for LLM Text Classification.
91
+ evaluation_resource_name: If run on a user's managed VertexModel, the imported evaluation resource name. Empty if run on a publisher model.
92
+ """
93
+ # fmt: on
94
+ outputs = NamedTuple(
95
+ 'outputs',
96
+ evaluation_metrics=ClassificationMetrics,
97
+ evaluation_resource_name=str,
98
+ )
99
+
100
+ get_vertex_model_task = dsl.importer(
101
+ artifact_uri=(
102
+ f'https://{location}-aiplatform.googleapis.com/v1/{model_name}'
103
+ ),
104
+ artifact_class=VertexModel,
105
+ metadata={'resourceName': model_name},
106
+ )
107
+ get_vertex_model_task.set_display_name('get-vertex-model')
108
+
109
+ eval_dataset_preprocessor_task = LLMEvaluationPreprocessorOp(
110
+ project=project,
111
+ location=location,
112
+ gcs_source_uris=batch_predict_gcs_source_uris,
113
+ input_field_name=input_field_name,
114
+ machine_type=machine_type,
115
+ service_account=service_account,
116
+ network=network,
117
+ encryption_spec_key_name=encryption_spec_key_name,
118
+ )
119
+ batch_predict_task = ModelBatchPredictOp(
120
+ project=project,
121
+ location=location,
122
+ model=get_vertex_model_task.outputs['artifact'],
123
+ job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
124
+ gcs_source_uris=eval_dataset_preprocessor_task.outputs[
125
+ 'preprocessed_gcs_source_uris'
126
+ ],
127
+ instances_format=batch_predict_instances_format,
128
+ predictions_format=batch_predict_predictions_format,
129
+ gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
130
+ model_parameters=batch_predict_model_parameters,
131
+ encryption_spec_key_name=encryption_spec_key_name,
132
+ )
133
+
134
+ postprocessor_task = LLMEvaluationClassificationPredictionsPostprocessorOp(
135
+ project=project,
136
+ batch_prediction_results=batch_predict_task.outputs[
137
+ 'gcs_output_directory'
138
+ ],
139
+ class_labels=evaluation_class_labels,
140
+ location=location,
141
+ machine_type=machine_type,
142
+ network=network,
143
+ service_account=service_account,
144
+ encryption_spec_key_name=encryption_spec_key_name,
145
+ )
146
+
147
+ eval_task = ModelEvaluationClassificationOp(
148
+ project=project,
149
+ location=location,
150
+ class_labels=postprocessor_task.outputs['postprocessed_class_labels'],
151
+ target_field_name=target_field_name,
152
+ predictions_gcs_source=postprocessor_task.outputs[
153
+ 'postprocessed_predictions_gcs_source'
154
+ ],
155
+ prediction_label_column='prediction.classes',
156
+ prediction_score_column='prediction.scores',
157
+ predictions_format=batch_predict_predictions_format,
158
+ dataflow_machine_type=dataflow_machine_type,
159
+ dataflow_max_workers_num=dataflow_max_num_workers,
160
+ dataflow_disk_size_gb=dataflow_disk_size_gb,
161
+ dataflow_service_account=dataflow_service_account,
162
+ dataflow_subnetwork=dataflow_subnetwork,
163
+ dataflow_use_public_ips=dataflow_use_public_ips,
164
+ encryption_spec_key_name=encryption_spec_key_name,
165
+ )
166
+
167
+ import_evaluation_task = ModelImportEvaluationOp(
168
+ classification_metrics=eval_task.outputs['evaluation_metrics'],
169
+ model=get_vertex_model_task.outputs['artifact'],
170
+ dataset_type=batch_predict_instances_format,
171
+ dataset_paths=batch_predict_gcs_source_uris,
172
+ display_name=evaluation_display_name,
173
+ )
174
+
175
+ return outputs(
176
+ evaluation_metrics=eval_task.outputs['evaluation_metrics'],
177
+ evaluation_resource_name=import_evaluation_task.outputs[
178
+ 'evaluation_resource_name'
179
+ ],
180
+ )
@@ -0,0 +1,178 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Vertex Gen AI Evaluation for Text Generation/QA/Summarization tasks."""
15
+
16
+ from typing import Dict, List, NamedTuple
17
+
18
+ from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
19
+ from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationTextGenerationOp
20
+ from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluationOp
21
+ from google_cloud_pipeline_components.types.artifact_types import VertexModel
22
+ from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
23
+ from kfp import dsl
24
+ # pylint: disable=unused-argument, unexpected-keyword-arg
25
+
26
+
27
+ _PIPELINE_NAME = 'evaluation-llm-text-generation-pipeline'
28
+
29
+
30
+ @dsl.pipeline(name=_PIPELINE_NAME)
31
+ def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-default-value
32
+ project: str,
33
+ location: str,
34
+ batch_predict_gcs_source_uris: List[str],
35
+ batch_predict_gcs_destination_output_uri: str,
36
+ model_name: str = 'publishers/google/models/text-bison@002',
37
+ evaluation_task: str = 'text-generation',
38
+ input_field_name: str = 'input_text',
39
+ target_field_name: str = 'output_text',
40
+ batch_predict_instances_format: str = 'jsonl',
41
+ batch_predict_predictions_format: str = 'jsonl',
42
+ batch_predict_model_parameters: Dict[str, str] = {},
43
+ enable_row_based_metrics: bool = False,
44
+ machine_type: str = 'e2-highmem-16',
45
+ service_account: str = '',
46
+ network: str = '',
47
+ encryption_spec_key_name: str = '',
48
+ evaluation_display_name: str = 'evaluation-llm-text-generation-pipeline-{{$.pipeline_job_uuid}}',
49
+ ) -> NamedTuple(
50
+ 'outputs', evaluation_metrics=dsl.Metrics, evaluation_resource_name=str
51
+ ):
52
+ # fmt: off
53
+ """LLM Text Generation Evaluation pipeline.
54
+
55
+ This pipeline supports evaluating large language models, publisher or managed
56
+ models, performing the following generative tasks: `summarization`, `question-answering`, and `text-generation`.
57
+
58
+ Args:
59
+ project: Required. The GCP project that runs the pipeline components.
60
+ location: Required. The GCP region that runs the pipeline components.
61
+ batch_predict_gcs_source_uris: Required. Google Cloud Storage URI(s) to your eval dataset instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on [wildcards](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames). For more details about this [input config](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig). The content of gcs source files should be preset to one of the following formats:
62
+ 1) Prediction & Evaluation Dataset format, guaranteeing "prompt" and "ground_truth" attributes are included
63
+ {
64
+ "prompt": "your input/prompt text",
65
+ "ground_truth": "your ground truth output text"
66
+ }
67
+ or
68
+ 2) Tuning Dataset format, guaranteeing "input_text" and "output_text" attributes are included.
69
+ {
70
+ "input_text": "your input/prompt text",
71
+ "output_text": "your ground truth output text"
72
+ }
73
+ batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the eval pipeline output is to be written to.
74
+ model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
75
+ evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. Currently supported tasks are: `summarization`, `question-answering`, `text-generation`.
76
+ input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.
77
+ target_field_name: The field name of the eval dataset instance that contains an example reference text response. Alternatively referred to as the ground truth (or ground_truth_column) field. If not set, defaulted to `output_text`.
78
+ batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. Only "jsonl" is currently supported. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
79
+ batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. Only "jsonl" is currently supported. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
80
+ batch_predict_model_parameters: A map of parameters that govern the predictions. Some acceptable parameters include: maxOutputTokens, topK, topP, and temperature.
81
+ enable_row_based_metrics: Flag of if row based metrics is enabled, default value is false.
82
+ machine_type: The machine type of this custom job. If not set, defaulted to `e2-highmem-16`. More details: https://cloud.google.com/compute/docs/machine-resource
83
+ service_account: Sets the default service account for workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code Service Agent(https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project.
84
+ network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name, as in `myVPC`. To specify this field, you must have already configured VPC Network Peering for Vertex AI (https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If left unspecified, the job is not peered with any network.
85
+ encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
86
+ evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
87
+
88
+ Returns:
89
+ evaluation_metrics: Metrics Artifact for LLM Text Generation.
90
+ evaluation_resource_name: If run on a user's managed VertexModel, the imported evaluation resource name. Empty if run on a publisher model.
91
+ """
92
+ # fmt: on
93
+ outputs = NamedTuple(
94
+ 'outputs',
95
+ evaluation_metrics=dsl.Metrics,
96
+ evaluation_resource_name=str,
97
+ )
98
+
99
+ get_vertex_model_task = dsl.importer(
100
+ artifact_uri=(
101
+ f'https://{location}-aiplatform.googleapis.com/v1/{model_name}'
102
+ ),
103
+ artifact_class=VertexModel,
104
+ metadata={'resourceName': model_name},
105
+ )
106
+ get_vertex_model_task.set_display_name('get-vertex-model')
107
+
108
+ eval_dataset_preprocessor_task = LLMEvaluationPreprocessorOp(
109
+ project=project,
110
+ location=location,
111
+ gcs_source_uris=batch_predict_gcs_source_uris,
112
+ input_field_name=input_field_name,
113
+ machine_type=machine_type,
114
+ service_account=service_account,
115
+ network=network,
116
+ encryption_spec_key_name=encryption_spec_key_name,
117
+ )
118
+ batch_predict_task = ModelBatchPredictOp(
119
+ project=project,
120
+ location=location,
121
+ model=get_vertex_model_task.outputs['artifact'],
122
+ job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
123
+ gcs_source_uris=eval_dataset_preprocessor_task.outputs[
124
+ 'preprocessed_gcs_source_uris'
125
+ ],
126
+ instances_format=batch_predict_instances_format,
127
+ predictions_format=batch_predict_predictions_format,
128
+ gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
129
+ model_parameters=batch_predict_model_parameters,
130
+ encryption_spec_key_name=encryption_spec_key_name,
131
+ )
132
+
133
+ eval_task = LLMEvaluationTextGenerationOp(
134
+ project=project,
135
+ location=location,
136
+ evaluation_task=evaluation_task,
137
+ target_field_name=f'instance.{target_field_name}',
138
+ predictions_format=batch_predict_predictions_format,
139
+ enable_row_based_metrics=enable_row_based_metrics,
140
+ joined_predictions_gcs_source=batch_predict_task.outputs[
141
+ 'gcs_output_directory'
142
+ ],
143
+ machine_type=machine_type,
144
+ service_account=service_account,
145
+ network=network,
146
+ encryption_spec_key_name=encryption_spec_key_name,
147
+ )
148
+
149
+ with dsl.If(enable_row_based_metrics == True):
150
+ import_evaluation_task_with_row_based_metrics = ModelImportEvaluationOp(
151
+ metrics=eval_task.outputs['evaluation_metrics'],
152
+ row_based_metrics=eval_task.outputs['row_based_metrics'],
153
+ model=get_vertex_model_task.outputs['artifact'],
154
+ problem_type=evaluation_task,
155
+ dataset_type=batch_predict_predictions_format,
156
+ dataset_paths=batch_predict_gcs_source_uris,
157
+ display_name=evaluation_display_name,
158
+ )
159
+ with dsl.Else():
160
+ import_evaluation_task = ModelImportEvaluationOp(
161
+ metrics=eval_task.outputs['evaluation_metrics'],
162
+ model=get_vertex_model_task.outputs['artifact'],
163
+ problem_type=evaluation_task,
164
+ dataset_type=batch_predict_predictions_format,
165
+ dataset_paths=batch_predict_gcs_source_uris,
166
+ display_name=evaluation_display_name,
167
+ )
168
+
169
+ oneof = dsl.OneOf(
170
+ import_evaluation_task_with_row_based_metrics.outputs[
171
+ 'evaluation_resource_name'
172
+ ],
173
+ import_evaluation_task.outputs['evaluation_resource_name'],
174
+ )
175
+ return outputs(
176
+ evaluation_metrics=eval_task.outputs['evaluation_metrics'],
177
+ evaluation_resource_name=oneof,
178
+ )
@@ -0,0 +1,20 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Model based LLM evaluation preview components."""
15
+
16
+ from google_cloud_pipeline_components.preview.model_evaluation.model_based_llm_evaluation.autosxs.autosxs_pipeline import autosxs_pipeline
17
+
18
+ __all__ = [
19
+ 'autosxs_pipeline',
20
+ ]
@@ -0,0 +1,13 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
@@ -0,0 +1,109 @@
1
+ # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Optimization AI Inference and AutoSxS pipeline function."""
15
+
16
+ from typing import Any, Dict, List
17
+
18
+ from google_cloud_pipeline_components import _placeholders
19
+ from google_cloud_pipeline_components._implementation.llm import batch_prediction_pairwise
20
+ from google_cloud_pipeline_components._implementation.llm import model_evaluation_text_generation_pairwise
21
+ from google_cloud_pipeline_components._implementation.llm import online_evaluation_pairwise
22
+ from kfp import dsl
23
+
24
+
25
+ # pylint: disable=dangerous-default-value,g-bare-generic,unused-argument
26
+ @dsl.pipeline(
27
+ name='autosxs-template',
28
+ description='Determines the SxS winrate between two models.',
29
+ )
30
+ def autosxs_pipeline(
31
+ evaluation_dataset: str,
32
+ task: str,
33
+ id_columns: List[str],
34
+ model_a: str = '',
35
+ model_b: str = '',
36
+ autorater_prompt_parameters: Dict[str, Dict[str, str]] = {},
37
+ model_a_prompt_parameters: Dict[str, Dict[str, str]] = {},
38
+ model_b_prompt_parameters: Dict[str, Dict[str, str]] = {},
39
+ response_column_a: str = '',
40
+ response_column_b: str = '',
41
+ model_a_parameters: Dict[str, str] = {},
42
+ model_b_parameters: Dict[str, str] = {},
43
+ human_preference_column: str = '',
44
+ project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
45
+ location: str = _placeholders.LOCATION_PLACEHOLDER,
46
+ judgments_format: str = 'jsonl',
47
+ bigquery_destination_prefix: str = '',
48
+ experimental_args: Dict[str, Any] = {},
49
+ ):
50
+ # fmt: off
51
+ """Evaluates two models side-by-side using an arbiter model.
52
+
53
+ Args:
54
+ evaluation_dataset: A BigQuery table or comma-separated list of GCS paths to a JSONL dataset containing evaluation examples.
55
+ task: Evaluation task in the form `{task}@{version}`. task can be one of `[summarization, question_answering]`. Version is an integer with 3 digits or "latest". Ex: `summarization@001` or `question_answering@latest`.
56
+ id_columns: The columns which distinguish unique evaluation examples.
57
+ model_a: A fully-qualified model resource name (`projects/{project}/locations/{location}/models/{model}@{version}`) or publisher model resource name (`publishers/{publisher}/models/{model}`). This parameter is optional if Model A responses are specified.
58
+ model_b: A fully-qualified model resource name (`projects/{project}/locations/{location}/models/{model}@{version}`) or publisher model resource name (`publishers/{publisher}/models/{model}`). This parameter is optional if Model B responses are specified.
59
+ autorater_prompt_parameters: Map of autorater prompt parameters to columns or templates. The expected parameters are: `inference_instruction` (details on how to perform a task) and `inference_context` (content to reference to perform the task). As an example, `{'inference_context': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the AutoRater's context.
60
+ model_a_prompt_parameters: Map of Model A prompt template parameters to columns or templates. This parameter is optional if Model A predictions are predefined. Example - `{'prompt': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the prompt parameter named `prompt`.
61
+ model_b_prompt_parameters: Map of Model B prompt template parameters to columns or templates. This parameter is optional if Model B predictions are predefined. Example - `{'prompt': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the prompt parameter named `prompt`.
62
+ response_column_a: Either the name of a column in the evaluation dataset containing predefined predictions, or the name of the column in the Model A output containing predictions. If no value is provided, the correct model output column name will attempt to be inferred.
63
+ response_column_b: Either the name of a column in the evaluation dataset containing predefined predictions, or the name of the column in the Model B output containing predictions. If no value is provided, the correct model output column name will attempt to be inferred.
64
+ model_a_parameters: The parameters that govern the predictions from model A, such as temperature or maximum output tokens.
65
+ model_b_parameters: The parameters that govern the predictions from model B, such as temperature or maximum output tokens.
66
+ human_preference_column: The column containing ground truth winners for each example. Providing this parameter adds additional metrics for checking the AutoRater alignment with human preferences.
67
+ project: Project used to run custom jobs. This should be the same project used to run the pipeline.
68
+ location: Location used to run custom jobs. This should be the same location used to run the pipeline.
69
+ judgments_format: The format to write judgments to. Can be either `[json, bigquery]`.
70
+ bigquery_destination_prefix: BigQuery table to write judgments to if the specified format is 'bigquery'.
71
+ experimental_args: Experimentally released arguments. Subject to change.
72
+ """
73
+ # fmt: on
74
+ responses = batch_prediction_pairwise.batch_prediction_pairwise(
75
+ display_name='autosxs-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
76
+ evaluation_dataset=evaluation_dataset,
77
+ id_columns=id_columns,
78
+ task=task,
79
+ autorater_prompt_parameters=autorater_prompt_parameters,
80
+ response_column_a=response_column_a,
81
+ response_column_b=response_column_b,
82
+ model_a=model_a,
83
+ model_b=model_b,
84
+ model_a_prompt_parameters=model_a_prompt_parameters,
85
+ model_b_prompt_parameters=model_b_prompt_parameters,
86
+ model_a_parameters=model_a_parameters,
87
+ model_b_parameters=model_b_parameters,
88
+ human_preference_column=human_preference_column,
89
+ experimental_args=experimental_args,
90
+ ).set_display_name('AutoSxS Batch Prediction')
91
+
92
+ winners = online_evaluation_pairwise.online_evaluation_pairwise(
93
+ inference_output_uri=responses.outputs[
94
+ 'preprocessed_evaluation_dataset_uri'
95
+ ],
96
+ id_columns=id_columns,
97
+ human_preference_column=human_preference_column,
98
+ task=task,
99
+ judgments_format=judgments_format,
100
+ bigquery_destination_prefix=bigquery_destination_prefix,
101
+ experimental_args=experimental_args,
102
+ ).set_display_name('AutoSxS Autorater')
103
+
104
+ model_evaluation_text_generation_pairwise.model_evaluation_text_generation_pairwise(
105
+ judgments_dir=winners.outputs['judgments_uri'],
106
+ human_preference_column=human_preference_column,
107
+ ).set_display_name(
108
+ 'AutoSxS Metrics'
109
+ )
@@ -0,0 +1,58 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # Protobuf Python Version: 0.20240110.0
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf import descriptor as _descriptor
6
+ from google.protobuf import descriptor_pool as _descriptor_pool
7
+ from google.protobuf import symbol_database as _symbol_database
8
+ from google.protobuf.internal import builder as _builder
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
15
+ b'\n\x13preflight_validations.proto\x12\x15preflight_validations"\x90\x02\n\x0eValidationItem\x12O\n\x0bsa_metadata\x18\x02'
16
+ b' \x01(\x0b\x32\x38.preflight_validations.GoogleCloudServiceAccountMetadataH\x00\x12P\n\x0equota_metadata\x18\x03'
17
+ b' \x01(\x0b\x32\x36.preflight_validations.GoogleCloudProjectQuotaMetadataH\x00\x12O\n\x0c\x61pi_metadata\x18\x04'
18
+ b' \x01(\x0b\x32\x37.preflight_validations.GoogleCloudApiEnablementMetadataH\x00\x42\n\n\x08metadata"\xeb\x01\n\x1fGoogleCloudProjectQuotaMetadata\x12\x14\n\x0cservice_name\x18\x01'
19
+ b' \x01(\t\x12s\n\x17metrics_recommendations\x18\x02'
20
+ b' \x03(\x0b\x32R.preflight_validations.GoogleCloudProjectQuotaMetadata.MetricsRecommendationsEntry\x1a=\n\x1bMetricsRecommendationsEntry\x12\x0b\n\x03key\x18\x01'
21
+ b' \x01(\t\x12\r\n\x05value\x18\x02'
22
+ b' \x01(\x03:\x02\x38\x01"P\n!GoogleCloudServiceAccountMetadata\x12\x16\n\x0eprincipal_name\x18\x01'
23
+ b' \x01(\t\x12\x13\n\x0bpermissions\x18\x02 \x03(\t"9\n'
24
+ b' GoogleCloudApiEnablementMetadata\x12\x15\n\rservice_names\x18\x01'
25
+ b' \x03(\tB\x02P\x01\x62\x06proto3'
26
+ )
27
+
28
+ _globals = globals()
29
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
30
+ _builder.BuildTopDescriptorsAndMessages(
31
+ DESCRIPTOR,
32
+ 'google_cloud_pipeline_components.google_cloud_pipeline_components.proto.preflight_validations_pb2',
33
+ _globals,
34
+ )
35
+ if not _descriptor._USE_C_DESCRIPTORS:
36
+ _globals['DESCRIPTOR']._loaded_options = None
37
+ _globals['DESCRIPTOR']._serialized_options = b'P\001'
38
+ _globals[
39
+ '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
40
+ ]._loaded_options = None
41
+ _globals[
42
+ '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
43
+ ]._serialized_options = b'8\001'
44
+ _globals['_VALIDATIONITEM']._serialized_start = 142
45
+ _globals['_VALIDATIONITEM']._serialized_end = 414
46
+ _globals['_GOOGLECLOUDPROJECTQUOTAMETADATA']._serialized_start = 417
47
+ _globals['_GOOGLECLOUDPROJECTQUOTAMETADATA']._serialized_end = 652
48
+ _globals[
49
+ '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
50
+ ]._serialized_start = 591
51
+ _globals[
52
+ '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
53
+ ]._serialized_end = 652
54
+ _globals['_GOOGLECLOUDSERVICEACCOUNTMETADATA']._serialized_start = 654
55
+ _globals['_GOOGLECLOUDSERVICEACCOUNTMETADATA']._serialized_end = 734
56
+ _globals['_GOOGLECLOUDAPIENABLEMENTMETADATA']._serialized_start = 736
57
+ _globals['_GOOGLECLOUDAPIENABLEMENTMETADATA']._serialized_end = 793
58
+ # @@protoc_insertion_point(module_scope)
@@ -151,8 +151,9 @@ def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-defaul
151
151
  eval_task = LLMEvaluationTextGenerationOp(
152
152
  project=project,
153
153
  location=location,
154
+ model_name=model_name,
154
155
  evaluation_task=evaluation_task,
155
- target_field_name=f'instance.{target_field_name}',
156
+ target_field_name=target_field_name,
156
157
  predictions_format=batch_predict_predictions_format,
157
158
  enable_row_based_metrics=enable_row_based_metrics,
158
159
  joined_predictions_gcs_source=batch_predict_task.outputs[
@@ -13,4 +13,4 @@
13
13
  # limitations under the License.
14
14
  """Google Cloud Pipeline Components version."""
15
15
 
16
- __version__ = "2.16.1"
16
+ __version__ = "2.17.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: google-cloud-pipeline-components
3
- Version: 2.16.1
3
+ Version: 2.17.0
4
4
  Summary: This SDK enables a set of First Party (Google owned) pipeline components that allow users to take their experience from Vertex AI SDK and other Google Cloud services and create a corresponding pipeline using KFP or Managed Pipelines.
5
5
  Home-page: https://github.com/kubeflow/pipelines/tree/master/components/google-cloud
6
6
  Author: The Google Cloud Pipeline Components authors
@@ -10,6 +10,7 @@ Project-URL: User Documentation, https://cloud.google.com/vertex-ai/docs/pipelin
10
10
  Project-URL: Reference Documentation, https://google-cloud-pipeline-components.readthedocs.io/
11
11
  Project-URL: Source, https://github.com/kubeflow/pipelines/tree/master/components/google-cloud
12
12
  Project-URL: Release Notes, https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/RELEASE.md
13
+ Platform: UNKNOWN
13
14
  Classifier: Development Status :: 4 - Beta
14
15
  Classifier: Operating System :: Unix
15
16
  Classifier: Operating System :: MacOS
@@ -30,24 +31,24 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
31
  Requires-Python: >=3.8.0,<3.12.0
31
32
  Description-Content-Type: text/markdown
32
33
  License-File: LICENSE
33
- Requires-Dist: google-api-core !=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5
34
- Requires-Dist: kfp <=2.7.0,>=2.6.0
35
- Requires-Dist: google-cloud-aiplatform <2,>=1.14.0
36
- Requires-Dist: Jinja2 <4,>=3.1.2
34
+ Requires-Dist: google-api-core (!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5)
35
+ Requires-Dist: kfp (<=2.7.0,>=2.6.0)
36
+ Requires-Dist: google-cloud-aiplatform (<2,>=1.14.0)
37
+ Requires-Dist: Jinja2 (<4,>=3.1.2)
37
38
  Provides-Extra: docs
38
- Requires-Dist: protobuf <5,>=4.21.1 ; extra == 'docs'
39
- Requires-Dist: grpcio-status <=1.47.0 ; extra == 'docs'
40
- Requires-Dist: commonmark ==0.9.1 ; extra == 'docs'
41
- Requires-Dist: autodocsumm ==0.2.9 ; extra == 'docs'
42
- Requires-Dist: sphinx <6.0.0,>=5.0.2 ; extra == 'docs'
43
- Requires-Dist: sphinx-immaterial ==0.9.0 ; extra == 'docs'
44
- Requires-Dist: sphinx-rtd-theme ==2.0.0 ; extra == 'docs'
45
- Requires-Dist: m2r2 ==0.3.3.post2 ; extra == 'docs'
46
- Requires-Dist: sphinx-notfound-page ==0.8.3 ; extra == 'docs'
39
+ Requires-Dist: protobuf (<5,>=4.21.1) ; extra == 'docs'
40
+ Requires-Dist: grpcio-status (<=1.47.0) ; extra == 'docs'
41
+ Requires-Dist: commonmark (==0.9.1) ; extra == 'docs'
42
+ Requires-Dist: autodocsumm (==0.2.9) ; extra == 'docs'
43
+ Requires-Dist: sphinx (<6.0.0,>=5.0.2) ; extra == 'docs'
44
+ Requires-Dist: sphinx-immaterial (==0.9.0) ; extra == 'docs'
45
+ Requires-Dist: sphinx-rtd-theme (==2.0.0) ; extra == 'docs'
46
+ Requires-Dist: m2r2 (==0.3.3.post2) ; extra == 'docs'
47
+ Requires-Dist: sphinx-notfound-page (==0.8.3) ; extra == 'docs'
47
48
  Provides-Extra: tests
48
- Requires-Dist: mock >=4.0.0 ; extra == 'tests'
49
- Requires-Dist: flake8 >=3.0.0 ; extra == 'tests'
50
- Requires-Dist: pytest >=6.0.0 ; extra == 'tests'
49
+ Requires-Dist: mock (>=4.0.0) ; extra == 'tests'
50
+ Requires-Dist: flake8 (>=3.0.0) ; extra == 'tests'
51
+ Requires-Dist: pytest (>=6.0.0) ; extra == 'tests'
51
52
 
52
53
  # Google Cloud Pipeline Components
53
54
 
@@ -92,3 +93,5 @@ Use the following command to install Google Cloud Pipeline Components from [PyPI
92
93
  ```shell
93
94
  pip install -U google-cloud-pipeline-components
94
95
  ```
96
+
97
+
@@ -2,7 +2,7 @@ google_cloud_pipeline_components/__init__.py,sha256=HOU1WNUeWpY1HjluW8vbI7Cai_EF
2
2
  google_cloud_pipeline_components/_image.py,sha256=lANDYNk1WSuGZSoTTRcWdjsUvCkkA-PmwouTM9Et7fY,828
3
3
  google_cloud_pipeline_components/_placeholders.py,sha256=9yIbieYjZkZJg4xuKklxWQVn3Z-9ryb9Q6V15Jqpe98,3090
4
4
  google_cloud_pipeline_components/utils.py,sha256=9FG7umyEXhyUvtNeC46NuQ04olDMR3o-Wp78V1xs8GY,11045
5
- google_cloud_pipeline_components/version.py,sha256=SP3sOlCOMI-zlRMGGvuqYoBYWVLHF-u5h5tqKoQiyMM,678
5
+ google_cloud_pipeline_components/version.py,sha256=oiNOAYZSDGBwtvZJCPchicTrMKCZ_ty9jdtzziPGfgY,678
6
6
  google_cloud_pipeline_components/_implementation/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
7
7
  google_cloud_pipeline_components/_implementation/llm/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
8
8
  google_cloud_pipeline_components/_implementation/llm/batch_prediction_pairwise.py,sha256=mARB-tDYFr0tpBrLCIh481H4LDuXdr_8UyKVUaOF5Cw,7569
@@ -30,13 +30,13 @@ google_cloud_pipeline_components/_implementation/llm/utils.py,sha256=E250cmvw0QU
30
30
  google_cloud_pipeline_components/_implementation/llm/utils_test.py,sha256=co8gWyrowY5CpkFNsaLGQlD_gpIykkVI7czxIizp5cM,2864
31
31
  google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py,sha256=35E2jx3wG_1-qnF9u6GygEmekJ3dG9LklJx2Bh6ayJ8,4350
32
32
  google_cloud_pipeline_components/_implementation/llm/generated/__init__.py,sha256=LYxMCPeZcfGqRbt3mo3hY7U02A6G8aWdP_RtdoqxNrQ,606
33
- google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py,sha256=dSLCXSQKd4M1HxSIhoAmASPIqDa2qx7BMzHB97vIEz8,763
33
+ google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py,sha256=UJ2J8TOz1rDNgSqUe-Mj2k0-G_Eb8fu7Yg4BwaG2IrM,763
34
34
  google_cloud_pipeline_components/_implementation/model/__init__.py,sha256=KmOW74re0WZ93DWM1lqqQYbv6w1aIW66BMV3gaAdg3s,811
35
35
  google_cloud_pipeline_components/_implementation/model/get_model/__init__.py,sha256=cXMkDUZHVSbXeXSa3qsI6Ef8Tad9nmusw5NUZaYORdE,662
36
36
  google_cloud_pipeline_components/_implementation/model/get_model/component.py,sha256=H2sbMTWCw8nMDMT-Ni9-pdzVXEFmHYjtP3z1LcI5m5w,2307
37
37
  google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py,sha256=oB1e-LSzK0pctcXf6M9Uiu3_83QxC7RnUy4lU1dlOJs,4692
38
38
  google_cloud_pipeline_components/_implementation/model_evaluation/utils.py,sha256=9V34RtPZSRNeBwcsImaZM6YC3T7CafT_E00Iby4KHxw,3540
39
- google_cloud_pipeline_components/_implementation/model_evaluation/version.py,sha256=QHCvg6WZkQq-VdX7hbWCQHK-7SjHpxSppGa9ppyJNmk,963
39
+ google_cloud_pipeline_components/_implementation/model_evaluation/version.py,sha256=XvzrHsB6WzGMjkz8-PvSJhQToWTbUByPPSoz9DLzKF4,963
40
40
  google_cloud_pipeline_components/_implementation/model_evaluation/chunking/__init__.py,sha256=PRHVsIq1dFb0mweuU0kfUUP90FbX--kxdBGCpwfLTgA,665
41
41
  google_cloud_pipeline_components/_implementation/model_evaluation/chunking/component.py,sha256=lv0eNdIBtnCUs9wi9CGUWE_fd4zodGN7M8fTBiCg0g0,4436
42
42
  google_cloud_pipeline_components/_implementation/model_evaluation/chunking/feature_store_grounding_pipeline.py,sha256=zGCMJhBCWBV-Dg5gz-wpheLQhD-GuEW_fgcb0cWa78U,4167
@@ -57,6 +57,8 @@ google_cloud_pipeline_components/_implementation/model_evaluation/feature_extrac
57
57
  google_cloud_pipeline_components/_implementation/model_evaluation/feature_extractor/component.py,sha256=u4P3hSGBnFXtn8MKb8g1J9FAA74VpowBmPTmSDFq9J4,5539
58
58
  google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluated_annotation/__init__.py,sha256=-znXeLR3Uhon4BsRcAqHdVhcSUCqkY7rFouQFLD6Atg,684
59
59
  google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluated_annotation/component.py,sha256=d1QlNA_hpSJNgvwRJFGKZnVi0zc0KhZEHzVkq5wJWXQ,3053
60
+ google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/__init__.py,sha256=eG4D5YbXHkveV6PP1uX8ZL48LCZ1mUtKGKb520dpILg,674
61
+ google_cloud_pipeline_components/_implementation/model_evaluation/import_evaluation/component.py,sha256=uZEtGhRtkCtjJFxBHFv8LSm_m7KlJxgmMhc2Whw6Mrw,8026
60
62
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_classification_postprocessor/__init__.py,sha256=kEQ4aaKnV-KulHqbhb6eJIZzk4O7uSBRPzm_e3q_hcA,697
61
63
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_classification_postprocessor/component.py,sha256=H0g7nMK3JVdQonLe5jpKvvzm9N2YzFGQUWJwwfxQ33s,11227
62
64
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_embedding/__init__.py,sha256=CUSeP0l2KFuo2wbw00DE5Zur0jpgHpZ1aThg7yqWuGY,680
@@ -64,9 +66,9 @@ google_cloud_pipeline_components/_implementation/model_evaluation/llm_embedding/
64
66
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_embedding_retrieval/__init__.py,sha256=Q9YimgEKkKP8QW8fV50nNRjWXdt0_90Qr_gfQ0A9fao,691
65
67
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_embedding_retrieval/component.py,sha256=er26AxV0ydpKFbhrLs2IIia9hbLxXhe78Q-fQvvK_20,7265
66
68
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/__init__.py,sha256=gDyltb_vTZRncaVZbMUkXYBHZsEg_CuaPAQVWUOGy7c,671
67
- google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py,sha256=rclGlXxDr6vozQ6elDDCDbdJXWjG461Z9nmTaiKGsFs,7478
69
+ google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation/component.py,sha256=vU3EsXoquUpsbu999rByKkZTvMYLTkH-Xdj2ycI-KnA,7598
68
70
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/__init__.py,sha256=LLvQQ9Mv_md1POK_XErR7NUr-XAZX28w5KD0EQiT32w,684
69
- google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py,sha256=L6dPaWuOr-BhP7yAatFGVTX2MYTuI2f88W_oR5ee88E,9170
71
+ google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py,sha256=HOj4_2DOXVP0s0NkVAY1_q4j4okqEcHpdfPsE7W2dI8,9131
70
72
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_information_retrieval_preprocessor/__init__.py,sha256=UcAfohvqwJ7X8rlV6I2RsZ5ohYWbafXI5yYRAAwvspE,695
71
73
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_information_retrieval_preprocessor/component.py,sha256=hpwra0rXUP_LG6WNdnB9RD0cy1Gn3InrzAUJIk-oRc4,8142
72
74
  google_cloud_pipeline_components/_implementation/model_evaluation/llm_retrieval_metrics/__init__.py,sha256=oVVVTCE230KYGcuz_rXYEuypZmm6OKg7K2BsFtpcDw0,701
@@ -360,15 +362,21 @@ google_cloud_pipeline_components/preview/llm/rlhf/__init__.py,sha256=sb6SfJl6rt3
360
362
  google_cloud_pipeline_components/preview/llm/rlhf/component.py,sha256=9XpSVUga81GTqkn4qMBddhc41IzY0-p4IBC4UFP50G4,15339
361
363
  google_cloud_pipeline_components/preview/model_evaluation/__init__.py,sha256=n076N7S2HEdMuSZ8cyKM6acjCjslOHjCx4SR49asRME,2121
362
364
  google_cloud_pipeline_components/preview/model_evaluation/data_bias_component.py,sha256=YiwkWfbGymX_lDIg_x7AP6nYMm3MQp_NgV8xuSZxCpU,5791
365
+ google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_classification_pipeline.py,sha256=IjCIakZFh7KB6kmku7ztaH-qFm9LoEctwwMtxmDOg5g,11262
366
+ google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_text_generation_pipeline.py,sha256=afS9-rkKbQHDwOldtFC8w7Xv9UOqIU-Xt6ZEim14fqg,10761
363
367
  google_cloud_pipeline_components/preview/model_evaluation/feature_attribution_component.py,sha256=XWrI1inQ9hKixFrp2LUdgu7hONYUvbsxv2GXZ-UTkCY,7450
364
368
  google_cloud_pipeline_components/preview/model_evaluation/feature_attribution_graph_component.py,sha256=jesgBUKbIB_qQoYb5-Bv_LBbFHl0tPyMlVFx-o1eE6k,13624
365
369
  google_cloud_pipeline_components/preview/model_evaluation/model_bias_component.py,sha256=R8WhT8jf_OOpMuABRh2BYTDEcfiGAf6VA-vFgiTymYY,6674
366
370
  google_cloud_pipeline_components/preview/model_evaluation/model_evaluation_import_component.py,sha256=ePHbJqWYbZR5wCgJ8aVgDrOcWSoAiVq7df6HxTw1gdY,8118
367
371
  google_cloud_pipeline_components/preview/model_evaluation/utils.py,sha256=oRlEvA3zMSTzgxJklZD0A-BzFDx0-PsBHBXZ4kmaREY,7539
372
+ google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/__init__.py,sha256=ee_EyGhwqXIjR3Rx9t-o2gV9TssU-VErMU7LtDA7s9k,838
373
+ google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
374
+ google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py,sha256=rymTHXZZCMFtwzf9qOjDx2qd2yCdTYzUO0kfja7kAiE,7055
368
375
  google_cloud_pipeline_components/preview/starry_net/__init__.py,sha256=yEpcicPBsGqgahrCtcJ06a_7E2pUgdp32U0yWwFRy7E,796
369
376
  google_cloud_pipeline_components/preview/starry_net/component.py,sha256=SdmUfL7xZfK8yWOuVqDczqTqJMShAga3rJxgj4mFCxE,24424
370
377
  google_cloud_pipeline_components/proto/__init__.py,sha256=aiPUc6gpQwG9cRTYfw3ChFCJfDr3vAIsm2eMYUDJjJQ,661
371
378
  google_cloud_pipeline_components/proto/gcp_resources_pb2.py,sha256=ssNNm4zjiWbuBUS7IH6kyrvvfmcC_Z5F7hOAuQe_YLk,2134
379
+ google_cloud_pipeline_components/proto/preflight_validations_pb2.py,sha256=ZZFTISXvqQl-284O_zPJrgXO4REPLwn3qxXg8BTJHM0,3056
372
380
  google_cloud_pipeline_components/proto/task_error_pb2.py,sha256=PPz4luEm3Pa3YDn4mtpqEPl8QVSFa44pV8W2KbWRq1I,1235
373
381
  google_cloud_pipeline_components/proto/template_metadata_pb2.py,sha256=jAIJgc5UWYz8E0Vf3IbjPlyiWI3kCNYrJHyAUaQTpgc,10067
374
382
  google_cloud_pipeline_components/types/__init__.py,sha256=1WFkL49QEy-gNb6ywQOE4yZkD7DoULAeiL1tLdb3S28,606
@@ -544,7 +552,7 @@ google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_p
544
552
  google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_unstructure_data_pipeline.py,sha256=LlMSMVbiZGXOZo4SqEQZZu9GQWWzHnwhrygwmDkqVdA,42457
545
553
  google_cloud_pipeline_components/v1/model_evaluation/evaluation_feature_attribution_pipeline.py,sha256=wEu67EPXb9x9PcYMYmnuETKbAz_lP5Kewv439IiFxIM,51226
546
554
  google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_classification_pipeline.py,sha256=vF_EOPg7S3UCTCNw6GrhQz_Sn8ctPZ-JpJEt6JlSF3Y,12036
547
- google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py,sha256=pqRLTWQpJLwn4kKQ5o5hx9_FlcPDcIC0Z3joykJHBhs,11854
555
+ google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py,sha256=1WD_fbYSdYrXMUEJiULgMY5Vi2BlklmvwkhtZWZeTxs,11869
548
556
  google_cloud_pipeline_components/v1/model_evaluation/forecasting_component.py,sha256=gOnvKAJWa3velczeuVBCzW6b_tcc2v_lNFqHXGhjD44,10017
549
557
  google_cloud_pipeline_components/v1/model_evaluation/regression_component.py,sha256=eFrjrKQot3-SlRCoKoTOEsyp2Xj0GfDtrjpxTDKAHYY,9117
550
558
  google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation/__init__.py,sha256=57gYyoAJwLIeXoLTTyVqOWgkj7dlUP4IKM4OmMabCRM,828
@@ -554,8 +562,8 @@ google_cloud_pipeline_components/v1/vertex_notification_email/__init__.py,sha256
554
562
  google_cloud_pipeline_components/v1/vertex_notification_email/component.py,sha256=Dau8ZI0mzLBnLOUBQm6EtK8gbtX1u57t76Ud5qlg9xc,2163
555
563
  google_cloud_pipeline_components/v1/wait_gcp_resources/__init__.py,sha256=w6dfz-rYsYnxFapRH1Dix3GVz0mhPW0m1IVpE6z8jbg,878
556
564
  google_cloud_pipeline_components/v1/wait_gcp_resources/component.py,sha256=Nsfj5c3eeZq83fHLvv2IlpK4jrjxLxRksFYOl5W6JnA,2468
557
- google_cloud_pipeline_components-2.16.1.dist-info/LICENSE,sha256=VAc1R5OxOELKsX5L5Ldp5THfNtxtt1cMIZBaC0Jdj5Q,13118
558
- google_cloud_pipeline_components-2.16.1.dist-info/METADATA,sha256=mKcwBgD0FKhbdCC315q_jU7a2GijYVZ_UtPRQHbqfMQ,5810
559
- google_cloud_pipeline_components-2.16.1.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
560
- google_cloud_pipeline_components-2.16.1.dist-info/top_level.txt,sha256=E8T4T8KGMGLXbHvt2goa98oezRpxryPC6QhWBZ27Hhc,33
561
- google_cloud_pipeline_components-2.16.1.dist-info/RECORD,,
565
+ google_cloud_pipeline_components-2.17.0.dist-info/LICENSE,sha256=VAc1R5OxOELKsX5L5Ldp5THfNtxtt1cMIZBaC0Jdj5Q,13118
566
+ google_cloud_pipeline_components-2.17.0.dist-info/METADATA,sha256=X7lKaGEXx4JV6NTMgGp9sZjOB8fHRl25ULQln2Ov6tM,5862
567
+ google_cloud_pipeline_components-2.17.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
568
+ google_cloud_pipeline_components-2.17.0.dist-info/top_level.txt,sha256=E8T4T8KGMGLXbHvt2goa98oezRpxryPC6QhWBZ27Hhc,33
569
+ google_cloud_pipeline_components-2.17.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (72.2.0)
2
+ Generator: bdist_wheel (0.37.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5