google-cloud-pipeline-components 2.14.0__py3-none-any.whl → 2.15.0__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Files changed (64)
  1. google_cloud_pipeline_components/_implementation/llm/deployment_graph.py +10 -26
  2. google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
  3. google_cloud_pipeline_components/_implementation/llm/infer_preprocessor.py +109 -0
  4. google_cloud_pipeline_components/_implementation/llm/online_evaluation_pairwise.py +8 -0
  5. google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py +5 -6
  6. google_cloud_pipeline_components/_implementation/llm/rlhf_preprocessor.py +24 -0
  7. google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py +0 -12
  8. google_cloud_pipeline_components/_implementation/model_evaluation/llm_embedding/evaluation_llm_embedding_pipeline.py +2 -1
  9. google_cloud_pipeline_components/_implementation/model_evaluation/llm_evaluation_preprocessor/component.py +14 -0
  10. google_cloud_pipeline_components/_implementation/starry_net/__init__.py +41 -0
  11. google_cloud_pipeline_components/_implementation/{model_evaluation/import_evaluation → starry_net/dataprep}/__init__.py +1 -2
  12. google_cloud_pipeline_components/_implementation/starry_net/dataprep/component.py +159 -0
  13. google_cloud_pipeline_components/_implementation/starry_net/evaluation/__init__.py +13 -0
  14. google_cloud_pipeline_components/_implementation/starry_net/evaluation/component.py +23 -0
  15. google_cloud_pipeline_components/_implementation/starry_net/evaluation/evaluation.yaml +197 -0
  16. google_cloud_pipeline_components/_implementation/starry_net/get_training_artifacts/__init__.py +13 -0
  17. google_cloud_pipeline_components/_implementation/starry_net/get_training_artifacts/component.py +62 -0
  18. google_cloud_pipeline_components/_implementation/starry_net/maybe_set_tfrecord_args/__init__.py +13 -0
  19. google_cloud_pipeline_components/_implementation/starry_net/maybe_set_tfrecord_args/component.py +77 -0
  20. google_cloud_pipeline_components/_implementation/starry_net/set_dataprep_args/__init__.py +13 -0
  21. google_cloud_pipeline_components/_implementation/starry_net/set_dataprep_args/component.py +97 -0
  22. google_cloud_pipeline_components/_implementation/starry_net/set_eval_args/__init__.py +13 -0
  23. google_cloud_pipeline_components/_implementation/starry_net/set_eval_args/component.py +76 -0
  24. google_cloud_pipeline_components/_implementation/starry_net/set_test_set/__init__.py +13 -0
  25. google_cloud_pipeline_components/_implementation/starry_net/set_test_set/component.py +48 -0
  26. google_cloud_pipeline_components/_implementation/starry_net/set_tfrecord_args/__init__.py +13 -0
  27. google_cloud_pipeline_components/_implementation/starry_net/set_tfrecord_args/component.py +70 -0
  28. google_cloud_pipeline_components/_implementation/starry_net/set_train_args/__init__.py +13 -0
  29. google_cloud_pipeline_components/_implementation/starry_net/set_train_args/component.py +90 -0
  30. google_cloud_pipeline_components/_implementation/starry_net/train/__init__.py +13 -0
  31. google_cloud_pipeline_components/_implementation/starry_net/train/component.py +209 -0
  32. google_cloud_pipeline_components/_implementation/starry_net/upload_decomposition_plots/__init__.py +13 -0
  33. google_cloud_pipeline_components/_implementation/starry_net/upload_decomposition_plots/component.py +59 -0
  34. google_cloud_pipeline_components/_implementation/starry_net/upload_model/__init__.py +13 -0
  35. google_cloud_pipeline_components/_implementation/starry_net/upload_model/component.py +23 -0
  36. google_cloud_pipeline_components/_implementation/starry_net/upload_model/upload_model.yaml +37 -0
  37. google_cloud_pipeline_components/_implementation/starry_net/version.py +18 -0
  38. google_cloud_pipeline_components/container/utils/error_surfacing.py +45 -0
  39. google_cloud_pipeline_components/container/v1/model/get_model/remote_runner.py +36 -7
  40. google_cloud_pipeline_components/preview/llm/infer/component.py +22 -25
  41. google_cloud_pipeline_components/preview/llm/rlhf/component.py +15 -8
  42. google_cloud_pipeline_components/preview/model_evaluation/__init__.py +4 -1
  43. google_cloud_pipeline_components/{_implementation/model_evaluation/import_evaluation/component.py → preview/model_evaluation/model_evaluation_import_component.py} +4 -3
  44. google_cloud_pipeline_components/preview/starry_net/__init__.py +19 -0
  45. google_cloud_pipeline_components/preview/starry_net/component.py +443 -0
  46. google_cloud_pipeline_components/proto/task_error_pb2.py +32 -0
  47. google_cloud_pipeline_components/v1/automl/forecasting/prophet_predict_pipeline.yaml +13 -13
  48. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py +10 -0
  49. google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml +4 -1
  50. google_cloud_pipeline_components/v1/model_evaluation/error_analysis_pipeline.py +8 -10
  51. google_cloud_pipeline_components/v1/model_evaluation/evaluated_annotation_pipeline.py +2 -2
  52. google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_feature_attribution_pipeline.py +2 -2
  53. google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_pipeline.py +2 -2
  54. google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_unstructure_data_pipeline.py +2 -2
  55. google_cloud_pipeline_components/v1/model_evaluation/evaluation_feature_attribution_pipeline.py +2 -2
  56. google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_classification_pipeline.py +4 -2
  57. google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py +8 -2
  58. google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py +1 -0
  59. google_cloud_pipeline_components/version.py +1 -1
  60. {google_cloud_pipeline_components-2.14.0.dist-info → google_cloud_pipeline_components-2.15.0.dist-info}/METADATA +17 -20
  61. {google_cloud_pipeline_components-2.14.0.dist-info → google_cloud_pipeline_components-2.15.0.dist-info}/RECORD +64 -32
  62. {google_cloud_pipeline_components-2.14.0.dist-info → google_cloud_pipeline_components-2.15.0.dist-info}/WHEEL +1 -1
  63. {google_cloud_pipeline_components-2.14.0.dist-info → google_cloud_pipeline_components-2.15.0.dist-info}/LICENSE +0 -0
  64. {google_cloud_pipeline_components-2.14.0.dist-info → google_cloud_pipeline_components-2.15.0.dist-info}/top_level.txt +0 -0
google_cloud_pipeline_components/_implementation/starry_net/upload_model/component.py

@@ -0,0 +1,23 @@
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Starry Net Upload Model Component."""
+
+ import os
+
+ from kfp import components
+
+ # TODO(b/346580764)
+ upload_model = components.load_component_from_file(
+     os.path.join(os.path.dirname(__file__), 'upload_model.yaml')
+ )
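Because the component is defined entirely by the YAML spec below and loaded at import time, it can be wired into a pipeline like any other KFP component. A minimal, untested sketch; the pipeline name, bucket URI, and argument values are hypothetical:

# Hypothetical sketch: wiring the YAML-backed upload_model component into a
# pipeline. All values below are placeholders, not defaults from the spec.
from google_cloud_pipeline_components._implementation.starry_net.upload_model.component import upload_model
from google_cloud_pipeline_components.types import artifact_types
from kfp import dsl


@dsl.pipeline(name='upload-model-demo')
def upload_demo(project: str, display_name: str):
  # Rehydrate the component's optional google.UnmanagedContainerModel input.
  model_artifact = dsl.importer(
      artifact_uri='gs://my-bucket/model',  # hypothetical
      artifact_class=artifact_types.UnmanagedContainerModel,
  )
  upload_model(
      project=project,
      display_name=display_name,
      unmanaged_container_model=model_artifact.output,
  )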
google_cloud_pipeline_components/_implementation/starry_net/upload_model/upload_model.yaml

@@ -0,0 +1,37 @@
+ name: model_upload
+ inputs:
+ - {name: project, type: String}
+ - {name: location, type: String, default: "us-central1"}
+ - {name: display_name, type: String}
+ - {name: description, type: String, optional: true, default: ''}
+ - {name: unmanaged_container_model, type: google.UnmanagedContainerModel, optional: true}
+ - {name: encryption_spec_key_name, type: String, optional: true, default: ''}
+ - {name: labels, type: JsonObject, optional: true, default: '{}'}
+ - {name: parent_model, type: google.VertexModel, optional: true}
+ outputs:
+ - {name: model, type: google.VertexModel}
+ - {name: gcp_resources, type: String}
+ implementation:
+   container:
+     image: gcr.io/ml-pipeline/automl-tables-private:1.0.17
+     command: [python3, -u, -m, launcher]
+     args: [
+       --type, UploadModel,
+       --payload,
+       concat: [
+         '{',
+         '"display_name": "', {inputValue: display_name}, '"',
+         ', "description": "', {inputValue: description}, '"',
+         ', "encryption_spec": {"kms_key_name":"', {inputValue: encryption_spec_key_name}, '"}',
+         ', "labels": ', {inputValue: labels},
+         '}'
+       ],
+       --project, {inputValue: project},
+       --location, {inputValue: location},
+       --gcp_resources, {outputPath: gcp_resources},
+       --executor_input, "{{$}}",
+       {if: {
+         cond: {isPresent: parent_model},
+         then: ["--parent_model_name", "{{$.inputs.artifacts['parent_model'].metadata['resourceName']}}",]
+       }},
+     ]
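For reference, the concat placeholder assembles the --payload flag into a single JSON string. A small Python illustration of what the launcher receives, using hypothetical input values:

# Illustration only: the JSON the YAML's `concat` resolves to for
# hypothetical inputs (description, key name, and labels left at defaults).
import json

display_name = 'starry-net-model'  # hypothetical
description = ''
encryption_spec_key_name = ''
labels = '{}'

payload = (
    '{'
    + f'"display_name": "{display_name}"'
    + f', "description": "{description}"'
    + f', "encryption_spec": {{"kms_key_name":"{encryption_spec_key_name}"}}'
    + f', "labels": {labels}'
    + '}'
)
print(json.loads(payload))
# {'display_name': 'starry-net-model', 'description': '',
#  'encryption_spec': {'kms_key_name': ''}, 'labels': {}}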
google_cloud_pipeline_components/_implementation/starry_net/version.py

@@ -0,0 +1,18 @@
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Version constants for starry net components."""
+
+ DATAPREP_VERSION = '20240617_2225_RC00'
+ PREDICTOR_VERSION = '20240617_2142_RC00'
+ TRAINER_VERSION = '20240617_2142_RC00'
google_cloud_pipeline_components/container/utils/error_surfacing.py

@@ -0,0 +1,45 @@
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Utilities for surface user defined error messages."""
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from google_cloud_pipeline_components.proto import task_error_pb2
+
+
+ def write_customized_error(
+     executor_input: str, error: task_error_pb2.TaskError
+ ):
+   """Writes a TaskError customized by the author of the pipelines to a JSON file ('executor_error.json') in the output directory specified in the executor input.
+
+   Args:
+     executor_input: JSON string containing executor input data.
+     error: TaskError protocol buffer message.
+   """
+   executor_input_json = json.loads(executor_input)
+   os.makedirs(
+       os.path.dirname(executor_input_json['outputs']['outputFile']),
+       exist_ok=True,
+   )
+   executor_out_path = executor_input_json['outputs']['outputFile']
+   directory_path = os.path.dirname(executor_out_path)
+   executor_error_path = os.path.join(directory_path, 'executor_error.json')
+   error_dict = json_format.MessageToDict(error)
+   with open(
+       executor_error_path,
+       'w',
+   ) as f:
+     json.dump(error_dict, f)
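A minimal usage sketch, assuming only the 'outputs.outputFile' key the function actually reads from the executor input; the path is hypothetical:

# Minimal sketch of write_customized_error with a hypothetical executor input.
import json

from google_cloud_pipeline_components.container.utils import error_surfacing
from google_cloud_pipeline_components.proto import task_error_pb2

executor_input = json.dumps(
    {'outputs': {'outputFile': '/tmp/outputs/executor_output.json'}}
)
error = task_error_pb2.TaskError()
error.error_message = 'something the pipeline author wants surfaced'

# Creates /tmp/outputs/ if needed and writes /tmp/outputs/executor_error.json;
# protobuf's default JSON mapping renders the field as "errorMessage".
error_surfacing.write_customized_error(executor_input, error)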
google_cloud_pipeline_components/container/v1/model/get_model/remote_runner.py

@@ -12,13 +12,33 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  """Remote runner for Get Model based on the Vertex AI SDK."""
-
+ import contextlib
+ from typing import Tuple, Type, Union
  from google.api_core.client_options import ClientOptions
  from google.cloud import aiplatform_v1 as aip_v1
  from google_cloud_pipeline_components.container.utils import artifact_utils
+ from google_cloud_pipeline_components.container.utils import error_surfacing
+ from google_cloud_pipeline_components.proto import task_error_pb2
  from google_cloud_pipeline_components.types import artifact_types


+ @contextlib.contextmanager
+ def catch_write_and_raise(
+     executor_input: str,
+     exception_types: Union[
+         Type[Exception], Tuple[Type[Exception], ...]
+     ] = Exception,
+ ):
+   """Context manager to catch specified exceptions, log them using error_surfacing, and then re-raise."""
+   try:
+     yield
+   except exception_types as e:
+     task_error = task_error_pb2.TaskError()
+     task_error.error_message = str(e)
+     error_surfacing.write_customized_error(executor_input, task_error)
+     raise
+
+
  def get_model(
      executor_input,
      model_name: str,
@@ -26,11 +46,16 @@ def get_model(
      location: str,
  ) -> None:
    """Get model."""
-   if not location or not project:
-     raise ValueError(
-         'Model resource name must be in the format'
-         ' projects/{project}/locations/{location}/models/{model_name}'
-     )
+   with catch_write_and_raise(
+       executor_input=executor_input,
+       exception_types=ValueError,
+   ):
+     if not location or not project:
+       model_name_error_message = (
+           'Model resource name must be in the format'
+           ' projects/{project}/locations/{location}/models/{model_name}'
+       )
+       raise ValueError(model_name_error_message)
    api_endpoint = location + '-aiplatform.googleapis.com'
    vertex_uri_prefix = f'https://{api_endpoint}/v1/'
    model_resource_name = (
@@ -40,7 +65,11 @@ def get_model(
    client_options = ClientOptions(api_endpoint=api_endpoint)
    client = aip_v1.ModelServiceClient(client_options=client_options)
    request = aip_v1.GetModelRequest(name=model_resource_name)
-   get_model_response = client.get_model(request)
+   with catch_write_and_raise(
+       executor_input=executor_input,
+       exception_types=Exception,
+   ):
+     get_model_response = client.get_model(request)
    resp_model_name_without_version = get_model_response.name.split('@', 1)[0]
    model_resource_name = (
        f'{resp_model_name_without_version}@{get_model_response.version_id}'
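A standalone sketch of the new context manager's behavior, using a hypothetical executor input; the exception is first serialized to executor_error.json and then re-raised, so the task still fails:

# Sketch only: exercising catch_write_and_raise outside a pipeline container.
import json

from google_cloud_pipeline_components.container.v1.model.get_model import remote_runner

executor_input = json.dumps({'outputs': {'outputFile': '/tmp/outputs/out.json'}})
try:
  with remote_runner.catch_write_and_raise(
      executor_input=executor_input, exception_types=ValueError
  ):
    raise ValueError('missing project')
except ValueError:
  # /tmp/outputs/executor_error.json now holds {"errorMessage": "missing project"}.
  pass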
google_cloud_pipeline_components/preview/llm/infer/component.py

@@ -18,7 +18,7 @@ from typing import NamedTuple, Optional
  from google_cloud_pipeline_components import _placeholders
  from google_cloud_pipeline_components._implementation.llm import bulk_inferrer
  from google_cloud_pipeline_components._implementation.llm import env
- from google_cloud_pipeline_components._implementation.llm import function_based
+ from google_cloud_pipeline_components._implementation.llm import infer_preprocessor
  from google_cloud_pipeline_components._implementation.llm import preprocess_chat_dataset
  from google_cloud_pipeline_components._implementation.llm import private_text_importer
  import kfp
@@ -66,14 +66,16 @@ def infer_pipeline(
    """
    # fmt: on
    prompt_column = 'input_text'
-   machine_spec = function_based.resolve_machine_spec(
+   preprocess_metadata = infer_preprocessor.infer_preprocessor(
+       large_model_reference=large_model_reference,
        accelerator_type=accelerator_type,
        use_test_spec=env.get_use_test_machine_spec(),
-   ).set_display_name('Resolve Machine Spec')
-   reference_model_metadata = function_based.resolve_reference_model_metadata(
-       large_model_reference=large_model_reference,
-       reference_model_path=model_checkpoint,
-   ).set_display_name('Resolve Model Metadata')
+       project=env.PRIVATE_ARTIFACT_REGISTRY_PROJECT,
+       location=env.PRIVATE_ARTIFACT_REGISTRY_LOCATION,
+       artifact_registry=env.PRIVATE_ARTIFACT_REGISTRY,
+       tag=env.get_private_image_tag(),
+       instruction=instruction,
+   ).set_display_name('Preprocess Inputs')

    processed_dataset = preprocess_chat_dataset.preprocess_chat_dataset(
        large_model_reference=large_model_reference,
@@ -82,10 +84,6 @@ def infer_pipeline(
        dataset_type='prompt',
    ).set_display_name('Preprocess Dataset')

-   resolved_text_instruction = function_based.resolve_instruction(
-       large_model_reference=large_model_reference,
-       instruction=instruction,
-   ).set_display_name('Resolve Instruction')
    prompt_dataset_importer = (
        private_text_importer.private_text_importer(
            project=project,
@@ -94,35 +92,34 @@ def infer_pipeline(
            inputs_field_name=prompt_column,
            targets_field_name='',  # ignore targets_field_name
            output_split_name=env.TRAIN_SPLIT,
-           large_model_reference=reference_model_metadata.outputs[
-               'large_model_reference'
+           large_model_reference=preprocess_metadata.outputs[
+               'metadata_large_model_reference'
            ],
-           instruction=resolved_text_instruction.output,
+           instruction=preprocess_metadata.outputs['metadata_instruction'],
            encryption_spec_key_name=encryption_spec_key_name,
        )
        .set_display_name('Import Prompt Dataset')
        .set_caching_options(False)
    )

-   bulk_inferrer_image_uri = function_based.resolve_private_refined_image_uri(
-       accelerator_type=machine_spec.outputs['accelerator_type'],
-   ).set_display_name('Resolve Bulk Inferrer Image URI')
    bulk_inference = bulk_inferrer.bulk_inferrer(
        project=project,
-       location=machine_spec.outputs['tuning_location'],
-       input_model=reference_model_metadata.outputs['reference_model_path'],
+       location=preprocess_metadata.outputs['metadata_tuning_location'],
+       input_model=preprocess_metadata.outputs['metadata_reference_model_path'],
        input_dataset_path=prompt_dataset_importer.outputs['imported_data_path'],
        dataset_split=env.TRAIN_SPLIT,
        inputs_sequence_length=prompt_sequence_length,
        targets_sequence_length=target_sequence_length,
-       large_model_reference=reference_model_metadata.outputs[
-           'large_model_reference'
+       large_model_reference=preprocess_metadata.outputs[
+           'metadata_large_model_reference'
        ],
        sampling_strategy=sampling_strategy,
-       accelerator_type=machine_spec.outputs['accelerator_type'],
-       accelerator_count=machine_spec.outputs['accelerator_count'],
-       machine_type=machine_spec.outputs['machine_type'],
-       image_uri=bulk_inferrer_image_uri.output,
+       accelerator_type=preprocess_metadata.outputs['metadata_accelerator_type'],
+       accelerator_count=preprocess_metadata.outputs[
+           'metadata_accelerator_count'
+       ],
+       machine_type=preprocess_metadata.outputs['metadata_machine_type'],
+       image_uri=preprocess_metadata.outputs['metadata_refined_image_uri'],
        encryption_spec_key_name=encryption_spec_key_name,
    ).set_display_name('Bulk Inferrer')
google_cloud_pipeline_components/preview/llm/rlhf/component.py

@@ -106,6 +106,9 @@ def rlhf_pipeline(
        tag=env.get_private_image_tag(),
        evaluation_dataset=eval_dataset,
        tensorboard_resource_id=tensorboard_resource_id,
+       upload_location=location,
+       model_display_name=model_display_name,
+       deploy_model=deploy_model,
    ).set_display_name('Preprocess Inputs')
    num_microbatches = preprocess_metadata.outputs['metadata_num_microbatches']

@@ -133,6 +136,9 @@ def rlhf_pipeline(
        reward_model_image_uri=preprocess_metadata.outputs[
            'metadata_refined_image_uri'
        ],
+       comma_separated_candidates_field_names=preprocess_metadata.outputs[
+           'metadata_candidate_columns_string'
+       ],
        prompt_sequence_length=prompt_sequence_length,
        target_sequence_length=target_sequence_length,
        eval_dataset=validate_pipeline_task.outputs[
@@ -200,12 +206,9 @@ def rlhf_pipeline(
        has_inference_dataset == True,  # pylint: disable=singleton-comparison
        name='Perform Inference',
    ):
-     has_model_checkpoint = function_based.value_exists(
-         value=rl_model_pipeline.outputs['output_model_path']
-     ).set_display_name('Resolve Model Checkpoint')
-     with kfp.dsl.Condition(
-         has_model_checkpoint.output == True,  # pylint: disable=singleton-comparison
-         name='Test Model Checkpoint Exists',
+     with kfp.dsl.If(
+         rl_model_pipeline.outputs['output_model_path'] != '',
+         name='CheckModel Checkpoint Exists',
      ):
        component.infer_pipeline(
            project=project,
@@ -226,10 +229,14 @@ def rlhf_pipeline(
        policy_model_reference=preprocess_metadata.outputs[
            'metadata_large_model_reference'
        ],
-       model_display_name=model_display_name,
-       deploy_model=deploy_model,
+       model_display_name=preprocess_metadata.outputs[
+           'metadata_model_display_name'
+       ],
+       deploy_model=preprocess_metadata.outputs['metadata_deploy_model'],
+       upload_model=preprocess_metadata.outputs['metadata_upload_model'],
        encryption_spec_key_name=encryption_spec_key_name,
        upload_location=location,
+       regional_endpoint=preprocess_metadata.outputs['metadata_upload_location'],
    ).set_display_name('Upload and Deploy Tuned Model')

  return PipelineOutput(
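The switch from the deprecated kfp.dsl.Condition to kfp.dsl.If can be exercised in isolation. A minimal sketch, assuming a recent KFP 2.x SDK; the components and pipeline below are hypothetical:

# Isolated sketch of the dsl.If pattern adopted above: branch directly on an
# upstream output instead of a separate value_exists resolver task.
from kfp import dsl


@dsl.component
def produce_checkpoint_path() -> str:
  return '/gcs/bucket/checkpoint'  # hypothetical


@dsl.component
def consume_checkpoint(path: str):
  print(path)


@dsl.pipeline(name='if-demo')
def demo():
  producer = produce_checkpoint_path()
  with dsl.If(producer.output != '', name='Check Model Checkpoint Exists'):
    consume_checkpoint(path=producer.output)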
google_cloud_pipeline_components/preview/model_evaluation/__init__.py

@@ -1,4 +1,4 @@
- # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -17,10 +17,12 @@ from google_cloud_pipeline_components.preview.model_evaluation.data_bias_component import detect_data_bias as DetectDataBiasOp
  from google_cloud_pipeline_components.preview.model_evaluation.feature_attribution_component import feature_attribution as ModelEvaluationFeatureAttributionOp
  from google_cloud_pipeline_components.preview.model_evaluation.feature_attribution_graph_component import feature_attribution_graph_component as FeatureAttributionGraphComponentOp
  from google_cloud_pipeline_components.preview.model_evaluation.model_bias_component import detect_model_bias as DetectModelBiasOp
+ from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
  from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_classification_pipeline import evaluation_llm_classification_pipeline
  from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_text_generation_pipeline import evaluation_llm_text_generation_pipeline
  from google_cloud_pipeline_components.v1.model_evaluation.model_based_llm_evaluation.autosxs.autosxs_pipeline import autosxs_pipeline

+
  __all__ = [
      'autosxs_pipeline',
      'evaluation_llm_classification_pipeline',
@@ -29,4 +31,5 @@ __all__ = [
      'FeatureAttributionGraphComponentOp',
      'DetectModelBiasOp',
      'DetectDataBiasOp',
+     'ModelImportEvaluationOp',
  ]
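With the re-export in place, the import-evaluation component resolves from the preview namespace rather than the old _implementation path. A minimal sketch; the surrounding pipeline, task names, and argument values are hypothetical:

# Hypothetical sketch of the new import path.
from google_cloud_pipeline_components.preview.model_evaluation import ModelImportEvaluationOp

# Inside a @dsl.pipeline one would pass a model artifact plus exactly one
# metrics input, e.g. (all values hypothetical):
#   ModelImportEvaluationOp(
#       model=get_model_task.outputs['model'],
#       classification_metrics=eval_task.outputs['evaluation_metrics'],
#       display_name='my-eval',
#   )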
google_cloud_pipeline_components/preview/model_evaluation/model_evaluation_import_component.py

@@ -1,4 +1,4 @@
- # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -53,8 +53,9 @@ def model_evaluation_import(

    For more details, see
    https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models.evaluations
-   One of the four metrics inputs must be provided, metrics & problem_type,
-   classification_metrics, regression_metrics, or forecasting_metrics.
+   One of the metrics inputs must be provided, metrics & problem_type,
+   classification_metrics, regression_metrics, or forecasting_metrics, text_generation_metrics,
+   question_answering_metrics, summarization_metrics, embedding_metrics.

    Args:
      model: Vertex model resource that will be the parent resource of the
google_cloud_pipeline_components/preview/starry_net/__init__.py

@@ -0,0 +1,19 @@
+ # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Starry Net Forecasting Pipeline."""
+
+ from google_cloud_pipeline_components.preview.starry_net.component import starry_net  # pylint: disable=g-importing-member
+
+ __all__ = ['starry_net']
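A minimal sketch of consuming the new preview symbol; the output filename is arbitrary, and the pipeline's parameters (defined in component.py, which this diff does not show inline) are supplied at run submission:

# Sketch only: compile the new Starry Net preview pipeline like any other
# KFP pipeline; runtime parameters come from component.py's signature.
from google_cloud_pipeline_components.preview.starry_net import starry_net
from kfp import compiler

compiler.Compiler().compile(starry_net, package_path='starry_net_pipeline.yaml')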