google-cloud-pipeline-components 2.11.0__py3-none-any.whl → 2.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of google-cloud-pipeline-components has been flagged as possibly problematic on the registry page.
- google_cloud_pipeline_components/_implementation/llm/deployment_graph.py +1 -6
- google_cloud_pipeline_components/_implementation/llm/env.py +1 -1
- google_cloud_pipeline_components/_implementation/llm/function_based.py +36 -98
- google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py +1 -1
- google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py +3 -17
- google_cloud_pipeline_components/_implementation/llm/reinforcer.py +6 -0
- google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py +2 -17
- google_cloud_pipeline_components/_implementation/llm/reward_model_trainer.py +6 -0
- google_cloud_pipeline_components/_implementation/llm/upload_llm_model.py +3 -3
- google_cloud_pipeline_components/_implementation/llm/utils.py +15 -0
- google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py +8 -1
- google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py +2 -0
- google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/__init__.py +14 -0
- google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/component.py +74 -0
- google_cloud_pipeline_components/preview/llm/rlaif/component.py +5 -3
- google_cloud_pipeline_components/preview/llm/rlhf/component.py +1 -1
- google_cloud_pipeline_components/preview/model_evaluation/__init__.py +2 -2
- google_cloud_pipeline_components/proto/preflight_validations_pb2.py +19 -30
- google_cloud_pipeline_components/v1/model_evaluation/__init__.py +4 -0
- google_cloud_pipeline_components/{preview → v1}/model_evaluation/evaluation_llm_classification_pipeline.py +14 -2
- google_cloud_pipeline_components/{preview → v1}/model_evaluation/evaluation_llm_text_generation_pipeline.py +27 -15
- google_cloud_pipeline_components/version.py +1 -1
- {google_cloud_pipeline_components-2.11.0.dist-info → google_cloud_pipeline_components-2.13.0.dist-info}/METADATA +1 -2
- {google_cloud_pipeline_components-2.11.0.dist-info → google_cloud_pipeline_components-2.13.0.dist-info}/RECORD +27 -25
- {google_cloud_pipeline_components-2.11.0.dist-info → google_cloud_pipeline_components-2.13.0.dist-info}/WHEEL +1 -1
- {google_cloud_pipeline_components-2.11.0.dist-info → google_cloud_pipeline_components-2.13.0.dist-info}/LICENSE +0 -0
- {google_cloud_pipeline_components-2.11.0.dist-info → google_cloud_pipeline_components-2.13.0.dist-info}/top_level.txt +0 -0
google_cloud_pipeline_components/_implementation/llm/deployment_graph.py

@@ -55,11 +55,6 @@ def pipeline(
       endpoint_resource_name: Path the Online Prediction Endpoint. This will be an empty string if the model was not deployed.
   """
   # fmt: on
-  adapter_artifact = kfp.dsl.importer(
-      artifact_uri=output_adapter_path,
-      artifact_class=kfp.dsl.Artifact,
-  ).set_display_name('Import Tuned Adapter')
-
   regional_endpoint = function_based.resolve_regional_endpoint(
       upload_location=upload_location
   ).set_display_name('Resolve Regional Endpoint')

@@ -86,7 +81,7 @@ def pipeline(
       project=_placeholders.PROJECT_ID_PLACEHOLDER,
       location=upload_location,
       regional_endpoint=regional_endpoint.output,
-      artifact_uri=
+      artifact_uri=output_adapter_path,
       model_display_name=display_name.output,
       model_reference_name=large_model_reference,
       upload_model=upload_model.output,
google_cloud_pipeline_components/_implementation/llm/env.py

@@ -19,7 +19,7 @@ from google_cloud_pipeline_components._implementation.llm.generated import refin
 
 
 def get_private_image_tag() -> str:
-  return os.getenv('PRIVATE_IMAGE_TAG') or
+  return os.getenv('PRIVATE_IMAGE_TAG') or '20240330_0352_RC00'
 
 
 def get_autosxs_image_tag() -> str:
google_cloud_pipeline_components/_implementation/llm/function_based.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 """Python function-based components used in KFP pipelies."""
 import functools
-from typing import
+from typing import List, NamedTuple, Optional
 
 from google_cloud_pipeline_components import _image
 from google_cloud_pipeline_components._implementation.llm import env

@@ -22,7 +22,7 @@ from kfp import dsl
 
 @dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
 def resolve_machine_spec(
-    accelerator_type: str = '',
+    accelerator_type: str = 'GPU',
     use_test_spec: bool = False,
 ) -> NamedTuple(
     'MachineSpec',

@@ -37,7 +37,8 @@ def resolve_machine_spec(
     accelerator_type: One of 'TPU' or 'GPU'. If 'TPU' is specified, tuning
       components run in europe-west4. Otherwise tuning components run in
       us-central1 on GPUs. Default is 'GPU'.
-    use_test_spec: Whether to use a lower resource machine for testing.
+    use_test_spec: Whether to use a lower resource machine for testing. If True,
+      a machine with the specified `accelerator_type` is provisioned.
 
   Returns:
     Machine spec.

@@ -61,14 +62,27 @@ def resolve_machine_spec(
           accelerator_count=32,
           tuning_location='europe-west4',
       )
-
+    elif accelerator_type == 'GPU':
       return outputs(
           machine_type='a2-highgpu-1g',
           accelerator_type='NVIDIA_TESLA_A100',
           accelerator_count=1,
           tuning_location='us-central1',
       )
-
+    elif accelerator_type == 'CPU':
+      return outputs(
+          machine_type='e2-standard-16',
+          accelerator_type='ACCELERATOR_TYPE_UNSPECIFIED',
+          accelerator_count=0,
+          tuning_location='us-central1',
+      )
+    else:
+      raise ValueError(
+          f'Unsupported test accelerator_type {accelerator_type}. Must be one '
+          'of TPU, GPU or CPU.'
+      )
+
+  if accelerator_type == 'TPU':
     return outputs(
         machine_type='cloud-tpu',
         accelerator_type='TPU_V3',

@@ -82,10 +96,11 @@ def resolve_machine_spec(
         accelerator_count=8,
         tuning_location='us-central1',
     )
-
-
-
-
+  else:
+    raise ValueError(
+        f'Unsupported accelerator_type {accelerator_type}. Must be one of'
+        'TPU or GPU.'
+    )
 
 
 @dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)

@@ -114,7 +129,7 @@ def resolve_refined_image_uri(
   Raises:
     ValueError: if an unsupported accelerator type is provided.
   """
-  if not accelerator_type:
+  if not accelerator_type or accelerator_type == 'ACCELERATOR_TYPE_UNSPECIFIED':
     accelerator_postfix = 'cpu'
   elif 'TPU' in accelerator_type:
     accelerator_postfix = 'tpu'

@@ -144,22 +159,6 @@ resolve_private_refined_image_uri = functools.partial(
 )
 
 
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def resolve_data_paths(
-    input_dataset: str,
-) -> NamedTuple('DataPaths', tfds_data_dir=str, tfds_name=str):
-  """Resolves dataset paths needed by downstream components."""
-  # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  import os
-  # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  outputs = NamedTuple('DataPaths', tfds_data_dir=str, tfds_name=str)
-  tfds_data_dir, tfds_name = os.path.split(input_dataset)
-  return outputs(
-      tfds_data_dir=tfds_data_dir,
-      tfds_name=tfds_name,
-  )
-
-
 @dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
 def resolve_reference_model_metadata(
     large_model_reference: str,

@@ -279,6 +278,15 @@ def resolve_reference_model_metadata(
         reward_model_path='gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_otter_pretrain/',
         is_supported=True,
     ),
+    'text-bison@002': reference_model_metadata(
+        large_model_reference='BISON_002',
+        reference_model_path=(
+            'gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison_002/'
+        ),
+        reward_model_reference='BISON_002',
+        reward_model_path='gs://vertex-rlhf-restricted/pretrained_models/palm/t5x_bison_002/',
+        is_supported=True,
+    ),
     'chat-bison@001': reference_model_metadata(
         large_model_reference='BISON',
         reference_model_path=(

@@ -445,7 +453,7 @@ def resolve_deploy_model(
     deploy_model: bool, large_model_reference: str
 ) -> bool:
   """Resolves runtime parameter that determines whether the tuned model should be deployed."""
-  supported_models = {'BISON'}
+  supported_models = {'BISON', 'BISON_002'}
   if deploy_model and large_model_reference in supported_models:
     return True
   return False

@@ -466,18 +474,10 @@ def value_exists(value: Optional[str] = None) -> bool:
   return True
 
 
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def resolve_candidate_columns(
-    candidate_columns: Optional[List[str]] = None,
-) -> List[str]:
-  """Returns candidate columns provided by the user or the default: ['candidate_0', 'candidate_1']."""
-  return candidate_columns or ['candidate_0', 'candidate_1']
-
-
 @dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
 def resolve_upload_model(large_model_reference: str) -> bool:
   """Returns whether the model should be uploaded."""
-  supported_models = {'BISON'}
+  supported_models = {'BISON', 'BISON_002'}
   if large_model_reference in supported_models:
     return True
   return False

@@ -515,65 +515,3 @@ def resolve_num_microbatches(large_model_reference: str) -> int:
   if 'llama' in large_model_reference.lower():
     return 2
   return 0
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def read_file(path: str) -> str:
-  """Reads the contents of the given file."""
-  # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  import re
-  # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-
-  path = re.sub('^gs://', '/gcs/', path)
-  with open(path, 'r') as f:
-    return f.read()
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def get_usage_metric(metadata: Dict[str, Any], key: str) -> bool:  # pytype: disable=unsupported-operands
-  """Extracts a single usage metric from metadata."""
-  return metadata[key]
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def dump_dict(value: Dict[Any, Any]) -> str:
-  """Dumps the given dict to a JSON string."""
-  # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  import json
-  # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-
-  return json.dumps(value).replace('"', '\\"')
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def dump_list(value: List[Any]) -> str:
-  """Dumps the given dict to a JSON string."""
-  # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  import json
-  # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-
-  return json.dumps(value).replace('"', '\\"')
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def identity(
-    x: str,
-) -> str:
-  return x
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def get_uri(artifact: dsl.Input[dsl.Artifact], is_dir: bool = False) -> str:  # pytype: disable=unsupported-operands
-  """Extracts the URI from an artifact."""
-  # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-  import os
-  # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported
-
-  if is_dir:
-    return os.path.join(artifact.uri, '*')
-  return artifact.uri
-
-
-@dsl.component(base_image=_image.GCPC_IMAGE_TAG, install_kfp_package=False)
-def get_empty_string() -> str:
-  return ''
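Net effect of the function_based.py changes: resolve_machine_spec now defaults accelerator_type to 'GPU', accepts 'CPU' as a test-only option, and raises ValueError on anything else, while a set of small helper components (resolve_data_paths, resolve_candidate_columns, read_file, get_usage_metric, dump_dict, dump_list, identity, get_uri, get_empty_string) is dropped. The sketch below restates only the test-spec branches visible in the diff above as plain Python; it is illustrative, not the packaged component (which returns a NamedTuple and runs inside a dsl.component container).

# Illustrative sketch only; mirrors the use_test_spec branches shown in the
# resolve_machine_spec diff above.
def pick_test_machine_spec(accelerator_type: str = 'GPU') -> dict:
  specs = {
      'TPU': dict(machine_type='cloud-tpu',
                  accelerator_type='TPU_V3',
                  accelerator_count=32,
                  tuning_location='europe-west4'),
      'GPU': dict(machine_type='a2-highgpu-1g',
                  accelerator_type='NVIDIA_TESLA_A100',
                  accelerator_count=1,
                  tuning_location='us-central1'),
      'CPU': dict(machine_type='e2-standard-16',
                  accelerator_type='ACCELERATOR_TYPE_UNSPECIFIED',
                  accelerator_count=0,
                  tuning_location='us-central1'),
  }
  if accelerator_type not in specs:
    raise ValueError(
        f'Unsupported test accelerator_type {accelerator_type}. '
        'Must be one of TPU, GPU or CPU.'
    )
  return specs[accelerator_type]


print(pick_test_machine_spec('CPU'))  # e2-standard-16, no accelerator attached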
google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py

@@ -53,7 +53,7 @@ def pipeline(
     project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
     accelerator_type: str = 'GPU',
     location: str = _placeholders.LOCATION_PLACEHOLDER,
-    tensorboard_resource_id:
+    tensorboard_resource_id: str = '',
     encryption_spec_key_name: str = '',
 ) -> PipelineOutput:
   # fmt: off

@@ -159,26 +159,12 @@ def pipeline(
           reward_lora_dim=reward_lora_dim,
           num_microbatches=num_microbatches.output,
           encryption_spec_key_name=encryption_spec_key_name,
+          tensorboard_resource_id=tensorboard_resource_id,
       )
       .set_display_name('Reinforcer')
       .set_caching_options(False)
   )
-  has_tensorboard_id = function_based.value_exists(
-      value=tensorboard_resource_id
-  ).set_display_name('Resolve Tensorboard Resource ID')
-  with kfp.dsl.Condition(  # pytype: disable=wrong-arg-types
-      has_tensorboard_id.output == True,  # pylint: disable=singleton-comparison, g-explicit-bool-comparison
-      name='Upload Reinforcement Learning Tensorboard Metrics',
-  ):
-    _ = upload_tensorboard_metrics.upload_tensorboard_metrics(
-        tensorboard_resource_id=tensorboard_resource_id,
-        metrics_directory=rl_model.outputs['tensorboard_metrics'],
-        experiment_name=(
-            'rl-model-tuner-'
-            f'{kfp.dsl.PIPELINE_JOB_ID_PLACEHOLDER}-'
-            f'{kfp.dsl.PIPELINE_TASK_ID_PLACEHOLDER}'
-        ),
-    ).set_display_name('Reinforcement Learning Tensorboard Metrics Uploader')
+
   return PipelineOutput(
       output_model_path=rl_model.outputs['output_model_path'],
       output_adapter_path=rl_model.outputs['output_adapter_path'],
google_cloud_pipeline_components/_implementation/llm/reinforcer.py

@@ -48,6 +48,7 @@ def reinforcer(
     reward_lora_dim: int = 4,
     num_microbatches: int = 0,
     encryption_spec_key_name: str = '',
+    tensorboard_resource_id: str = '',
 ) -> kfp.dsl.ContainerSpec:  # pylint: disable=g-doc-args
   """Trains a model using reinforcement learning.
 

@@ -91,6 +92,9 @@ def reinforcer(
       then all resources created by the CustomJob will be encrypted with the
       provided encryption key. Note that this is not supported for TPU at the
       moment.
+    tensorboard_resource_id: Optional tensorboard resource id. Format:
+      `projects/{project_number}/locations/{location}/tensorboards/{tensorboard_id}`.
+      If provided, tensorboard metrics will be uploaded to this location.
 
   Returns:
     output_model_path: Path to the trained model checkpoint.

@@ -133,6 +137,8 @@ def reinforcer(
           f'--num_microbatches={num_microbatches}',
       ],
       encryption_spec_key_name=encryption_spec_key_name,
+      base_output_directory=tensorboard_metrics.uri,
+      tensorboard=tensorboard_resource_id,
   ),
   gcp_resources=gcp_resources,
 )
google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py

@@ -50,7 +50,7 @@ def pipeline(
     project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
     accelerator_type: str = 'GPU',
     location: str = _placeholders.LOCATION_PLACEHOLDER,
-    tensorboard_resource_id:
+    tensorboard_resource_id: str = '',
     encryption_spec_key_name: str = '',
 ) -> PipelineOutput:
   # fmt: off

@@ -178,27 +178,12 @@ def pipeline(
           lora_dim=lora_dim,
           num_microbatches=num_microbatches.output,
           encryption_spec_key_name=encryption_spec_key_name,
+          tensorboard_resource_id=tensorboard_resource_id,
       )
       .set_display_name('Reward Model Trainer')
       .set_caching_options(False)
   )
 
-  has_tensorboard_id = function_based.value_exists(
-      value=tensorboard_resource_id
-  ).set_display_name('Resolve TensorBoard Resource ID')
-  with kfp.dsl.Condition(  # pytype: disable=wrong-arg-types
-      has_tensorboard_id.output == True,  # pylint: disable=singleton-comparison, g-explicit-bool-comparison
-      name='Upload Reward Model TensorBoard Metrics',
-  ):
-    _ = upload_tensorboard_metrics.upload_tensorboard_metrics(
-        tensorboard_resource_id=tensorboard_resource_id,
-        metrics_directory=reward_model.outputs['tensorboard_metrics'],
-        experiment_name=(
-            'reward-model-tuner-'
-            f'{kfp.dsl.PIPELINE_JOB_ID_PLACEHOLDER}-'
-            f'{kfp.dsl.PIPELINE_TASK_ID_PLACEHOLDER}'
-        ),
-    ).set_display_name('Reward Model TensorBoard Metrics Uploader')
   return PipelineOutput(
       reward_model_base_path=reference_model_metadata.outputs[
           'reward_model_path'
google_cloud_pipeline_components/_implementation/llm/reward_model_trainer.py

@@ -42,6 +42,7 @@ def reward_model_trainer(
     lora_dim: int = 4,
     num_microbatches: int = 0,
     encryption_spec_key_name: str = '',
+    tensorboard_resource_id: str = '',
 ) -> kfp.dsl.ContainerSpec:  # pylint: disable=g-doc-args
   """Trains a reward model.
 

@@ -76,6 +77,9 @@ def reward_model_trainer(
      then all resources created by the CustomJob will be encrypted with the
      provided encryption key. Note that this is not supported for TPU at the
      moment.
+    tensorboard_resource_id: Optional tensorboard resource id. Format:
+      `projects/{project_number}/locations/{location}/tensorboards/{tensorboard_id}`.
+      If provided, tensorboard metrics will be uploaded to this location.
 
   Returns:
     output_adapter_path: Trained reward LoRA adapter.

@@ -110,6 +114,8 @@ def reward_model_trainer(
           f'--num_microbatches={num_microbatches}',
       ],
      encryption_spec_key_name=encryption_spec_key_name,
+      base_output_directory=tensorboard_metrics.uri,
+      tensorboard=tensorboard_resource_id,
   ),
   gcp_resources=gcp_resources,
 )
google_cloud_pipeline_components/_implementation/llm/upload_llm_model.py

@@ -26,7 +26,7 @@ from kfp import dsl
 def refined_upload_llm_model(
     project: str,
     location: str,
-    artifact_uri:
+    artifact_uri: str,
     model_reference_name: str,
     model_display_name: str,
     regional_endpoint: str,

@@ -41,7 +41,7 @@ def refined_upload_llm_model(
   Args:
     project: Name of the GCP project.
     location: Location for model upload and deployment.
-    artifact_uri:
+    artifact_uri: Path to the artifact to upload.
     model_reference_name: Large model reference name.
     model_display_name: Name of the model (shown in Model Registry).
     regional_endpoint: Regional API endpoint.

@@ -88,7 +88,7 @@ def refined_upload_llm_model(
           'largeModelReference': {'name': model_reference_name},
           'labels': labels,
           'generatedModelSource': {'genie_source': {'base_model_uri': ''}},
-          'artifactUri': artifact_uri
+          'artifactUri': artifact_uri,
       }
   }
   if encryption_spec_key_name:
google_cloud_pipeline_components/_implementation/llm/utils.py

@@ -30,6 +30,8 @@ def build_payload(
     encryption_spec_key_name: str = '',
     labels: Optional[Dict[str, str]] = None,
     scheduling: Optional[Dict[str, Any]] = None,
+    base_output_directory: Optional[str] = None,
+    tensorboard: Optional[str] = None,
 ) -> Dict[str, Any]:
   """Generates payload for a custom training job.
 

@@ -50,6 +52,11 @@ def build_payload(
       moment.
     labels: The labels with user-defined metadata to organize CustomJobs.
     scheduling: Scheduling options for a CustomJob.
+    base_output_directory: Cloud Storage location to store the output of this
+      CustomJob
+    tensorboard: The name of a Vertex AI TensorBoard resource to which this
+      CustomJob will upload TensorBoard logs. Format:
+      ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
 
   Returns:
     Custom job payload.

@@ -96,6 +103,14 @@ def build_payload(
   if scheduling:
     payload['job_spec']['scheduling'] = scheduling
 
+  if base_output_directory:
+    payload['job_spec']['base_output_directory'] = {
+        'output_uri_prefix': base_output_directory
+    }
+
+  if tensorboard:
+    payload['job_spec']['tensorboard'] = tensorboard
+
   return payload
 
 
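With build_payload accepting base_output_directory and tensorboard, callers such as reinforcer and reward_model_trainer can point the CustomJob's logs at a Vertex AI TensorBoard instance directly instead of running a separate uploader step. A sketch of the job_spec fields the new branches emit, with hypothetical placeholder values:

# Illustrative only: the shape of the fields that build_payload now adds to
# the CustomJob payload when both new arguments are supplied.
payload_fragment = {
    'job_spec': {
        'base_output_directory': {
            'output_uri_prefix': 'gs://my-bucket/rlhf-run-1/tensorboard_metrics'
        },
        'tensorboard': 'projects/123456789/locations/us-central1/tensorboards/987654321',
    }
}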
google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py

@@ -88,7 +88,14 @@ def validate_pipeline(
       f' {supported_pipeline_regions}.'
   )
 
-
+  valid_cmek_accelerator_types = {
+      'GPU',
+      'CPU',  # Only used for testing.
+  }
+  valid_cmek_config = (
+      location == 'us-central1'
+      and accelerator_type in valid_cmek_accelerator_types
+  )
   if encryption_spec_key_name and not valid_cmek_config:
     raise ValueError(
         'encryption_spec_key_name (CMEK) is only supported for GPU training'
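The CMEK precondition added to validate_pipeline.py reduces to a single boolean: the key is only accepted for us-central1 runs on GPU (or CPU, which is test-only). A minimal restatement, illustrative only:

# Illustrative only: restates the CMEK check added above.
def cmek_config_is_valid(location: str, accelerator_type: str) -> bool:
  return location == 'us-central1' and accelerator_type in {'GPU', 'CPU'}


assert cmek_config_is_valid('us-central1', 'GPU')
assert not cmek_config_is_valid('europe-west4', 'TPU')  # CMEK is not supported for TPU training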
google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py

@@ -37,6 +37,7 @@ from google_cloud_pipeline_components._implementation.model_evaluation.llm_safet
 from google_cloud_pipeline_components._implementation.model_evaluation.llm_safety_bias.evaluation_llm_safety_bias_pipeline import evaluation_llm_safety_bias_pipeline
 from google_cloud_pipeline_components._implementation.model_evaluation.model_inference.component import model_inference_and_evaluation_component
 from google_cloud_pipeline_components._implementation.model_evaluation.model_inference.component import model_inference_component
+from google_cloud_pipeline_components._implementation.model_evaluation.model_name_preprocessor.component import model_name_preprocessor as ModelNamePreprocessorOp
 from google_cloud_pipeline_components._implementation.model_evaluation.target_field_data_remover.component import target_field_data_remover as TargetFieldDataRemoverOp
 from google_cloud_pipeline_components._implementation.model_evaluation.text2sql.evaluation_llm_text2sql_pipeline import evaluation_llm_text2sql_pipeline
 

@@ -63,6 +64,7 @@ __all__ = [
     'ModelEvaluationFeatureAttributionOp',
     'ModelImportEvaluatedAnnotationOp',
     'ModelImportEvaluationOp',
+    'ModelNamePreprocessorOp',
     'TargetFieldDataRemoverOp',
     'model_inference_component',
     'model_inference_and_evaluation_component',
google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/__init__.py

@@ -0,0 +1,14 @@
+# Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Model name preprocessor Component."""
google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/component.py

@@ -0,0 +1,74 @@
+# Copyright 2023 The Kubeflow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Model name preprocessor component used in KFP pipelines."""
+
+from google_cloud_pipeline_components._implementation.model_evaluation import version
+from kfp.dsl import container_component
+from kfp.dsl import ContainerSpec
+from kfp.dsl import OutputPath
+from kfp.dsl import PIPELINE_ROOT_PLACEHOLDER
+
+
+@container_component
+def model_name_preprocessor(
+    gcp_resources: OutputPath(str),
+    processed_model_name: OutputPath(str),
+    project: str,
+    location: str,
+    model_name: str,
+    service_account: str = '',
+):
+  """Preprocess inputs for text2sql evaluation pipeline.
+
+  Args:
+    project: Required. The GCP project that runs the pipeline component.
+    location: Required. The GCP region that runs the pipeline component.
+    model_name: The Model name used to run evaluation. Must be a publisher
+      Model or a managed Model sharing the same ancestor location. Starting
+      this job has no impact on any existing deployments of the Model and
+      their resources.
+    service_account: Sets the default service account for workload run-as
+      account. The service account running the pipeline
+      (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account)
+
+  Returns:
+    gcp_resources (str):
+      Serialized gcp_resources proto tracking the custom job.
+    processed_model_name (str):
+      Preprocessed model name.
+  """
+
+  return ContainerSpec(
+      image=version.LLM_EVAL_IMAGE_TAG,
+      args=[
+          '--model_name_preprocessor',
+          'true',
+          '--project',
+          project,
+          '--location',
+          location,
+          '--root_dir',
+          f'{PIPELINE_ROOT_PLACEHOLDER}',
+          '--model_name',
+          model_name,
+          '--processed_model_name',
+          processed_model_name,
+          '--service_account',
+          service_account,
+          '--gcp_resources',
+          gcp_resources,
+          '--executor_input',
+          '{{$}}',
+      ],
+  )
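The new component is consumed later in this release by the LLM evaluation pipelines (see the evaluation_llm_classification_pipeline and evaluation_llm_text_generation_pipeline hunks below). A hedged sketch of how it can be wired into a pipeline; the model and service-account values are hypothetical:

# Illustrative only: mirrors how the evaluation pipelines below use the op.
from kfp import dsl
from google_cloud_pipeline_components._implementation.model_evaluation import (
    ModelNamePreprocessorOp,
)


@dsl.pipeline(name='model-name-preprocessor-demo')
def demo_pipeline(project: str, location: str = 'us-central1'):
  preprocessed = ModelNamePreprocessorOp(
      project=project,
      location=location,
      model_name='publishers/google/models/text-bison@002',
      service_account='',
  )
  # Downstream tasks read preprocessed.outputs['processed_model_name'].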
google_cloud_pipeline_components/preview/llm/rlaif/component.py

@@ -40,6 +40,7 @@ def rlaif_pipeline(
     prompt_dataset: str,
     preference_prompt_dataset: str,
     large_model_reference: str,
+    task_type: str,
     model_display_name: Optional[str] = None,
     prompt_sequence_length: int = 512,
     target_sequence_length: int = 64,

@@ -65,7 +66,9 @@ def rlaif_pipeline(
 
   Args:
     prompt_dataset: Cloud storage path to an unlabled JSONL dataset that contains prompts. Text datasets must contain an `input_text` field that contains the prompt. Chat datasets must contain at least 1 message in a `messages` field. Each message must be valid JSON that contains `author` and `content` fields, where valid `author` values are `user` and `assistant` and `content` must be non-empty. Each row may contain multiple messages, but the first and last author must be the `user`. An optional `context` field may be provided for each example in a chat dataset. If provided, the `context` will preprended to the message `content`. The `instruction` serves as the default context. (Useful if most messages use the same system-level context.) Any context provided in the example will override the default value.
-    preference_prompt_dataset: The prompt dataset used for two models' inferences to build the side by side comparison AI feedback.
+    preference_prompt_dataset: The prompt dataset used for two models' inferences to build the side by side comparison AI feedback.
+    large_model_reference: Name of the base model. Supported values are `text-bison@001`, `t5-small`, `t5-large`, `t5-xl` and `t5-xxl`. `text-bison@001` and `t5-small` are supported in `us-central1` and `europe-west4`. `t5-large`, `t5-xl` and `t5-xxl` are only supported in `europe-west4`.
+    task_type: Evaluation task in the form {task}@{version}. task can be one of "summarization", "question_answering". Version is an integer with 3 digits or "latest". Ex: summarization@001 or question_answering@latest.
     model_display_name: Name of the fine-tuned model shown in the Model Registry. If not provided, a default name will be created.
     prompt_sequence_length: Maximum tokenized sequence length for input text. Higher values increase memory overhead. This value should be at most 8192. Default value is 512.
     target_sequence_length: Maximum tokenized sequence length for target text. Higher values increase memory overhead. This value should be at most 1024. Default value is 64.

@@ -90,7 +93,6 @@ def rlaif_pipeline(
   """
   # fmt: on
   id_columns = ['content']
-  task = 'summarization@001'
   deploy_model = True
 
   output_prediction_gcs_path_a = infer.infer_pipeline(

@@ -129,7 +131,7 @@ def rlaif_pipeline(
   autosxs = online_evaluation_pairwise.online_evaluation_pairwise(
       inference_output_uri=inference_output_uri,
       id_columns=id_columns,
-      task=
+      task=task_type,
   ).set_display_name('Build AI Feedback')
 
   preference_dataset = (
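For rlaif_pipeline the evaluation task is no longer hardcoded to 'summarization@001'; it must now be supplied through the new task_type parameter. An illustrative compile-and-parameterize sketch; paths and values are hypothetical and the parameter dict is not exhaustive:

# Illustrative only: compiling the RLAIF pipeline and supplying task_type.
from kfp import compiler
from google_cloud_pipeline_components.preview.llm.rlaif.component import rlaif_pipeline

compiler.Compiler().compile(rlaif_pipeline, 'rlaif_pipeline.yaml')

# Only the parameters touched by this release are shown.
parameter_values = {
    'prompt_dataset': 'gs://my-bucket/prompts.jsonl',
    'preference_prompt_dataset': 'gs://my-bucket/preference_prompts.jsonl',
    'large_model_reference': 'text-bison@001',
    'task_type': 'summarization@001',  # previously hardcoded inside the pipeline
}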
google_cloud_pipeline_components/preview/llm/rlhf/component.py

@@ -53,7 +53,7 @@ def rlhf_pipeline(
     accelerator_type: str = 'GPU',
     location: str = _placeholders.LOCATION_PLACEHOLDER,
     encryption_spec_key_name: str = '',
-    tensorboard_resource_id:
+    tensorboard_resource_id: str = '',
 ) -> PipelineOutput:
   # fmt: off
   """Performs reinforcement learning from human feedback.
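rlhf_pipeline's tensorboard_resource_id is now an ordinary string parameter whose value is forwarded to the trainer components (and, through build_payload, to the CustomJob) rather than to a conditional uploader task. An illustrative launch sketch; project, bucket and resource IDs are hypothetical:

# Illustrative only: running the RLHF pipeline with the optional
# tensorboard_resource_id parameter.
from google.cloud import aiplatform
from kfp import compiler
from google_cloud_pipeline_components.preview.llm.rlhf.component import rlhf_pipeline

compiler.Compiler().compile(rlhf_pipeline, 'rlhf_pipeline.yaml')

job = aiplatform.PipelineJob(
    display_name='rlhf-tuning',
    template_path='rlhf_pipeline.yaml',
    pipeline_root='gs://my-bucket/pipeline-root',
    parameter_values={
        'prompt_dataset': 'gs://my-bucket/prompts.jsonl',
        'preference_dataset': 'gs://my-bucket/preferences.jsonl',
        'large_model_reference': 'text-bison@002',
        'tensorboard_resource_id': 'projects/123456789/locations/us-central1/tensorboards/987654321',
    },
)
# job.run(service_account='tuner@my-project.iam.gserviceaccount.com')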
google_cloud_pipeline_components/preview/model_evaluation/__init__.py

@@ -14,12 +14,12 @@
 """Model evaluation preview components."""
 
 from google_cloud_pipeline_components.preview.model_evaluation.data_bias_component import detect_data_bias as DetectDataBiasOp
-from google_cloud_pipeline_components.preview.model_evaluation.evaluation_llm_classification_pipeline import evaluation_llm_classification_pipeline
-from google_cloud_pipeline_components.preview.model_evaluation.evaluation_llm_text_generation_pipeline import evaluation_llm_text_generation_pipeline
 from google_cloud_pipeline_components.preview.model_evaluation.feature_attribution_component import feature_attribution as ModelEvaluationFeatureAttributionOp
 from google_cloud_pipeline_components.preview.model_evaluation.feature_attribution_graph_component import feature_attribution_graph_component as FeatureAttributionGraphComponentOp
 from google_cloud_pipeline_components.preview.model_evaluation.model_based_llm_evaluation.autosxs.autosxs_pipeline import autosxs_pipeline
 from google_cloud_pipeline_components.preview.model_evaluation.model_bias_component import detect_model_bias as DetectModelBiasOp
+from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_classification_pipeline import evaluation_llm_classification_pipeline
+from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_text_generation_pipeline import evaluation_llm_text_generation_pipeline
 
 __all__ = [
     'autosxs_pipeline',
google_cloud_pipeline_components/proto/preflight_validations_pb2.py

@@ -12,16 +12,17 @@ _sym_db = _symbol_database.Default()
 
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
-    b'\n\x13preflight_validations.proto\x12\x15preflight_validations"\
-    b' \
-    b' \
-    b' \
-    b' \x01(\t\
-    b' \
-    b' \x01(\
-    b' \x01(\
-    b' \x01(\t\x12\x13\n\x0bpermissions\x18\
-    b'
+    b'\n\x13preflight_validations.proto\x12\x15preflight_validations"\x8e\x02\n\x0fValidationItems\x12R\n\x0esa_validations\x18\x01'
+    b' \x03(\x0b\x32:.preflight_validations.GoogleCloudServiceAccountValidation\x12S\n\x11quota_validations\x18\x02'
+    b' \x03(\x0b\x32\x38.preflight_validations.GoogleCloudProjectQuotaValidation\x12R\n\x0f\x61pi_validations\x18\x03'
+    b' \x03(\x0b\x32\x39.preflight_validations.GoogleCloudApiEnablementValidation"p\n!GoogleCloudProjectQuotaValidation\x12\x13\n\x0bmetric_name\x18\x01'
+    b' \x01(\t\x12\x15\n\x0bint64_value\x18\x02'
+    b' \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03'
+    b' \x01(\x01H\x00\x42\x07\n\x05value"\x8d\x01\n#GoogleCloudServiceAccountValidation\x12\x1f\n\x17\x64\x65\x66\x61ult_principal_email\x18\x01'
+    b' \x01(\t\x12\x1c\n\x14override_placeholder\x18\x02'
+    b' \x01(\t\x12\x13\n\x0bpermissions\x18\x03'
+    b' \x03(\t\x12\x12\n\nrole_names\x18\x04'
+    b' \x03(\t";\n"GoogleCloudApiEnablementValidation\x12\x15\n\rservice_names\x18\x01'
     b' \x03(\tB\x02P\x01\x62\x06proto3'
 )
 

@@ -35,24 +36,12 @@ _builder.BuildTopDescriptorsAndMessages(
 if not _descriptor._USE_C_DESCRIPTORS:
   _globals['DESCRIPTOR']._loaded_options = None
   _globals['DESCRIPTOR']._serialized_options = b'P\001'
-  _globals[
-
-  ].
-  _globals[
-
-  ].
-  _globals['
-  _globals['
-  _globals['_GOOGLECLOUDPROJECTQUOTAMETADATA']._serialized_start = 417
-  _globals['_GOOGLECLOUDPROJECTQUOTAMETADATA']._serialized_end = 652
-  _globals[
-      '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
-  ]._serialized_start = 591
-  _globals[
-      '_GOOGLECLOUDPROJECTQUOTAMETADATA_METRICSRECOMMENDATIONSENTRY'
-  ]._serialized_end = 652
-  _globals['_GOOGLECLOUDSERVICEACCOUNTMETADATA']._serialized_start = 654
-  _globals['_GOOGLECLOUDSERVICEACCOUNTMETADATA']._serialized_end = 734
-  _globals['_GOOGLECLOUDAPIENABLEMENTMETADATA']._serialized_start = 736
-  _globals['_GOOGLECLOUDAPIENABLEMENTMETADATA']._serialized_end = 793
+  _globals['_VALIDATIONITEMS']._serialized_start = 142
+  _globals['_VALIDATIONITEMS']._serialized_end = 412
+  _globals['_GOOGLECLOUDPROJECTQUOTAVALIDATION']._serialized_start = 414
+  _globals['_GOOGLECLOUDPROJECTQUOTAVALIDATION']._serialized_end = 526
+  _globals['_GOOGLECLOUDSERVICEACCOUNTVALIDATION']._serialized_start = 529
+  _globals['_GOOGLECLOUDSERVICEACCOUNTVALIDATION']._serialized_end = 670
+  _globals['_GOOGLECLOUDAPIENABLEMENTVALIDATION']._serialized_start = 672
+  _globals['_GOOGLECLOUDAPIENABLEMENTVALIDATION']._serialized_end = 731
 # @@protoc_insertion_point(module_scope)
google_cloud_pipeline_components/v1/model_evaluation/__init__.py

@@ -20,6 +20,8 @@ from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_tabu
 from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_tabular_pipeline import evaluation_automl_tabular_pipeline
 from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_unstructure_data_pipeline import evaluation_automl_unstructure_data_pipeline
 from google_cloud_pipeline_components.v1.model_evaluation.evaluation_feature_attribution_pipeline import evaluation_feature_attribution_pipeline
+from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_classification_pipeline import evaluation_llm_classification_pipeline
+from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_text_generation_pipeline import evaluation_llm_text_generation_pipeline
 from google_cloud_pipeline_components.v1.model_evaluation.forecasting_component import model_evaluation_forecasting as ModelEvaluationForecastingOp
 from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
 

@@ -30,6 +32,8 @@ __all__ = [
     'evaluation_automl_tabular_pipeline',
     'evaluation_automl_unstructure_data_pipeline',
     'evaluation_feature_attribution_pipeline',
+    'evaluation_llm_classification_pipeline',
+    'evaluation_llm_text_generation_pipeline',
     'ModelEvaluationClassificationOp',
     'ModelEvaluationRegressionOp',
     'ModelEvaluationForecastingOp',
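Since the preview package now re-exports the v1 implementations (see the preview/model_evaluation/__init__.py hunk above), both import paths should resolve to the same pipeline objects in 2.13.0:

# Illustrative only: both names should refer to the same function object.
from google_cloud_pipeline_components.v1.model_evaluation import (
    evaluation_llm_text_generation_pipeline,
)
from google_cloud_pipeline_components.preview.model_evaluation import (
    evaluation_llm_text_generation_pipeline as preview_alias,
)

assert evaluation_llm_text_generation_pipeline is preview_alias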
google_cloud_pipeline_components/{preview → v1}/model_evaluation/evaluation_llm_classification_pipeline.py

@@ -18,6 +18,7 @@ from typing import Dict, List, NamedTuple
 from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationClassificationPredictionsPostprocessorOp
 from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
 from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluationOp
+from google_cloud_pipeline_components._implementation.model_evaluation import ModelNamePreprocessorOp
 from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
 from google_cloud_pipeline_components.types.artifact_types import VertexModel
 from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp

@@ -97,12 +98,23 @@ def evaluation_llm_classification_pipeline(  # pylint: disable=dangerous-default
       evaluation_resource_name=str,
   )
 
+  preprocessed_model_name = ModelNamePreprocessorOp(
+      project=project,
+      location=location,
+      model_name=model_name,
+      service_account=service_account,
+  )
+
   get_vertex_model_task = dsl.importer(
       artifact_uri=(
-          f'https://{location}-aiplatform.googleapis.com/v1/{
+          f'https://{location}-aiplatform.googleapis.com/v1/{preprocessed_model_name.outputs["processed_model_name"]}'
       ),
       artifact_class=VertexModel,
-      metadata={
+      metadata={
+          'resourceName': preprocessed_model_name.outputs[
+              'processed_model_name'
+          ]
+      },
   )
   get_vertex_model_task.set_display_name('get-vertex-model')
 
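The classification evaluation pipeline now lives under the v1 namespace and resolves its model through ModelNamePreprocessorOp before importing it as a VertexModel artifact. Compiling it works the same as before; an illustrative sketch with a hypothetical output path:

# Illustrative only: compiling the relocated pipeline from the v1 namespace.
from kfp import compiler
from google_cloud_pipeline_components.v1.model_evaluation import (
    evaluation_llm_classification_pipeline,
)

compiler.Compiler().compile(
    evaluation_llm_classification_pipeline,
    'evaluation_llm_classification_pipeline.yaml',
)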
google_cloud_pipeline_components/{preview → v1}/model_evaluation/evaluation_llm_text_generation_pipeline.py

@@ -18,6 +18,7 @@ from typing import Dict, List, NamedTuple
 from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
 from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationTextGenerationOp
 from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluationOp
+from google_cloud_pipeline_components._implementation.model_evaluation import ModelNamePreprocessorOp
 from google_cloud_pipeline_components.types.artifact_types import VertexModel
 from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
 from kfp import dsl

@@ -33,6 +34,7 @@ def evaluation_llm_text_generation_pipeline(  # pylint: disable=dangerous-defaul
     location: str,
     batch_predict_gcs_source_uris: List[str],
     batch_predict_gcs_destination_output_uri: str,
+    service_account: str,
     model_name: str = 'publishers/google/models/text-bison@002',
     evaluation_task: str = 'text-generation',
     input_field_name: str = 'input_text',

@@ -42,7 +44,6 @@ def evaluation_llm_text_generation_pipeline(  # pylint: disable=dangerous-defaul
     batch_predict_model_parameters: Dict[str, str] = {},
     enable_row_based_metrics: bool = False,
     machine_type: str = 'e2-standard-4',
-    service_account: str = '',
     network: str = '',
     encryption_spec_key_name: str = '',
     evaluation_display_name: str = 'evaluation-llm-text-generation-pipeline-{{$.pipeline_job_uuid}}',

@@ -59,18 +60,19 @@ def evaluation_llm_text_generation_pipeline(  # pylint: disable=dangerous-defaul
     project: Required. The GCP project that runs the pipeline components.
     location: Required. The GCP region that runs the pipeline components.
     batch_predict_gcs_source_uris: Required. Google Cloud Storage URI(s) to your eval dataset instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on [wildcards](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames). For more details about this [input config](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig). The content of gcs source files should be preset to one of the following formats:
-
-
-
-
-
-
-
-
-
-
-
+      1) Prediction & Evaluation Dataset format, guaranteeing "prompt" and "ground_truth" attributes are included
+        {
+          "prompt": "your input/prompt text",
+          "ground_truth": "your ground truth output text"
+        }
+      or
+      2) Tuning Dataset format, guaranteeing "input_text" and "output_text" attributes are included.
+        {
+          "input_text": "your input/prompt text",
+          "output_text": "your ground truth output text"
+        }
     batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the eval pipeline output is to be written to.
+    service_account: Required. Sets the default service account for workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account.
     model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
     evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. Currently supported tasks are: `summarization`, `question-answering`, `text-generation`.
     input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.

@@ -80,7 +82,6 @@ def evaluation_llm_text_generation_pipeline(  # pylint: disable=dangerous-defaul
     batch_predict_model_parameters: A map of parameters that govern the predictions. Some acceptable parameters include: maxOutputTokens, topK, topP, and temperature.
     enable_row_based_metrics: Flag of if row based metrics is enabled, default value is false.
     machine_type: The machine type of this custom job. If not set, defaulted to `e2-standard-4`. More details: https://cloud.google.com/compute/docs/machine-resource
-    service_account: Sets the default service account for workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code Service Agent(https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project.
     network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name, as in `myVPC`. To specify this field, you must have already configured VPC Network Peering for Vertex AI (https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If left unspecified, the job is not peered with any network.
     encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
     evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.

@@ -96,12 +97,23 @@ def evaluation_llm_text_generation_pipeline(  # pylint: disable=dangerous-defaul
       evaluation_resource_name=str,
   )
 
+  preprocessed_model_name = ModelNamePreprocessorOp(
+      project=project,
+      location=location,
+      model_name=model_name,
+      service_account=service_account,
+  )
+
   get_vertex_model_task = dsl.importer(
       artifact_uri=(
-          f'https://{location}-aiplatform.googleapis.com/v1/{
+          f'https://{location}-aiplatform.googleapis.com/v1/{preprocessed_model_name.outputs["processed_model_name"]}'
      ),
      artifact_class=VertexModel,
-      metadata={
+      metadata={
+          'resourceName': preprocessed_model_name.outputs[
+              'processed_model_name'
+          ]
+      },
   )
   get_vertex_model_task.set_display_name('get-vertex-model')
 
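service_account is now a required parameter of the text-generation evaluation pipeline rather than an optional one. An illustrative run sketch; it assumes the pipeline was already compiled to the given template path, and all project, bucket and account values are hypothetical:

# Illustrative only: launching the eval pipeline with the now-required
# service_account parameter.
from google.cloud import aiplatform

job = aiplatform.PipelineJob(
    display_name='evaluation-llm-text-generation',
    template_path='evaluation_llm_text_generation_pipeline.yaml',
    pipeline_root='gs://my-bucket/pipeline-root',
    parameter_values={
        'project': 'my-project',
        'location': 'us-central1',
        'batch_predict_gcs_source_uris': ['gs://my-bucket/eval/instances.jsonl'],
        'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval/output',
        'service_account': 'eval-runner@my-project.iam.gserviceaccount.com',
        'model_name': 'publishers/google/models/text-bison@002',
    },
)
# job.submit()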
google_cloud_pipeline_components-2.13.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: google-cloud-pipeline-components
-Version: 2.
+Version: 2.13.0
 Summary: This SDK enables a set of First Party (Google owned) pipeline components that allow users to take their experience from Vertex AI SDK and other Google Cloud services and create a corresponding pipeline using KFP or Managed Pipelines.
 Home-page: https://github.com/kubeflow/pipelines/tree/master/components/google-cloud
 Author: The Google Cloud Pipeline Components authors

@@ -31,7 +31,6 @@ Classifier: Topic :: Software Development :: Libraries
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.7.0,<3.12.0
 Description-Content-Type: text/markdown
-License-File: LICENSE
 Requires-Dist: google-api-core (!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5)
 Requires-Dist: kfp (<=2.7.0,>=2.6.0)
 Requires-Dist: google-cloud-aiplatform (<2,>=1.14.0)
@@ -2,38 +2,38 @@ google_cloud_pipeline_components/__init__.py,sha256=3Mr8_YbBkTzArlgPDkUKoMzoKHZx
|
|
|
2
2
|
google_cloud_pipeline_components/_image.py,sha256=lANDYNk1WSuGZSoTTRcWdjsUvCkkA-PmwouTM9Et7fY,828
|
|
3
3
|
google_cloud_pipeline_components/_placeholders.py,sha256=517N_NQthPEBFJtsy8NE3WkBJm_dmwmlXdYNtk5gH-c,1233
|
|
4
4
|
google_cloud_pipeline_components/utils.py,sha256=9FG7umyEXhyUvtNeC46NuQ04olDMR3o-Wp78V1xs8GY,11045
|
|
5
|
-
google_cloud_pipeline_components/version.py,sha256=
|
|
5
|
+
google_cloud_pipeline_components/version.py,sha256=MSozmdtxEpg_eKMjFnu4JHwlDy18-IUD1wgUqboJ-Vc,678
|
|
6
6
|
google_cloud_pipeline_components/_implementation/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
|
|
7
7
|
google_cloud_pipeline_components/_implementation/llm/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
|
|
8
8
|
google_cloud_pipeline_components/_implementation/llm/batch_prediction_pairwise.py,sha256=mARB-tDYFr0tpBrLCIh481H4LDuXdr_8UyKVUaOF5Cw,7569
|
|
9
9
|
google_cloud_pipeline_components/_implementation/llm/bulk_inferrer.py,sha256=k-MHsyESWboHdNeRm9q3MOGwbtZr37QNGm_Y_cGsETA,3958
|
|
10
10
|
google_cloud_pipeline_components/_implementation/llm/deploy_llm_model.py,sha256=Jjme37_M32AK1r_MmHMHZvW8k9LJiqfZ4eJEquWoTt4,5029
|
|
11
|
-
google_cloud_pipeline_components/_implementation/llm/deployment_graph.py,sha256=
|
|
12
|
-
google_cloud_pipeline_components/_implementation/llm/env.py,sha256=
|
|
13
|
-
google_cloud_pipeline_components/_implementation/llm/function_based.py,sha256=
|
|
11
|
+
google_cloud_pipeline_components/_implementation/llm/deployment_graph.py,sha256=T6UgAco2Xk8Y0PjP5AfLSHSkE0V8m4OhrT4wTUdjGI4,5032
|
|
12
|
+
google_cloud_pipeline_components/_implementation/llm/env.py,sha256=cnzBS0-a2_T-w54T2E6RtshjbEg8CJuycqoIsVrOJZI,1852
|
|
13
|
+
google_cloud_pipeline_components/_implementation/llm/function_based.py,sha256=EPAY7uGukSfCJt9wkYOvXwJ_6WnkZBTBijYaMUCUcEg,18845
|
|
14
14
|
google_cloud_pipeline_components/_implementation/llm/model_evaluation_text_generation_pairwise.py,sha256=W64FNFfbarOcDw1sSG9J-BQCVPUYx3zLAmwLhhGhth0,5102
|
|
15
15
|
google_cloud_pipeline_components/_implementation/llm/online_evaluation_pairwise.py,sha256=khEli4fSoIQlaiReXOm3zsvsM7BIdbJOXFGUzjcvxOI,5299
|
|
16
16
|
google_cloud_pipeline_components/_implementation/llm/preference_data_formatter.py,sha256=xqgvMsFgFIuiDAv3V3lf4XJSnLjZSTCHknGkmUcP9fs,5025
|
|
17
17
|
google_cloud_pipeline_components/_implementation/llm/preprocess_chat_dataset.py,sha256=dMf2uXW4znn9W8xv9ZRNSPI6nZvp64FTq5GAqjXFRDk,11746
|
|
18
18
|
google_cloud_pipeline_components/_implementation/llm/private_text_comparison_importer.py,sha256=Cwug1Tmk6tvg-l_qyxA6qr1-rslx33RkxA17sedFCz4,4133
|
|
19
19
|
google_cloud_pipeline_components/_implementation/llm/private_text_importer.py,sha256=go6SqWEH5fuDuXYeIzglQIARNG_etOwhyhCsbQgFI8I,4418
|
|
20
|
-
google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py,sha256=
|
|
21
|
-
google_cloud_pipeline_components/_implementation/llm/reinforcer.py,sha256=
|
|
22
|
-
google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py,sha256=
|
|
23
|
-
google_cloud_pipeline_components/_implementation/llm/reward_model_trainer.py,sha256=
|
|
20
|
+
google_cloud_pipeline_components/_implementation/llm/reinforcement_learning_graph.py,sha256=qFZRwHSXHKE4TCroZwFs4pAO5WJfhIvPqCMUlvoKHvg,10152
|
|
21
|
+
google_cloud_pipeline_components/_implementation/llm/reinforcer.py,sha256=yeg_2DGonlmf21KQYQXN28Mza0d6P0IPt9lDEyK5de8,6948
|
|
22
|
+
google_cloud_pipeline_components/_implementation/llm/reward_model_graph.py,sha256=b6IVt75nORhSmotK0dIwRN1Bx7qKAk6fBdQwQq0paEE,10767
|
|
23
|
+
google_cloud_pipeline_components/_implementation/llm/reward_model_trainer.py,sha256=xkj2hnHpZMLutGNxoewTVUsRR2bkLNkVUhWwjY9EXKA,5596
|
|
24
24
|
google_cloud_pipeline_components/_implementation/llm/rlhf_preprocessor.py,sha256=IqwZ2PXGOOfBNeDIp3exKeqPn6kGwaSaozJEr4cbhDs,2510
|
|
25
25
|
google_cloud_pipeline_components/_implementation/llm/supervised_fine_tuner.py,sha256=5HzjhMXMRmaWGv3BlAi8lBg6MMlaaHlbqKez3ZcX5Ss,4951
|
|
26
|
-
google_cloud_pipeline_components/_implementation/llm/upload_llm_model.py,sha256=
|
|
26
|
+
google_cloud_pipeline_components/_implementation/llm/upload_llm_model.py,sha256=iLtHLtrXzDbRHbgOW99SA9XDdhr_8omhLX1bTP-tsaE,4724
|
|
27
27
|
google_cloud_pipeline_components/_implementation/llm/upload_tensorboard_metrics.py,sha256=BN-0TQFl49TcE54ltBRt4iZYTjO7718eCLwHKR58ips,4010
|
|
28
|
-
google_cloud_pipeline_components/_implementation/llm/utils.py,sha256=
|
|
28
|
+
google_cloud_pipeline_components/_implementation/llm/utils.py,sha256=E250cmvw0QUnt8NLDl5crK6K1o1FguUglQIdBVqfwQI,5548
|
|
29
29
|
google_cloud_pipeline_components/_implementation/llm/utils_test.py,sha256=co8gWyrowY5CpkFNsaLGQlD_gpIykkVI7czxIizp5cM,2864
|
|
30
|
-
google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py,sha256=
|
|
30
|
+
google_cloud_pipeline_components/_implementation/llm/validate_pipeline.py,sha256=SRIqcvZIvFgioH1zvcUsJtmYi8apEsmHtrhmUVK-Pgs,4078
|
|
31
31
|
google_cloud_pipeline_components/_implementation/llm/generated/__init__.py,sha256=LYxMCPeZcfGqRbt3mo3hY7U02A6G8aWdP_RtdoqxNrQ,606
|
|
32
|
-
google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py,sha256=
|
|
32
|
+
google_cloud_pipeline_components/_implementation/llm/generated/refined_image_versions.py,sha256=C9B3k4m6r22Qvg7GN1E8S_Os3hkk4akAy7ML5nON42Y,763
|
|
33
33
|
google_cloud_pipeline_components/_implementation/model/__init__.py,sha256=KmOW74re0WZ93DWM1lqqQYbv6w1aIW66BMV3gaAdg3s,811
|
|
34
34
|
google_cloud_pipeline_components/_implementation/model/get_model/__init__.py,sha256=cXMkDUZHVSbXeXSa3qsI6Ef8Tad9nmusw5NUZaYORdE,662
|
|
35
35
|
google_cloud_pipeline_components/_implementation/model/get_model/component.py,sha256=H2sbMTWCw8nMDMT-Ni9-pdzVXEFmHYjtP3z1LcI5m5w,2307
|
|
36
|
-
google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py,sha256=
|
|
36
|
+
google_cloud_pipeline_components/_implementation/model_evaluation/__init__.py,sha256=eq9g2RsGgKmIHsjsUEvoD1GFmix0Pk-GIzU1hfapCbM,5839
|
|
37
37
|
google_cloud_pipeline_components/_implementation/model_evaluation/utils.py,sha256=9V34RtPZSRNeBwcsImaZM6YC3T7CafT_E00Iby4KHxw,3540
|
|
38
38
|
google_cloud_pipeline_components/_implementation/model_evaluation/version.py,sha256=QHCvg6WZkQq-VdX7hbWCQHK-7SjHpxSppGa9ppyJNmk,963
|
|
39
39
|
google_cloud_pipeline_components/_implementation/model_evaluation/chunking/__init__.py,sha256=PRHVsIq1dFb0mweuU0kfUUP90FbX--kxdBGCpwfLTgA,665
|
|
@@ -77,6 +77,8 @@ google_cloud_pipeline_components/_implementation/model_evaluation/llm_safety_bia
google_cloud_pipeline_components/_implementation/model_evaluation/llm_safety_bias/evaluation_llm_safety_bias_pipeline.py,sha256=n5I3_RPbDR3XZh8E2z_3hX2H3oVZB5nFzmrXOM3lB-A,7005
google_cloud_pipeline_components/_implementation/model_evaluation/model_inference/__init__.py,sha256=71Kfj1wk1UuUch15F1u2Nv47v2lAjeL28uZ-8Fw61_c,672
google_cloud_pipeline_components/_implementation/model_evaluation/model_inference/component.py,sha256=0A9-gUDLg7lb3KBXd1ipOb1hnY0z48ey_M2NDIXFkqU,16211
+google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/__init__.py,sha256=HSHK9T5ApjeFSjrTvweQkYf2PiCZ481NpEcv1DGfeR4,647
+google_cloud_pipeline_components/_implementation/model_evaluation/model_name_preprocessor/component.py,sha256=--RQmRZ-m4nlX4SgF-b4NxpcX9pIGkHv7YnAkdoe2OA,2583
google_cloud_pipeline_components/_implementation/model_evaluation/target_field_data_remover/__init__.py,sha256=B4VuH-pN_qGbJjaVNWB5b2vfdPP5yqqTphRNLukMY6o,682
google_cloud_pipeline_components/_implementation/model_evaluation/target_field_data_remover/component.py,sha256=OeMON9Oms1xso5Emm4W5q6oUgcix9XWLlKoTmN-OUfI,5738
google_cloud_pipeline_components/_implementation/model_evaluation/text2sql/__init__.py,sha256=xGhjYMo_kirteCvrJqoF2jiSLexdkSRY0C-2sNkNlbk,664
@@ -325,13 +327,11 @@ google_cloud_pipeline_components/preview/llm/__init__.py,sha256=acmo31OFe-d7Ubo5
google_cloud_pipeline_components/preview/llm/infer/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
google_cloud_pipeline_components/preview/llm/infer/component.py,sha256=lRksuPdsDj9UYkBWh7Rb9w-HZ_SbrTAgZKeTtQYmkR8,7798
google_cloud_pipeline_components/preview/llm/rlaif/__init__.py,sha256=LYxMCPeZcfGqRbt3mo3hY7U02A6G8aWdP_RtdoqxNrQ,606
-google_cloud_pipeline_components/preview/llm/rlaif/component.py,sha256=
+google_cloud_pipeline_components/preview/llm/rlaif/component.py,sha256=F4uzuz2pcLDd5IJ6--IkZf2dcmimiYfCivwmPwL_QIg,10801
google_cloud_pipeline_components/preview/llm/rlhf/__init__.py,sha256=sb6SfJl6rt3AKjiWxd-KO9DSiZ3PzGZRcsqKuc1A2Cg,606
-google_cloud_pipeline_components/preview/llm/rlhf/component.py,sha256=
-google_cloud_pipeline_components/preview/model_evaluation/__init__.py,sha256=
+google_cloud_pipeline_components/preview/llm/rlhf/component.py,sha256=xZfzOZ91_TB7yJgWT63WmC9jQiiROsavIZgC7lsWNIk,12613
+google_cloud_pipeline_components/preview/model_evaluation/__init__.py,sha256=GvsNkxuUH0Z9HpZpTXxTPRL8621lWDbXMwg_UUDtrCg,1939
google_cloud_pipeline_components/preview/model_evaluation/data_bias_component.py,sha256=YiwkWfbGymX_lDIg_x7AP6nYMm3MQp_NgV8xuSZxCpU,5791
-google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_classification_pipeline.py,sha256=IjCIakZFh7KB6kmku7ztaH-qFm9LoEctwwMtxmDOg5g,11262
-google_cloud_pipeline_components/preview/model_evaluation/evaluation_llm_text_generation_pipeline.py,sha256=vogBy45SASKWyfj4lSpk4lSu4TFMdAlc2Mkveih5ZhQ,10761
google_cloud_pipeline_components/preview/model_evaluation/feature_attribution_component.py,sha256=XWrI1inQ9hKixFrp2LUdgu7hONYUvbsxv2GXZ-UTkCY,7450
google_cloud_pipeline_components/preview/model_evaluation/feature_attribution_graph_component.py,sha256=jesgBUKbIB_qQoYb5-Bv_LBbFHl0tPyMlVFx-o1eE6k,13624
google_cloud_pipeline_components/preview/model_evaluation/model_bias_component.py,sha256=R8WhT8jf_OOpMuABRh2BYTDEcfiGAf6VA-vFgiTymYY,6674
@@ -341,7 +341,7 @@ google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evalua
google_cloud_pipeline_components/preview/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py,sha256=Sw-OvuJnDId9l1kmpX6zgOA-nEskUV9EQ7rcHqShI9g,8856
google_cloud_pipeline_components/proto/__init__.py,sha256=aiPUc6gpQwG9cRTYfw3ChFCJfDr3vAIsm2eMYUDJjJQ,661
google_cloud_pipeline_components/proto/gcp_resources_pb2.py,sha256=ssNNm4zjiWbuBUS7IH6kyrvvfmcC_Z5F7hOAuQe_YLk,2134
-google_cloud_pipeline_components/proto/preflight_validations_pb2.py,sha256=
+google_cloud_pipeline_components/proto/preflight_validations_pb2.py,sha256=5MioLb12O4to8LYXFv-irH31BKot2LGtXvxOeXZSQlQ,2588
google_cloud_pipeline_components/proto/template_metadata_pb2.py,sha256=chzvvNZj5XEvifJbN77L7ZJv7jta-4ycHUKjTLZYCAg,7773
google_cloud_pipeline_components/types/__init__.py,sha256=1WFkL49QEy-gNb6ywQOE4yZkD7DoULAeiL1tLdb3S28,606
google_cloud_pipeline_components/types/artifact_types.py,sha256=zvwvzRuFb_s1VS1mtKkltOOACATJk-kG7dVFOUasfw4,23725
@@ -507,7 +507,7 @@ google_cloud_pipeline_components/v1/model/get_model/__init__.py,sha256=oAWKl9PXS
google_cloud_pipeline_components/v1/model/get_model/component.py,sha256=lx5x2MJ-Ji75z12W1RMqKlGzxbmgZZAmYBy48XQmWFI,2089
google_cloud_pipeline_components/v1/model/upload_model/__init__.py,sha256=6uwVQw6h3TXxei5imUE4JaS97XXzDRPQyNnTE-qFjck,661
google_cloud_pipeline_components/v1/model/upload_model/component.py,sha256=6zy9G2AK2twiyT-B2X15qovvi6qHu0koRzzzelgN8CQ,7280
-google_cloud_pipeline_components/v1/model_evaluation/__init__.py,sha256=
+google_cloud_pipeline_components/v1/model_evaluation/__init__.py,sha256=haAiMuha2cEVJQjOcbf3XblUBdSieLm_JUT_wwyVssc,2739
google_cloud_pipeline_components/v1/model_evaluation/classification_component.py,sha256=x0pUY4OwFIkmS11Q7rDLI6fspaDUBo6wU5BBP2jAKC0,12122
google_cloud_pipeline_components/v1/model_evaluation/error_analysis_pipeline.py,sha256=l972cEWDViVV41oCy0jTsX96Pau49D3KdJA3yAjKEY0,20122
google_cloud_pipeline_components/v1/model_evaluation/evaluated_annotation_pipeline.py,sha256=JskLsIHvLNNvNaMD8gTa0NWlB5gKiSSyqeC78Fn5OW8,12142
@@ -515,14 +515,16 @@ google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_f
google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_pipeline.py,sha256=p-GH_tVqffHwck5Sll0BHsnvVAHQk48WNAUohZxATcs,37108
google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_unstructure_data_pipeline.py,sha256=aiZOK5BE5mdqJL3s4pU1Y_ynHvWBE9JIxl9UrJuNsco,42404
google_cloud_pipeline_components/v1/model_evaluation/evaluation_feature_attribution_pipeline.py,sha256=ChDwHvPCn0prrK1FLvEhAbaTmA153M9NG3Wj3QIlNHs,51173
+google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_classification_pipeline.py,sha256=Vf_O-8VWKZlR_tCmFNNcQWp6VWK8c67IBWKxbt3ZQBg,11672
+google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py,sha256=ufVZwpEerSLLo_yGJVh7cgBBTeL3RmkVuWKHkCiaQ9U,11056
google_cloud_pipeline_components/v1/model_evaluation/forecasting_component.py,sha256=gOnvKAJWa3velczeuVBCzW6b_tcc2v_lNFqHXGhjD44,10017
google_cloud_pipeline_components/v1/model_evaluation/regression_component.py,sha256=eFrjrKQot3-SlRCoKoTOEsyp2Xj0GfDtrjpxTDKAHYY,9117
google_cloud_pipeline_components/v1/vertex_notification_email/__init__.py,sha256=YIRljNy_oHY_vRda-kfhm5QiulNd_SIIPbmpzOiYJ0k,863
google_cloud_pipeline_components/v1/vertex_notification_email/component.py,sha256=Dau8ZI0mzLBnLOUBQm6EtK8gbtX1u57t76Ud5qlg9xc,2163
google_cloud_pipeline_components/v1/wait_gcp_resources/__init__.py,sha256=w6dfz-rYsYnxFapRH1Dix3GVz0mhPW0m1IVpE6z8jbg,878
google_cloud_pipeline_components/v1/wait_gcp_resources/component.py,sha256=Nsfj5c3eeZq83fHLvv2IlpK4jrjxLxRksFYOl5W6JnA,2468
-google_cloud_pipeline_components-2.
-google_cloud_pipeline_components-2.
-google_cloud_pipeline_components-2.
-google_cloud_pipeline_components-2.
-google_cloud_pipeline_components-2.
+google_cloud_pipeline_components-2.13.0.dist-info/LICENSE,sha256=VAc1R5OxOELKsX5L5Ldp5THfNtxtt1cMIZBaC0Jdj5Q,13118
+google_cloud_pipeline_components-2.13.0.dist-info/METADATA,sha256=Sm911jjQnk7LD_wKCBuZ2elaBUbL3nqtZYnbmDgOKR8,5890
+google_cloud_pipeline_components-2.13.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+google_cloud_pipeline_components-2.13.0.dist-info/top_level.txt,sha256=E8T4T8KGMGLXbHvt2goa98oezRpxryPC6QhWBZ27Hhc,33
+google_cloud_pipeline_components-2.13.0.dist-info/RECORD,,
File without changes
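Each RECORD entry above follows the wheel convention path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped. A minimal sketch of recomputing one of these values locally follows; the helper name and example path are illustrative only, not part of the package.

import base64
import hashlib

def record_digest(path: str) -> str:
    # Hypothetical helper: compute the RECORD-style "sha256=<...>" value for a file,
    # i.e. urlsafe base64 of the raw SHA-256 digest with trailing '=' padding removed.
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode()

# Example (assumes the wheel has been unpacked into the current directory):
# print(record_digest("google_cloud_pipeline_components/version.py"))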