google-genai 1.29.0__py3-none-any.whl → 1.31.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +41 -37
- google/genai/_automatic_function_calling_util.py +12 -0
- google/genai/_live_converters.py +51 -6
- google/genai/_tokens_converters.py +26 -3
- google/genai/_transformers.py +51 -0
- google/genai/batches.py +166 -3
- google/genai/caches.py +51 -6
- google/genai/chats.py +1 -0
- google/genai/files.py +1 -0
- google/genai/live.py +92 -88
- google/genai/models.py +416 -16
- google/genai/operations.py +1 -0
- google/genai/tokens.py +1 -0
- google/genai/tunings.py +315 -43
- google/genai/types.py +1518 -421
- google/genai/version.py +1 -1
- {google_genai-1.29.0.dist-info → google_genai-1.31.0.dist-info}/METADATA +1 -1
- google_genai-1.31.0.dist-info/RECORD +35 -0
- google_genai-1.29.0.dist-info/RECORD +0 -35
- {google_genai-1.29.0.dist-info → google_genai-1.31.0.dist-info}/WHEEL +0 -0
- {google_genai-1.29.0.dist-info → google_genai-1.31.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.29.0.dist-info → google_genai-1.31.0.dist-info}/top_level.txt +0 -0
google/genai/types.py
CHANGED
@@ -26,7 +26,7 @@ import types as builtin_types
 import typing
 from typing import Any, Callable, Literal, Optional, Sequence, Union, _UnionGenericAlias  # type: ignore
 import pydantic
-from pydantic import Field
+from pydantic import ConfigDict, Field, PrivateAttr, model_validator
 from typing_extensions import Self, TypedDict
 from . import _common
 
@@ -76,10 +76,20 @@ else:
   McpClientSession = None
   McpCallToolResult = None
 
+if typing.TYPE_CHECKING:
+  import yaml
+else:
+  try:
+    import yaml
+  except ImportError:
+    yaml = None
+
 logger = logging.getLogger('google_genai.types')
 
 T = typing.TypeVar('T', bound='GenerateContentResponse')
 
+MetricSubclass = typing.TypeVar('MetricSubclass', bound='Metric')
+
 
 class Outcome(_common.CaseInSensitiveEnum):
   """Required. Outcome of the code execution."""
@@ -223,15 +233,6 @@ class ApiSpec(_common.CaseInSensitiveEnum):
   """Elastic search API spec."""
 
 
-class Environment(_common.CaseInSensitiveEnum):
-  """Required. The environment being operated."""
-
-  ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED'
-  """Defaults to browser."""
-  ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER'
-  """Operates in a web browser."""
-
-
 class UrlRetrievalStatus(_common.CaseInSensitiveEnum):
   """Status of the url retrieval."""
 
@@ -398,6 +399,17 @@ class JobState(_common.CaseInSensitiveEnum):
   """The job is partially succeeded, some results may be missing due to errors."""
 
 
+class TuningMode(_common.CaseInSensitiveEnum):
+  """Tuning mode."""
+
+  TUNING_MODE_UNSPECIFIED = 'TUNING_MODE_UNSPECIFIED'
+  """Tuning mode is unspecified."""
+  TUNING_MODE_FULL = 'TUNING_MODE_FULL'
+  """Full fine-tuning mode."""
+  TUNING_MODE_PEFT_ADAPTER = 'TUNING_MODE_PEFT_ADAPTER'
+  """PEFT adapter tuning mode."""
+
+
 class AdapterSize(_common.CaseInSensitiveEnum):
   """Optional. Adapter size for tuning."""
 
@@ -417,6 +429,22 @@ class AdapterSize(_common.CaseInSensitiveEnum):
   """Adapter size 32."""
 
 
+class JSONSchemaType(Enum):
+  """The type of the data supported by JSON Schema.
+
+  The values of the enums are lower case strings, while the values of the enums
+  for the Type class are upper case strings.
+  """
+
+  NULL = 'null'
+  BOOLEAN = 'boolean'
+  OBJECT = 'object'
+  ARRAY = 'array'
+  NUMBER = 'number'
+  INTEGER = 'integer'
+  STRING = 'string'
+
+
 class FeatureSelectionPreference(_common.CaseInSensitiveEnum):
   """Options for feature selection preference."""
 
@@ -448,6 +476,15 @@ class DynamicRetrievalConfigMode(_common.CaseInSensitiveEnum):
   """Run retrieval only when system decides it is necessary."""
 
 
+class Environment(_common.CaseInSensitiveEnum):
+  """The environment being operated."""
+
+  ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED'
+  """Defaults to browser."""
+  ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER'
+  """Operates in a web browser."""
+
+
 class FunctionCallingConfigMode(_common.CaseInSensitiveEnum):
   """Config for the function calling config mode."""
 
@@ -543,6 +580,16 @@ class EditMode(_common.CaseInSensitiveEnum):
   EDIT_MODE_PRODUCT_IMAGE = 'EDIT_MODE_PRODUCT_IMAGE'
 
 
+class SegmentMode(_common.CaseInSensitiveEnum):
+  """Enum that represents the segmentation mode."""
+
+  FOREGROUND = 'FOREGROUND'
+  BACKGROUND = 'BACKGROUND'
+  PROMPT = 'PROMPT'
+  SEMANTIC = 'SEMANTIC'
+  INTERACTIVE = 'INTERACTIVE'
+
+
 class VideoCompressionQuality(_common.CaseInSensitiveEnum):
   """Enum that controls the compression quality of the generated videos."""
 
@@ -588,6 +635,19 @@ class MediaModality(_common.CaseInSensitiveEnum):
   """Document, e.g. PDF."""
 
 
+class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
+  """Specifies how the response should be scheduled in the conversation."""
+
+  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
+  """This value is unused."""
+  SILENT = 'SILENT'
+  """Only add the result to the conversation context, do not interrupt or trigger generation."""
+  WHEN_IDLE = 'WHEN_IDLE'
+  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
+  INTERRUPT = 'INTERRUPT'
+  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
+
+
 class StartSensitivity(_common.CaseInSensitiveEnum):
   """Start of speech sensitivity."""
 
@@ -632,19 +692,6 @@ class TurnCoverage(_common.CaseInSensitiveEnum):
   """The users turn includes all realtime input since the last turn, including inactivity (e.g. silence on the audio stream)."""
 
 
-class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
-  """Specifies how the response should be scheduled in the conversation."""
-
-  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
-  """This value is unused."""
-  SILENT = 'SILENT'
-  """Only add the result to the conversation context, do not interrupt or trigger generation."""
-  WHEN_IDLE = 'WHEN_IDLE'
-  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
-  INTERRUPT = 'INTERRUPT'
-  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
-
-
 class Scale(_common.CaseInSensitiveEnum):
   """Scale of the generated music."""
 
@@ -1131,67 +1178,6 @@ class Content(_common.BaseModel):
   )
 
 
-class UserContent(Content):
-  """UserContent facilitates the creation of a Content object with a user role.
-
-  Example usages:
-
-
-  - Create a user Content object with a string:
-    user_content = UserContent("Why is the sky blue?")
-  - Create a user Content object with a file data Part object:
-    user_content = UserContent(Part.from_uri(file_uril="gs://bucket/file.txt",
-    mime_type="text/plain"))
-  - Create a user Content object with byte data Part object:
-    user_content = UserContent(Part.from_bytes(data=b"Hello, World!",
-    mime_type="text/plain"))
-
-  You can create a user Content object using other classmethods in the Part
-  class as well.
-  You can also create a user Content using a list of Part objects or strings.
-  """
-
-  role: Literal['user'] = Field(default='user', init=False, frozen=True)
-  parts: list[Part] = Field()
-
-  def __init__(
-      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
-  ):
-    from . import _transformers as t
-
-    super().__init__(parts=t.t_parts(parts=parts))
-
-
-class ModelContent(Content):
-  """ModelContent facilitates the creation of a Content object with a model role.
-
-  Example usages:
-
-  - Create a model Content object with a string:
-    model_content = ModelContent("Why is the sky blue?")
-  - Create a model Content object with a file data Part object:
-    model_content = ModelContent(Part.from_uri(file_uril="gs://bucket/file.txt",
-    mime_type="text/plain"))
-  - Create a model Content object with byte data Part object:
-    model_content = ModelContent(Part.from_bytes(data=b"Hello, World!",
-    mime_type="text/plain"))
-
-  You can create a model Content object using other classmethods in the Part
-  class as well.
-  You can also create a model Content using a list of Part objects or strings.
-  """
-
-  role: Literal['model'] = Field(default='model', init=False, frozen=True)
-  parts: list[Part] = Field()
-
-  def __init__(
-      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
-  ):
-    from . import _transformers as t
-
-    super().__init__(parts=t.t_parts(parts=parts))
-
-
 class ContentDict(TypedDict, total=False):
   """Contains the multi-part content of a message."""
 
@@ -1334,23 +1320,7 @@ class HttpOptionsDict(TypedDict, total=False):
 HttpOptionsOrDict = Union[HttpOptions, HttpOptionsDict]
 
 
-class JSONSchemaType(Enum):
-  """The type of the data supported by JSON Schema.
-
-  The values of the enums are lower case strings, while the values of the enums
-  for the Type class are upper case strings.
-  """
-
-  NULL = 'null'
-  BOOLEAN = 'boolean'
-  OBJECT = 'object'
-  ARRAY = 'array'
-  NUMBER = 'number'
-  INTEGER = 'integer'
-  STRING = 'string'
-
-
-class JSONSchema(pydantic.BaseModel):
+class JSONSchema(_common.BaseModel):
   """A subset of JSON Schema according to 2020-12 JSON Schema draft.
 
   Represents a subset of a JSON Schema object that is used by the Gemini model.
@@ -2237,7 +2207,16 @@ class FunctionDeclaration(_common.BaseModel):
       callable: Callable[..., Any],
       behavior: Optional[Behavior] = None,
   ) -> 'FunctionDeclaration':
-    """Converts a Callable to a FunctionDeclaration based on the client.
+    """Converts a Callable to a FunctionDeclaration based on the client.
+
+    Note: For best results prefer
+    [Google-style
+    docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods)
+    when describing arguments. This function does **not** parse argument
+    descriptions into the property description slots of the resulting structure.
+    Instead it sends the whole docstring in the top-level function description.
+    Google-style docstring are closest to what the model is trained on.
+    """
     if client.vertexai:
       return cls.from_callable_with_api_option(
           callable=callable, api_option='VERTEX_AI', behavior=behavior
@@ -2325,6 +2304,11 @@ class GoogleSearch(_common.BaseModel):
       If customers set a start time, they must set an end time (and vice versa).
       """,
   )
+  exclude_domains: Optional[list[str]] = Field(
+      default=None,
+      description="""Optional. List of domains to be excluded from the search results.
+      The default limit is 2000 domains.""",
+  )
 
 
 class GoogleSearchDict(TypedDict, total=False):
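A minimal usage sketch of the new `exclude_domains` field; the client setup, model name, and prompt are illustrative assumptions and not part of this diff, while the `Tool`/`GoogleSearch`/`GenerateContentConfig` wiring follows the existing public API.

```python
from google import genai
from google.genai import types

client = genai.Client()  # assumes API key or Vertex AI environment configuration

config = types.GenerateContentConfig(
    tools=[
        types.Tool(
            google_search=types.GoogleSearch(
                # New in this release; the docstring states a default limit of 2000 domains.
                exclude_domains=['example.com'],
            )
        )
    ]
)
response = client.models.generate_content(
    model='gemini-2.5-flash',  # illustrative model name
    contents='Summarize recent coverage of this topic.',
    config=config,
)
print(response.text)
```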
@@ -2335,6 +2319,10 @@ class GoogleSearchDict(TypedDict, total=False):
   If customers set a start time, they must set an end time (and vice versa).
   """
 
+  exclude_domains: Optional[list[str]]
+  """Optional. List of domains to be excluded from the search results.
+  The default limit is 2000 domains."""
+
 
 GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]
 
@@ -2391,13 +2379,17 @@ GoogleSearchRetrievalOrDict = Union[
 class EnterpriseWebSearch(_common.BaseModel):
   """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance."""
 
-
+  exclude_domains: Optional[list[str]] = Field(
+      default=None,
+      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""",
+  )
 
 
 class EnterpriseWebSearchDict(TypedDict, total=False):
   """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance."""
 
-
+  exclude_domains: Optional[list[str]]
+  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains."""
 
 
 EnterpriseWebSearchOrDict = Union[EnterpriseWebSearch, EnterpriseWebSearchDict]
@@ -2607,6 +2599,24 @@ class UrlContextDict(TypedDict, total=False):
 UrlContextOrDict = Union[UrlContext, UrlContextDict]
 
 
+class ToolComputerUse(_common.BaseModel):
+  """Tool to support computer use."""
+
+  environment: Optional[Environment] = Field(
+      default=None, description="""Required. The environment being operated."""
+  )
+
+
+class ToolComputerUseDict(TypedDict, total=False):
+  """Tool to support computer use."""
+
+  environment: Optional[Environment]
+  """Required. The environment being operated."""
+
+
+ToolComputerUseOrDict = Union[ToolComputerUse, ToolComputerUseDict]
+
+
 class ApiAuthApiKeyConfig(_common.BaseModel):
   """The API secret."""
 
@@ -3167,24 +3177,6 @@ class ToolCodeExecutionDict(TypedDict, total=False):
 ToolCodeExecutionOrDict = Union[ToolCodeExecution, ToolCodeExecutionDict]
 
 
-class ToolComputerUse(_common.BaseModel):
-  """Tool to support computer use."""
-
-  environment: Optional[Environment] = Field(
-      default=None, description="""Required. The environment being operated."""
-  )
-
-
-class ToolComputerUseDict(TypedDict, total=False):
-  """Tool to support computer use."""
-
-  environment: Optional[Environment]
-  """Required. The environment being operated."""
-
-
-ToolComputerUseOrDict = Union[ToolComputerUse, ToolComputerUseDict]
-
-
 class Tool(_common.BaseModel):
   """Tool details of a tool that the model may use to generate a response."""
 
@@ -3219,13 +3211,15 @@ class Tool(_common.BaseModel):
       default=None,
       description="""Optional. Tool to support URL context retrieval.""",
   )
-
+  computer_use: Optional[ToolComputerUse] = Field(
       default=None,
-      description="""Optional.
+      description="""Optional. Tool to support the model interacting directly with the
+      computer. If enabled, it automatically populates computer-use specific
+      Function Declarations.""",
   )
-
+  code_execution: Optional[ToolCodeExecution] = Field(
       default=None,
-      description="""Optional.
+      description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""",
   )
 
 
@@ -3256,12 +3250,14 @@ class ToolDict(TypedDict, total=False):
   url_context: Optional[UrlContextDict]
   """Optional. Tool to support URL context retrieval."""
 
+  computer_use: Optional[ToolComputerUseDict]
+  """Optional. Tool to support the model interacting directly with the
+  computer. If enabled, it automatically populates computer-use specific
+  Function Declarations."""
+
   code_execution: Optional[ToolCodeExecutionDict]
   """Optional. CodeExecution tool type. Enables the model to execute code as part of generation."""
 
-  computer_use: Optional[ToolComputerUseDict]
-  """Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations."""
-
 
 ToolOrDict = Union[Tool, ToolDict]
 if _is_mcp_imported:
@@ -4485,66 +4481,235 @@ class UrlContextMetadataDict(TypedDict, total=False):
 UrlContextMetadataOrDict = Union[UrlContextMetadata, UrlContextMetadataDict]
 
 
-class
-"""
+class GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution(_common.BaseModel):
+  """Author attribution for a photo or review."""
 
-
-default=None,
-description="""Page where chunk starts in the document. Inclusive. 1-indexed.""",
+  display_name: Optional[str] = Field(
+      default=None, description="""Name of the author of the Photo or Review."""
   )
-
+  photo_uri: Optional[str] = Field(
       default=None,
-description="""
+      description="""Profile photo URI of the author of the Photo or Review.""",
+  )
+  uri: Optional[str] = Field(
+      default=None, description="""URI of the author of the Photo or Review."""
   )
 
 
-class
-
+class GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict(
+    TypedDict, total=False
+):
+  """Author attribution for a photo or review."""
 
-
-"""
+  display_name: Optional[str]
+  """Name of the author of the Photo or Review."""
 
-
-"""
+  photo_uri: Optional[str]
+  """Profile photo URI of the author of the Photo or Review."""
+
+  uri: Optional[str]
+  """URI of the author of the Photo or Review."""
 
 
-
+GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionOrDict = Union[
+    GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution,
+    GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict,
+]
 
 
-class
-"""
+class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel):
+  """Encapsulates a review snippet."""
 
-
+  author_attribution: Optional[
+      GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution
+  ] = Field(default=None, description="""This review's author.""")
+  flag_content_uri: Optional[str] = Field(
       default=None,
-description="""
+      description="""A link where users can flag a problem with the review.""",
   )
-
-default=None, description="""
+  google_maps_uri: Optional[str] = Field(
+      default=None, description="""A link to show the review on Google Maps."""
+  )
+  relative_publish_time_description: Optional[str] = Field(
+      default=None,
+      description="""A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.""",
+  )
+  review: Optional[str] = Field(
+      default=None,
+      description="""A reference representing this place review which may be used to look up this place review again.""",
   )
 
 
-class
-
+class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict(
+    TypedDict, total=False
+):
+  """Encapsulates a review snippet."""
 
-
-
+  author_attribution: Optional[
+      GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict
+  ]
+  """This review's author."""
 
-
-"""
+  flag_content_uri: Optional[str]
+  """A link where users can flag a problem with the review."""
 
+  google_maps_uri: Optional[str]
+  """A link to show the review on Google Maps."""
 
-
+  relative_publish_time_description: Optional[str]
+  """A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country."""
 
+  review: Optional[str]
+  """A reference representing this place review which may be used to look up this place review again."""
 
-class GroundingChunkRetrievedContext(_common.BaseModel):
-  """Chunk from context retrieved by the retrieval tools."""
 
-
+GroundingChunkMapsPlaceAnswerSourcesReviewSnippetOrDict = Union[
+    GroundingChunkMapsPlaceAnswerSourcesReviewSnippet,
+    GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict,
+]
+
+
+class GroundingChunkMapsPlaceAnswerSources(_common.BaseModel):
+  """Sources used to generate the place answer."""
+
+  flag_content_uri: Optional[str] = Field(
       default=None,
-description="""
-)
-
-
+      description="""A link where users can flag a problem with the generated answer.""",
+  )
+  review_snippets: Optional[
+      list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippet]
+  ] = Field(
+      default=None,
+      description="""Snippets of reviews that are used to generate the answer.""",
+  )
+
+
+class GroundingChunkMapsPlaceAnswerSourcesDict(TypedDict, total=False):
+  """Sources used to generate the place answer."""
+
+  flag_content_uri: Optional[str]
+  """A link where users can flag a problem with the generated answer."""
+
+  review_snippets: Optional[
+      list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict]
+  ]
+  """Snippets of reviews that are used to generate the answer."""
+
+
+GroundingChunkMapsPlaceAnswerSourcesOrDict = Union[
+    GroundingChunkMapsPlaceAnswerSources,
+    GroundingChunkMapsPlaceAnswerSourcesDict,
+]
+
+
+class GroundingChunkMaps(_common.BaseModel):
+  """Chunk from Google Maps."""
+
+  place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSources] = Field(
+      default=None,
+      description="""Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content.""",
+  )
+  place_id: Optional[str] = Field(
+      default=None,
+      description="""This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place.""",
+  )
+  text: Optional[str] = Field(
+      default=None, description="""Text of the chunk."""
+  )
+  title: Optional[str] = Field(
+      default=None, description="""Title of the chunk."""
+  )
+  uri: Optional[str] = Field(
+      default=None, description="""URI reference of the chunk."""
+  )
+
+
+class GroundingChunkMapsDict(TypedDict, total=False):
+  """Chunk from Google Maps."""
+
+  place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSourcesDict]
+  """Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content."""
+
+  place_id: Optional[str]
+  """This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place."""
+
+  text: Optional[str]
+  """Text of the chunk."""
+
+  title: Optional[str]
+  """Title of the chunk."""
+
+  uri: Optional[str]
+  """URI reference of the chunk."""
+
+
+GroundingChunkMapsOrDict = Union[GroundingChunkMaps, GroundingChunkMapsDict]
+
+
+class RagChunkPageSpan(_common.BaseModel):
+  """Represents where the chunk starts and ends in the document."""
+
+  first_page: Optional[int] = Field(
+      default=None,
+      description="""Page where chunk starts in the document. Inclusive. 1-indexed.""",
+  )
+  last_page: Optional[int] = Field(
+      default=None,
+      description="""Page where chunk ends in the document. Inclusive. 1-indexed.""",
+  )
+
+
+class RagChunkPageSpanDict(TypedDict, total=False):
+  """Represents where the chunk starts and ends in the document."""
+
+  first_page: Optional[int]
+  """Page where chunk starts in the document. Inclusive. 1-indexed."""
+
+  last_page: Optional[int]
+  """Page where chunk ends in the document. Inclusive. 1-indexed."""
+
+
+RagChunkPageSpanOrDict = Union[RagChunkPageSpan, RagChunkPageSpanDict]
+
+
+class RagChunk(_common.BaseModel):
+  """A RagChunk includes the content of a chunk of a RagFile, and associated metadata."""
+
+  page_span: Optional[RagChunkPageSpan] = Field(
+      default=None,
+      description="""If populated, represents where the chunk starts and ends in the document.""",
+  )
+  text: Optional[str] = Field(
+      default=None, description="""The content of the chunk."""
+  )
+
+
+class RagChunkDict(TypedDict, total=False):
+  """A RagChunk includes the content of a chunk of a RagFile, and associated metadata."""
+
+  page_span: Optional[RagChunkPageSpanDict]
+  """If populated, represents where the chunk starts and ends in the document."""
+
+  text: Optional[str]
+  """The content of the chunk."""
+
+
+RagChunkOrDict = Union[RagChunk, RagChunkDict]
+
+
+class GroundingChunkRetrievedContext(_common.BaseModel):
+  """Chunk from context retrieved by the retrieval tools."""
+
+  document_name: Optional[str] = Field(
+      default=None,
+      description="""Output only. The full document name for the referenced Vertex AI Search document.""",
+  )
+  rag_chunk: Optional[RagChunk] = Field(
+      default=None,
+      description="""Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool.""",
+  )
+  text: Optional[str] = Field(
+      default=None, description="""Text of the attribution."""
   )
   title: Optional[str] = Field(
       default=None, description="""Title of the attribution."""
@@ -4557,6 +4722,9 @@ class GroundingChunkRetrievedContext(_common.BaseModel):
 class GroundingChunkRetrievedContextDict(TypedDict, total=False):
   """Chunk from context retrieved by the retrieval tools."""
 
+  document_name: Optional[str]
+  """Output only. The full document name for the referenced Vertex AI Search document."""
+
   rag_chunk: Optional[RagChunkDict]
   """Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool."""
 
@@ -4608,6 +4776,9 @@ GroundingChunkWebOrDict = Union[GroundingChunkWeb, GroundingChunkWebDict]
 class GroundingChunk(_common.BaseModel):
   """Grounding chunk."""
 
+  maps: Optional[GroundingChunkMaps] = Field(
+      default=None, description="""Grounding chunk from Google Maps."""
+  )
   retrieved_context: Optional[GroundingChunkRetrievedContext] = Field(
       default=None,
       description="""Grounding chunk from context retrieved by the retrieval tools.""",
@@ -4620,6 +4791,9 @@ class GroundingChunk(_common.BaseModel):
 class GroundingChunkDict(TypedDict, total=False):
   """Grounding chunk."""
 
+  maps: Optional[GroundingChunkMapsDict]
+  """Grounding chunk from Google Maps."""
+
   retrieved_context: Optional[GroundingChunkRetrievedContextDict]
   """Grounding chunk from context retrieved by the retrieval tools."""
 
@@ -4751,6 +4925,10 @@ SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict]
 class GroundingMetadata(_common.BaseModel):
   """Metadata returned to client when grounding is enabled."""
 
+  google_maps_widget_context_token: Optional[str] = Field(
+      default=None,
+      description="""Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding.""",
+  )
   grounding_chunks: Optional[list[GroundingChunk]] = Field(
       default=None,
       description="""List of supporting references retrieved from specified grounding source.""",
@@ -4778,6 +4956,9 @@ class GroundingMetadata(_common.BaseModel):
 class GroundingMetadataDict(TypedDict, total=False):
   """Metadata returned to client when grounding is enabled."""
 
+  google_maps_widget_context_token: Optional[str]
+  """Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding."""
+
   grounding_chunks: Optional[list[GroundingChunkDict]]
   """List of supporting references retrieved from specified grounding source."""
 
@@ -7009,6 +7190,236 @@ RecontextImageResponseOrDict = Union[
 ]
 
 
+class ScribbleImage(_common.BaseModel):
+  """An image mask representing a brush scribble."""
+
+  image: Optional[Image] = Field(
+      default=None,
+      description="""The brush scribble to guide segmentation. Valid for the interactive mode.""",
+  )
+
+
+class ScribbleImageDict(TypedDict, total=False):
+  """An image mask representing a brush scribble."""
+
+  image: Optional[ImageDict]
+  """The brush scribble to guide segmentation. Valid for the interactive mode."""
+
+
+ScribbleImageOrDict = Union[ScribbleImage, ScribbleImageDict]
+
+
+class SegmentImageSource(_common.BaseModel):
+  """A set of source input(s) for image segmentation."""
+
+  prompt: Optional[str] = Field(
+      default=None,
+      description="""A text prompt for guiding the model during image segmentation.
+      Required for prompt mode and semantic mode, disallowed for other modes.""",
+  )
+  image: Optional[Image] = Field(
+      default=None, description="""The image to be segmented."""
+  )
+  scribble_image: Optional[ScribbleImage] = Field(
+      default=None,
+      description="""The brush scribble to guide segmentation.
+      Required for the interactive mode, disallowed for other modes.""",
+  )
+
+
+class SegmentImageSourceDict(TypedDict, total=False):
+  """A set of source input(s) for image segmentation."""
+
+  prompt: Optional[str]
+  """A text prompt for guiding the model during image segmentation.
+  Required for prompt mode and semantic mode, disallowed for other modes."""
+
+  image: Optional[ImageDict]
+  """The image to be segmented."""
+
+  scribble_image: Optional[ScribbleImageDict]
+  """The brush scribble to guide segmentation.
+  Required for the interactive mode, disallowed for other modes."""
+
+
+SegmentImageSourceOrDict = Union[SegmentImageSource, SegmentImageSourceDict]
+
+
+class SegmentImageConfig(_common.BaseModel):
+  """Configuration for segmenting an image."""
+
+  http_options: Optional[HttpOptions] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
+  mode: Optional[SegmentMode] = Field(
+      default=None, description="""The segmentation mode to use."""
+  )
+  max_predictions: Optional[int] = Field(
+      default=None,
+      description="""The maximum number of predictions to return up to, by top
+      confidence score.""",
+  )
+  confidence_threshold: Optional[float] = Field(
+      default=None,
+      description="""The confidence score threshold for the detections as a decimal
+      value. Only predictions with a confidence score higher than this
+      threshold will be returned.""",
+  )
+  mask_dilation: Optional[float] = Field(
+      default=None,
+      description="""A decimal value representing how much dilation to apply to the
+      masks. 0 for no dilation. 1.0 means the masked area covers the whole
+      image.""",
+  )
+  binary_color_threshold: Optional[float] = Field(
+      default=None,
+      description="""The binary color threshold to apply to the masks. The threshold
+      can be set to a decimal value between 0 and 255 non-inclusive.
+      Set to -1 for no binary color thresholding.""",
+  )
+
+
+class SegmentImageConfigDict(TypedDict, total=False):
+  """Configuration for segmenting an image."""
+
+  http_options: Optional[HttpOptionsDict]
+  """Used to override HTTP request options."""
+
+  mode: Optional[SegmentMode]
+  """The segmentation mode to use."""
+
+  max_predictions: Optional[int]
+  """The maximum number of predictions to return up to, by top
+  confidence score."""
+
+  confidence_threshold: Optional[float]
+  """The confidence score threshold for the detections as a decimal
+  value. Only predictions with a confidence score higher than this
+  threshold will be returned."""
+
+  mask_dilation: Optional[float]
+  """A decimal value representing how much dilation to apply to the
+  masks. 0 for no dilation. 1.0 means the masked area covers the whole
+  image."""
+
+  binary_color_threshold: Optional[float]
+  """The binary color threshold to apply to the masks. The threshold
+  can be set to a decimal value between 0 and 255 non-inclusive.
+  Set to -1 for no binary color thresholding."""
+
+
+SegmentImageConfigOrDict = Union[SegmentImageConfig, SegmentImageConfigDict]
+
+
+class _SegmentImageParameters(_common.BaseModel):
+  """The parameters for segmenting an image."""
+
+  model: Optional[str] = Field(
+      default=None,
+      description="""ID of the model to use. For a list of models, see `Google models
+      <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
+  )
+  source: Optional[SegmentImageSource] = Field(
+      default=None,
+      description="""A set of source input(s) for image segmentation.""",
+  )
+  config: Optional[SegmentImageConfig] = Field(
+      default=None, description="""Configuration for image segmentation."""
+  )
+
+
+class _SegmentImageParametersDict(TypedDict, total=False):
+  """The parameters for segmenting an image."""
+
+  model: Optional[str]
+  """ID of the model to use. For a list of models, see `Google models
+  <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""
+
+  source: Optional[SegmentImageSourceDict]
+  """A set of source input(s) for image segmentation."""
+
+  config: Optional[SegmentImageConfigDict]
+  """Configuration for image segmentation."""
+
+
+_SegmentImageParametersOrDict = Union[
+    _SegmentImageParameters, _SegmentImageParametersDict
+]
+
+
+class EntityLabel(_common.BaseModel):
+  """An entity representing the segmented area."""
+
+  label: Optional[str] = Field(
+      default=None, description="""The label of the segmented entity."""
+  )
+  score: Optional[float] = Field(
+      default=None,
+      description="""The confidence score of the detected label.""",
+  )
+
+
+class EntityLabelDict(TypedDict, total=False):
+  """An entity representing the segmented area."""
+
+  label: Optional[str]
+  """The label of the segmented entity."""
+
+  score: Optional[float]
+  """The confidence score of the detected label."""
+
+
+EntityLabelOrDict = Union[EntityLabel, EntityLabelDict]
+
+
+class GeneratedImageMask(_common.BaseModel):
+  """A generated image mask."""
+
+  mask: Optional[Image] = Field(
+      default=None, description="""The generated image mask."""
+  )
+  labels: Optional[list[EntityLabel]] = Field(
+      default=None,
+      description="""The detected entities on the segmented area.""",
+  )
+
+
+class GeneratedImageMaskDict(TypedDict, total=False):
+  """A generated image mask."""
+
+  mask: Optional[ImageDict]
+  """The generated image mask."""
+
+  labels: Optional[list[EntityLabelDict]]
+  """The detected entities on the segmented area."""
+
+
+GeneratedImageMaskOrDict = Union[GeneratedImageMask, GeneratedImageMaskDict]
+
+
+class SegmentImageResponse(_common.BaseModel):
+  """The output images response."""
+
+  generated_masks: Optional[list[GeneratedImageMask]] = Field(
+      default=None,
+      description="""List of generated image masks.
+      """,
+  )
+
+
+class SegmentImageResponseDict(TypedDict, total=False):
+  """The output images response."""
+
+  generated_masks: Optional[list[GeneratedImageMaskDict]]
+  """List of generated image masks.
+  """
+
+
+SegmentImageResponseOrDict = Union[
+    SegmentImageResponse, SegmentImageResponseDict
+]
+
+
 class GetModelConfig(_common.BaseModel):
   """Optional parameters for models.get method."""
 
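A hedged sketch of how the new segmentation types compose. The field names come from the definitions above; the client method and model name are assumptions (the corresponding `models.py` changes are only summarized by line count in this diff).

```python
from google.genai import types

source = types.SegmentImageSource(
    image=types.Image.from_file(location='room.png'),
    prompt='the sofa',  # required for PROMPT and SEMANTIC modes
)
config = types.SegmentImageConfig(
    mode=types.SegmentMode.PROMPT,
    max_predictions=3,
    confidence_threshold=0.5,
)
# Assumed client call shape, not confirmed by this diff:
# response = client.models.segment_image(
#     model='image-segmentation-001',  # illustrative model ID
#     source=source,
#     config=config,
# )
# for mask in response.generated_masks or []:
#     print(mask.labels)
```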
@@ -7437,7 +7848,7 @@ class GenerationConfigThinkingConfig(_common.BaseModel):
   )
   thinking_budget: Optional[int] = Field(
       default=None,
-      description="""Optional. Indicates the thinking budget in tokens.
+      description="""Optional. Indicates the thinking budget in tokens.""",
   )
 
 
@@ -7448,7 +7859,7 @@ class GenerationConfigThinkingConfigDict(TypedDict, total=False):
   """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""
 
   thinking_budget: Optional[int]
-  """Optional. Indicates the thinking budget in tokens.
+  """Optional. Indicates the thinking budget in tokens."""
 
 
 GenerationConfigThinkingConfigOrDict = Union[
@@ -7943,6 +8354,40 @@ class VideoDict(TypedDict, total=False):
 VideoOrDict = Union[Video, VideoDict]
 
 
+class VideoGenerationReferenceImage(_common.BaseModel):
+  """A reference image for video generation."""
+
+  image: Optional[Image] = Field(
+      default=None,
+      description="""The reference image.
+      """,
+  )
+  reference_type: Optional[str] = Field(
+      default=None,
+      description="""The type of the reference image, which defines how the reference
+      image will be used to generate the video. Supported values are 'asset'
+      or 'style'.""",
+  )
+
+
+class VideoGenerationReferenceImageDict(TypedDict, total=False):
+  """A reference image for video generation."""
+
+  image: Optional[ImageDict]
+  """The reference image.
+  """
+
+  reference_type: Optional[str]
+  """The type of the reference image, which defines how the reference
+  image will be used to generate the video. Supported values are 'asset'
+  or 'style'."""
+
+
+VideoGenerationReferenceImageOrDict = Union[
+    VideoGenerationReferenceImage, VideoGenerationReferenceImageDict
+]
+
+
 class GenerateVideosConfig(_common.BaseModel):
   """Configuration for generating videos."""
 
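A minimal sketch of passing the new reference images through the existing video generation config; the Veo model name and prompt are illustrative assumptions, while the `reference_type` values come from the field docstring above.

```python
from google.genai import types

ref = types.VideoGenerationReferenceImage(
    image=types.Image.from_file(location='style.png'),
    reference_type='style',  # 'asset' or 'style'
)
config = types.GenerateVideosConfig(reference_images=[ref])
# Assumed call through the existing generate_videos surface:
# operation = client.models.generate_videos(
#     model='veo-2.0-generate-001',  # illustrative model name
#     prompt='A timelapse of clouds over mountains',
#     config=config,
# )
```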
@@ -7998,6 +8443,14 @@ class GenerateVideosConfig(_common.BaseModel):
       default=None,
       description="""Image to use as the last frame of generated videos. Only supported for image to video use cases.""",
   )
+  reference_images: Optional[list[VideoGenerationReferenceImage]] = Field(
+      default=None,
+      description="""The images to use as the references to generate the videos.
+      If this field is provided, the text prompt field must also be provided.
+      The image, video, or last_frame field are not supported. Each image must
+      be associated with a type. Veo 2 supports up to 3 asset images *or* 1
+      style image.""",
+  )
   compression_quality: Optional[VideoCompressionQuality] = Field(
       default=None,
       description="""Compression quality of the generated videos.""",
@@ -8049,6 +8502,13 @@ class GenerateVideosConfigDict(TypedDict, total=False):
   last_frame: Optional[ImageDict]
   """Image to use as the last frame of generated videos. Only supported for image to video use cases."""
 
+  reference_images: Optional[list[VideoGenerationReferenceImageDict]]
+  """The images to use as the references to generate the videos.
+  If this field is provided, the text prompt field must also be provided.
+  The image, video, or last_frame field are not supported. Each image must
+  be associated with a type. Veo 2 supports up to 3 asset images *or* 1
+  style image."""
+
   compression_quality: Optional[VideoCompressionQuality]
   """Compression quality of the generated videos."""
 
@@ -8360,7 +8820,7 @@ class TunedModel(_common.BaseModel):
 
   model: Optional[str] = Field(
       default=None,
-      description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}
+      description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""",
   )
   endpoint: Optional[str] = Field(
       default=None,
@@ -8377,7 +8837,7 @@ class TunedModel(_common.BaseModel):
 class TunedModelDict(TypedDict, total=False):
 
   model: Optional[str]
-  """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}
+  """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`"""
 
   endpoint: Optional[str]
   """Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`."""
@@ -8391,7 +8851,275 @@ class TunedModelDict(TypedDict, total=False):
|
|
8391
8851
|
TunedModelOrDict = Union[TunedModel, TunedModelDict]
|
8392
8852
|
|
8393
8853
|
|
8394
|
-
class
|
8854
|
+
class GcsDestination(_common.BaseModel):
|
8855
|
+
"""The Google Cloud Storage location where the output is to be written to."""
|
8856
|
+
|
8857
|
+
output_uri_prefix: Optional[str] = Field(
|
8858
|
+
default=None,
|
8859
|
+
description="""Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist.""",
|
8860
|
+
)
|
8861
|
+
|
8862
|
+
@pydantic.model_validator(mode='after')
|
8863
|
+
def _validate_gcs_path(self) -> 'GcsDestination':
|
8864
|
+
if self.output_uri_prefix and not self.output_uri_prefix.startswith(
|
8865
|
+
'gs://'
|
8866
|
+
):
|
8867
|
+
raise ValueError(
|
8868
|
+
'output_uri_prefix must be a valid GCS path starting with "gs://".'
|
8869
|
+
)
|
8870
|
+
return self
|
8871
|
+
|
8872
|
+
|
8873
|
+
class GcsDestinationDict(TypedDict, total=False):
|
8874
|
+
"""The Google Cloud Storage location where the output is to be written to."""
|
8875
|
+
|
8876
|
+
output_uri_prefix: Optional[str]
|
8877
|
+
"""Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist."""
|
8878
|
+
|
8879
|
+
|
8880
|
+
GcsDestinationOrDict = Union[GcsDestination, GcsDestinationDict]
|
8881
|
+
|
8882
|
+
|
8883
|
+
class OutputConfig(_common.BaseModel):
|
8884
|
+
"""Config for evaluation output."""
|
8885
|
+
|
8886
|
+
gcs_destination: Optional[GcsDestination] = Field(
|
8887
|
+
default=None,
|
8888
|
+
description="""Cloud storage destination for evaluation output.""",
|
8889
|
+
)
|
8890
|
+
|
8891
|
+
|
8892
|
+
class OutputConfigDict(TypedDict, total=False):
|
8893
|
+
"""Config for evaluation output."""
|
8894
|
+
|
8895
|
+
gcs_destination: Optional[GcsDestinationDict]
|
8896
|
+
"""Cloud storage destination for evaluation output."""
|
8897
|
+
|
8898
|
+
|
8899
|
+
OutputConfigOrDict = Union[OutputConfig, OutputConfigDict]
|
8900
|
+
|
8901
|
+
|
8902
|
+
class AutoraterConfig(_common.BaseModel):
|
8903
|
+
"""Autorater config used for evaluation."""
|
8904
|
+
|
8905
|
+
sampling_count: Optional[int] = Field(
|
8906
|
+
default=None,
|
8907
|
+
description="""Number of samples for each instance in the dataset.
|
8908
|
+
If not specified, the default is 4. Minimum value is 1, maximum value
|
8909
|
+
is 32.""",
|
8910
|
+
)
|
8911
|
+
flip_enabled: Optional[bool] = Field(
|
8912
|
+
default=None,
|
8913
|
+
description="""Optional. Default is true. Whether to flip the candidate and baseline
|
8914
|
+
responses. This is only applicable to the pairwise metric. If enabled, also
|
8915
|
+
provide PairwiseMetricSpec.candidate_response_field_name and
|
8916
|
+
PairwiseMetricSpec.baseline_response_field_name. When rendering
|
8917
|
+
PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
|
8918
|
+
fields will be flipped for half of the samples to reduce bias.""",
|
8919
|
+
)
|
8920
|
+
autorater_model: Optional[str] = Field(
|
8921
|
+
default=None,
|
8922
|
+
description="""The fully qualified name of the publisher model or tuned autorater
|
8923
|
+
endpoint to use.
|
8924
|
+
|
8925
|
+
Publisher model format:
|
8926
|
+
`projects/{project}/locations/{location}/publishers/*/models/*`
|
8927
|
+
|
8928
|
+
Tuned model endpoint format:
|
8929
|
+
`projects/{project}/locations/{location}/endpoints/{endpoint}`""",
|
8930
|
+
)
|
8931
|
+
|
8932
|
+
|
8933
|
+
class AutoraterConfigDict(TypedDict, total=False):
|
8934
|
+
"""Autorater config used for evaluation."""
|
8935
|
+
|
8936
|
+
sampling_count: Optional[int]
|
8937
|
+
"""Number of samples for each instance in the dataset.
|
8938
|
+
If not specified, the default is 4. Minimum value is 1, maximum value
|
8939
|
+
is 32."""
|
8940
|
+
|
8941
|
+
flip_enabled: Optional[bool]
|
8942
|
+
"""Optional. Default is true. Whether to flip the candidate and baseline
|
8943
|
+
responses. This is only applicable to the pairwise metric. If enabled, also
|
8944
|
+
provide PairwiseMetricSpec.candidate_response_field_name and
|
8945
|
+
PairwiseMetricSpec.baseline_response_field_name. When rendering
|
8946
|
+
PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
|
8947
|
+
fields will be flipped for half of the samples to reduce bias."""
|
8948
|
+
|
8949
|
+
autorater_model: Optional[str]
|
8950
|
+
"""The fully qualified name of the publisher model or tuned autorater
|
8951
|
+
endpoint to use.
|
8952
|
+
|
8953
|
+
Publisher model format:
|
8954
|
+
`projects/{project}/locations/{location}/publishers/*/models/*`
|
8955
|
+
|
8956
|
+
Tuned model endpoint format:
|
8957
|
+
`projects/{project}/locations/{location}/endpoints/{endpoint}`"""
|
8958
|
+
|
8959
|
+
|
8960
|
+
AutoraterConfigOrDict = Union[AutoraterConfig, AutoraterConfigDict]
|
8961
|
+
|
8962
|
+
|
8963
|
+
class Metric(_common.BaseModel):
|
8964
|
+
"""The metric used for evaluation."""
|
8965
|
+
|
8966
|
+
name: Optional[str] = Field(
|
8967
|
+
default=None, description="""The name of the metric."""
|
8968
|
+
)
|
8969
|
+
custom_function: Optional[Callable[..., Any]] = Field(
|
8970
|
+
default=None,
|
8971
|
+
description="""The custom function that defines the end-to-end logic for metric computation.""",
|
8972
|
+
)
|
8973
|
+
prompt_template: Optional[str] = Field(
|
8974
|
+
+      default=None, description="""The prompt template for the metric."""
+  )
+  judge_model_system_instruction: Optional[str] = Field(
+      default=None,
+      description="""The system instruction for the judge model.""",
+  )
+  return_raw_output: Optional[bool] = Field(
+      default=None,
+      description="""Whether to return the raw output from the judge model.""",
+  )
+  parse_and_reduce_fn: Optional[Callable[..., Any]] = Field(
+      default=None,
+      description="""The parse and reduce function for the judge model.""",
+  )
+  aggregate_summary_fn: Optional[Callable[..., Any]] = Field(
+      default=None,
+      description="""The aggregate summary function for the judge model.""",
+  )
+
+  # Allow extra fields to support metric-specific config fields.
+  model_config = ConfigDict(extra='allow')
+
+  _is_predefined: bool = PrivateAttr(default=False)
+  """A boolean indicating whether the metric is predefined."""
+
+  _config_source: Optional[str] = PrivateAttr(default=None)
+  """An optional string indicating the source of the metric configuration."""
+
+  _version: Optional[str] = PrivateAttr(default=None)
+  """An optional string indicating the version of the metric."""
+
+  @model_validator(mode='after')  # type: ignore[arg-type]
+  @classmethod
+  def validate_name(cls, model: 'Metric') -> 'Metric':
+    if not model.name:
+      raise ValueError('Metric name cannot be empty.')
+    model.name = model.name.lower()
+    return model
+
+  def to_yaml_file(self, file_path: str, version: Optional[str] = None) -> None:
+    """Dumps the metric object to a YAML file.
+
+    Args:
+      file_path: The path to the YAML file.
+      version: Optional version string to include in the YAML output.
+
+    Raises:
+      ImportError: If the pyyaml library is not installed.
+    """
+    if yaml is None:
+      raise ImportError(
+          'YAML serialization requires the pyyaml library. Please install'
+          " it using 'pip install google-cloud-aiplatform[evaluation]'."
+      )
+
+    fields_to_exclude_callables = set()
+    for field_name, field_info in self.model_fields.items():
+      annotation = field_info.annotation
+      origin = typing.get_origin(annotation)
+
+      is_field_callable_type = False
+      if annotation is Callable or origin is Callable:  # type: ignore[comparison-overlap]
+        is_field_callable_type = True
+      elif origin is Union:
+        args = typing.get_args(annotation)
+        if any(
+            arg is Callable or typing.get_origin(arg) is Callable
+            for arg in args
+        ):
+          is_field_callable_type = True
+
+      if is_field_callable_type:
+        fields_to_exclude_callables.add(field_name)
+
+    data_to_dump = self.model_dump(
+        exclude_unset=True,
+        exclude_none=True,
+        mode='json',
+        exclude=fields_to_exclude_callables
+        if fields_to_exclude_callables
+        else None,
+    )
+
+    if version:
+      data_to_dump['version'] = version
+
+    with open(file_path, 'w', encoding='utf-8') as f:
+      yaml.dump(data_to_dump, f, sort_keys=False, allow_unicode=True)
+
+
+class MetricDict(TypedDict, total=False):
+  """The metric used for evaluation."""
+
+  name: Optional[str]
+  """The name of the metric."""
+
+  custom_function: Optional[Callable[..., Any]]
+  """The custom function that defines the end-to-end logic for metric computation."""
+
+  prompt_template: Optional[str]
+  """The prompt template for the metric."""
+
+  judge_model_system_instruction: Optional[str]
+  """The system instruction for the judge model."""
+
+  return_raw_output: Optional[bool]
+  """Whether to return the raw output from the judge model."""
+
+  parse_and_reduce_fn: Optional[Callable[..., Any]]
+  """The parse and reduce function for the judge model."""
+
+  aggregate_summary_fn: Optional[Callable[..., Any]]
+  """The aggregate summary function for the judge model."""
+
+
+MetricOrDict = Union[Metric, MetricDict]
+
+
+class EvaluationConfig(_common.BaseModel):
+  """Evaluation config for tuning."""
+
+  metrics: Optional[list[Metric]] = Field(
+      default=None, description="""The metrics used for evaluation."""
+  )
+  output_config: Optional[OutputConfig] = Field(
+      default=None, description="""Config for evaluation output."""
+  )
+  autorater_config: Optional[AutoraterConfig] = Field(
+      default=None, description="""Autorater config for evaluation."""
+  )
+
+
+class EvaluationConfigDict(TypedDict, total=False):
+  """Evaluation config for tuning."""
+
+  metrics: Optional[list[MetricDict]]
+  """The metrics used for evaluation."""
+
+  output_config: Optional[OutputConfigDict]
+  """Config for evaluation output."""
+
+  autorater_config: Optional[AutoraterConfigDict]
+  """Autorater config for evaluation."""
+
+
+EvaluationConfigOrDict = Union[EvaluationConfig, EvaluationConfigDict]
+
+
class GoogleRpcStatus(_common.BaseModel):
  """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs.

  It is used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -8436,19 +9164,60 @@ class GoogleRpcStatusDict(TypedDict, total=False):
GoogleRpcStatusOrDict = Union[GoogleRpcStatus, GoogleRpcStatusDict]


+class PreTunedModel(_common.BaseModel):
+  """A pre-tuned model for continuous tuning."""
+
+  base_model: Optional[str] = Field(
+      default=None,
+      description="""Output only. The name of the base model this PreTunedModel was tuned from.""",
+  )
+  checkpoint_id: Optional[str] = Field(
+      default=None,
+      description="""Optional. The source checkpoint id. If not specified, the default checkpoint will be used.""",
+  )
+  tuned_model_name: Optional[str] = Field(
+      default=None,
+      description="""The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`""",
+  )
+
+
+class PreTunedModelDict(TypedDict, total=False):
+  """A pre-tuned model for continuous tuning."""
+
+  base_model: Optional[str]
+  """Output only. The name of the base model this PreTunedModel was tuned from."""
+
+  checkpoint_id: Optional[str]
+  """Optional. The source checkpoint id. If not specified, the default checkpoint will be used."""
+
+  tuned_model_name: Optional[str]
+  """The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`"""
+
+
+PreTunedModelOrDict = Union[PreTunedModel, PreTunedModelDict]
+
+
class SupervisedHyperParameters(_common.BaseModel):
  """Hyperparameters for SFT."""

  adapter_size: Optional[AdapterSize] = Field(
      default=None, description="""Optional. Adapter size for tuning."""
  )
+  batch_size: Optional[int] = Field(
+      default=None,
+      description="""Optional. Batch size for tuning. This feature is only available for open source models.""",
+  )
  epoch_count: Optional[int] = Field(
      default=None,
      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
  )
+  learning_rate: Optional[float] = Field(
+      default=None,
+      description="""Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models.""",
+  )
  learning_rate_multiplier: Optional[float] = Field(
      default=None,
-      description="""Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`.""",
+      description="""Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models.""",
  )

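A sketch of how the new fields above might be populated, assuming an open-source base model (where batch_size and learning_rate apply; learning_rate_multiplier stays the 1P knob and is mutually exclusive with learning_rate; resource names are placeholders):

    from google.genai import types

    hyper_params = types.SupervisedHyperParameters(
        epoch_count=3,
        batch_size=8,        # open source models only
        learning_rate=1e-4,  # mutually exclusive with learning_rate_multiplier
    )

    # Continuous tuning can start from a previously tuned model checkpoint.
    pre_tuned = types.PreTunedModel(
        tuned_model_name='projects/p/locations/us-central1/models/123',
        checkpoint_id='2',
    )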
@@ -8458,11 +9227,17 @@ class SupervisedHyperParametersDict(TypedDict, total=False):
  adapter_size: Optional[AdapterSize]
  """Optional. Adapter size for tuning."""

+  batch_size: Optional[int]
+  """Optional. Batch size for tuning. This feature is only available for open source models."""
+
  epoch_count: Optional[int]
  """Optional. Number of complete passes the model makes over the entire training dataset during training."""

+  learning_rate: Optional[float]
+  """Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models."""
+
  learning_rate_multiplier: Optional[float]
-  """Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`."""
+  """Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models."""


SupervisedHyperParametersOrDict = Union[
@@ -8484,6 +9259,9 @@ class SupervisedTuningSpec(_common.BaseModel):
      default=None,
      description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
  )
+  tuning_mode: Optional[TuningMode] = Field(
+      default=None, description="""Tuning mode."""
+  )
  validation_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
@@ -8502,6 +9280,9 @@ class SupervisedTuningSpecDict(TypedDict, total=False):
  training_dataset_uri: Optional[str]
  """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""

+  tuning_mode: Optional[TuningMode]
+  """Tuning mode."""
+
  validation_dataset_uri: Optional[str]
  """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""

@@ -8701,6 +9482,132 @@ DistillationDataStatsOrDict = Union[
]


+class GeminiPreferenceExampleCompletion(_common.BaseModel):
+  """Completion and its preference score."""
+
+  completion: Optional[Content] = Field(
+      default=None,
+      description="""Single turn completion for the given prompt.""",
+  )
+  score: Optional[float] = Field(
+      default=None, description="""The score for the given completion."""
+  )
+
+
+class GeminiPreferenceExampleCompletionDict(TypedDict, total=False):
+  """Completion and its preference score."""
+
+  completion: Optional[ContentDict]
+  """Single turn completion for the given prompt."""
+
+  score: Optional[float]
+  """The score for the given completion."""
+
+
+GeminiPreferenceExampleCompletionOrDict = Union[
+    GeminiPreferenceExampleCompletion, GeminiPreferenceExampleCompletionDict
+]
+
+
+class GeminiPreferenceExample(_common.BaseModel):
+  """Input example for preference optimization."""
+
+  completions: Optional[list[GeminiPreferenceExampleCompletion]] = Field(
+      default=None, description="""List of completions for a given prompt."""
+  )
+  contents: Optional[list[Content]] = Field(
+      default=None,
+      description="""Multi-turn contents that represents the Prompt.""",
+  )
+
+
+class GeminiPreferenceExampleDict(TypedDict, total=False):
+  """Input example for preference optimization."""
+
+  completions: Optional[list[GeminiPreferenceExampleCompletionDict]]
+  """List of completions for a given prompt."""
+
+  contents: Optional[list[ContentDict]]
+  """Multi-turn contents that represents the Prompt."""
+
+
+GeminiPreferenceExampleOrDict = Union[
+    GeminiPreferenceExample, GeminiPreferenceExampleDict
+]
+
+
+class PreferenceOptimizationDataStats(_common.BaseModel):
+  """Statistics computed for datasets used for preference optimization."""
+
+  score_variance_per_example_distribution: Optional[DatasetDistribution] = (
+      Field(
+          default=None,
+          description="""Output only. Dataset distributions for scores variance per example.""",
+      )
+  )
+  scores_distribution: Optional[DatasetDistribution] = Field(
+      default=None,
+      description="""Output only. Dataset distributions for scores.""",
+  )
+  total_billable_token_count: Optional[int] = Field(
+      default=None,
+      description="""Output only. Number of billable tokens in the tuning dataset.""",
+  )
+  tuning_dataset_example_count: Optional[int] = Field(
+      default=None,
+      description="""Output only. Number of examples in the tuning dataset.""",
+  )
+  tuning_step_count: Optional[int] = Field(
+      default=None,
+      description="""Output only. Number of tuning steps for this Tuning Job.""",
+  )
+  user_dataset_examples: Optional[list[GeminiPreferenceExample]] = Field(
+      default=None,
+      description="""Output only. Sample user examples in the training dataset.""",
+  )
+  user_input_token_distribution: Optional[DatasetDistribution] = Field(
+      default=None,
+      description="""Output only. Dataset distributions for the user input tokens.""",
+  )
+  user_output_token_distribution: Optional[DatasetDistribution] = Field(
+      default=None,
+      description="""Output only. Dataset distributions for the user output tokens.""",
+  )
+
+
+class PreferenceOptimizationDataStatsDict(TypedDict, total=False):
+  """Statistics computed for datasets used for preference optimization."""
+
+  score_variance_per_example_distribution: Optional[DatasetDistributionDict]
+  """Output only. Dataset distributions for scores variance per example."""
+
+  scores_distribution: Optional[DatasetDistributionDict]
+  """Output only. Dataset distributions for scores."""
+
+  total_billable_token_count: Optional[int]
+  """Output only. Number of billable tokens in the tuning dataset."""
+
+  tuning_dataset_example_count: Optional[int]
+  """Output only. Number of examples in the tuning dataset."""
+
+  tuning_step_count: Optional[int]
+  """Output only. Number of tuning steps for this Tuning Job."""
+
+  user_dataset_examples: Optional[list[GeminiPreferenceExampleDict]]
+  """Output only. Sample user examples in the training dataset."""
+
+  user_input_token_distribution: Optional[DatasetDistributionDict]
+  """Output only. Dataset distributions for the user input tokens."""
+
+  user_output_token_distribution: Optional[DatasetDistributionDict]
+  """Output only. Dataset distributions for the user output tokens."""
+
+
+PreferenceOptimizationDataStatsOrDict = Union[
+    PreferenceOptimizationDataStats, PreferenceOptimizationDataStatsDict
+]
+
+
class SupervisedTuningDatasetDistributionDatasetBucket(_common.BaseModel):
  """Dataset bucket used to create a histogram for the distribution given a population of values."""

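The preference-optimization example type added above pairs a prompt with scored completions; a minimal sketch (prompt and completion strings and the scores are made up for illustration):

    from google.genai import types

    example = types.GeminiPreferenceExample(
        contents=[
            types.Content(role='user', parts=[types.Part(text='Summarize the meeting notes.')])
        ],
        completions=[
            types.GeminiPreferenceExampleCompletion(
                completion=types.Content(role='model', parts=[types.Part(text='Concise summary...')]),
                score=0.9,
            ),
            types.GeminiPreferenceExampleCompletion(
                completion=types.Content(role='model', parts=[types.Part(text='Off-topic reply...')]),
                score=0.1,
            ),
        ],
    )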
@@ -8932,6 +9839,12 @@ class TuningDataStats(_common.BaseModel):
  distillation_data_stats: Optional[DistillationDataStats] = Field(
      default=None, description="""Output only. Statistics for distillation."""
  )
+  preference_optimization_data_stats: Optional[
+      PreferenceOptimizationDataStats
+  ] = Field(
+      default=None,
+      description="""Output only. Statistics for preference optimization.""",
+  )
  supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field(
      default=None, description="""The SFT Tuning data stats."""
  )
@@ -8943,6 +9856,11 @@ class TuningDataStatsDict(TypedDict, total=False):
  distillation_data_stats: Optional[DistillationDataStatsDict]
  """Output only. Statistics for distillation."""

+  preference_optimization_data_stats: Optional[
+      PreferenceOptimizationDataStatsDict
+  ]
+  """Output only. Statistics for preference optimization."""
+
  supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict]
  """The SFT Tuning data stats."""

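These stats are output only; after a preference-optimization job has run they would be read, not written. A short sketch, assuming the completed job object exposes tuning_data_stats as existing TuningJob results do:

    # 'job' is a completed TuningJob retrieved from the API (assumption).
    stats = job.tuning_data_stats.preference_optimization_data_stats
    if stats is not None:
        print(stats.tuning_dataset_example_count, stats.tuning_step_count)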
@@ -9004,101 +9922,6 @@ PartnerModelTuningSpecOrDict = Union[
]


-class DistillationHyperParameters(_common.BaseModel):
-  """Hyperparameters for Distillation."""
-
-  adapter_size: Optional[AdapterSize] = Field(
-      default=None, description="""Optional. Adapter size for distillation."""
-  )
-  epoch_count: Optional[int] = Field(
-      default=None,
-      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
-  )
-  learning_rate_multiplier: Optional[float] = Field(
-      default=None,
-      description="""Optional. Multiplier for adjusting the default learning rate.""",
-  )
-
-
-class DistillationHyperParametersDict(TypedDict, total=False):
-  """Hyperparameters for Distillation."""
-
-  adapter_size: Optional[AdapterSize]
-  """Optional. Adapter size for distillation."""
-
-  epoch_count: Optional[int]
-  """Optional. Number of complete passes the model makes over the entire training dataset during training."""
-
-  learning_rate_multiplier: Optional[float]
-  """Optional. Multiplier for adjusting the default learning rate."""
-
-
-DistillationHyperParametersOrDict = Union[
-    DistillationHyperParameters, DistillationHyperParametersDict
-]
-
-
-class DistillationSpec(_common.BaseModel):
-  """Tuning Spec for Distillation."""
-
-  base_teacher_model: Optional[str] = Field(
-      default=None,
-      description="""The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models).""",
-  )
-  hyper_parameters: Optional[DistillationHyperParameters] = Field(
-      default=None,
-      description="""Optional. Hyperparameters for Distillation.""",
-  )
-  pipeline_root_directory: Optional[str] = Field(
-      default=None,
-      description="""Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts.""",
-  )
-  student_model: Optional[str] = Field(
-      default=None,
-      description="""The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead.""",
-  )
-  training_dataset_uri: Optional[str] = Field(
-      default=None,
-      description="""Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
-  )
-  tuned_teacher_model_source: Optional[str] = Field(
-      default=None,
-      description="""The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`.""",
-  )
-  validation_dataset_uri: Optional[str] = Field(
-      default=None,
-      description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
-  )
-
-
-class DistillationSpecDict(TypedDict, total=False):
-  """Tuning Spec for Distillation."""
-
-  base_teacher_model: Optional[str]
-  """The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models)."""
-
-  hyper_parameters: Optional[DistillationHyperParametersDict]
-  """Optional. Hyperparameters for Distillation."""
-
-  pipeline_root_directory: Optional[str]
-  """Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts."""
-
-  student_model: Optional[str]
-  """The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead."""
-
-  training_dataset_uri: Optional[str]
-  """Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
-
-  tuned_teacher_model_source: Optional[str]
-  """The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`."""
-
-  validation_dataset_uri: Optional[str]
-  """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""
-
-
-DistillationSpecOrDict = Union[DistillationSpec, DistillationSpecDict]
-
-
class TuningJob(_common.BaseModel):
  """A tuning job."""

@@ -9145,6 +9968,9 @@ class TuningJob(_common.BaseModel):
      default=None,
      description="""Output only. The tuned model resources associated with this TuningJob.""",
  )
+  pre_tuned_model: Optional[PreTunedModel] = Field(
+      default=None, description="""The pre-tuned model for continuous tuning."""
+  )
  supervised_tuning_spec: Optional[SupervisedTuningSpec] = Field(
      default=None, description="""Tuning Spec for Supervised Fine Tuning."""
  )
@@ -9160,8 +9986,12 @@ class TuningJob(_common.BaseModel):
      default=None,
      description="""Tuning Spec for open sourced and third party Partner models.""",
  )
-  distillation_spec: Optional[DistillationSpec] = Field(
-      default=None, description="""Tuning Spec for Distillation."""
+  evaluation_config: Optional[EvaluationConfig] = Field(
+      default=None, description=""""""
+  )
+  custom_base_model: Optional[str] = Field(
+      default=None,
+      description="""Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models.""",
  )
  experiment: Optional[str] = Field(
      default=None,
@@ -9171,16 +10001,14 @@ class TuningJob(_common.BaseModel):
      default=None,
      description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""",
  )
+  output_uri: Optional[str] = Field(
+      default=None,
+      description="""Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models.""",
+  )
  pipeline_job: Optional[str] = Field(
      default=None,
      description="""Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""",
  )
-  satisfies_pzi: Optional[bool] = Field(
-      default=None, description="""Output only. Reserved for future use."""
-  )
-  satisfies_pzs: Optional[bool] = Field(
-      default=None, description="""Output only. Reserved for future use."""
-  )
  service_account: Optional[str] = Field(
      default=None,
      description="""The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account.""",
@@ -9237,6 +10065,9 @@ class TuningJobDict(TypedDict, total=False):
  tuned_model: Optional[TunedModelDict]
  """Output only. The tuned model resources associated with this TuningJob."""

+  pre_tuned_model: Optional[PreTunedModelDict]
+  """The pre-tuned model for continuous tuning."""
+
  supervised_tuning_spec: Optional[SupervisedTuningSpecDict]
  """Tuning Spec for Supervised Fine Tuning."""

@@ -9249,8 +10080,11 @@ class TuningJobDict(TypedDict, total=False):
  partner_model_tuning_spec: Optional[PartnerModelTuningSpecDict]
  """Tuning Spec for open sourced and third party Partner models."""

-  distillation_spec: Optional[DistillationSpecDict]
-  """Tuning Spec for Distillation."""
+  evaluation_config: Optional[EvaluationConfigDict]
+  """"""
+
+  custom_base_model: Optional[str]
+  """Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models."""

  experiment: Optional[str]
  """Output only. The Experiment associated with this TuningJob."""
@@ -9258,15 +10092,12 @@ class TuningJobDict(TypedDict, total=False):
  labels: Optional[dict[str, str]]
  """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels."""

+  output_uri: Optional[str]
+  """Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models."""
+
  pipeline_job: Optional[str]
  """Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`."""

-  satisfies_pzi: Optional[bool]
-  """Output only. Reserved for future use."""
-
-  satisfies_pzs: Optional[bool]
-  """Output only. Reserved for future use."""
-
  service_account: Optional[str]
  """The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account."""

@@ -9472,6 +10303,10 @@ class CreateTuningJobConfig(_common.BaseModel):
      default=None,
      description="""If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT.""",
  )
+  pre_tuned_model_checkpoint_id: Optional[str] = Field(
+      default=None,
+      description="""The optional checkpoint id of the pre-tuned model to use for tuning, if applicable.""",
+  )
  adapter_size: Optional[AdapterSize] = Field(
      default=None, description="""Adapter size for tuning."""
  )
@@ -9483,6 +10318,9 @@ class CreateTuningJobConfig(_common.BaseModel):
      default=None,
      description="""The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""",
  )
+  evaluation_config: Optional[EvaluationConfig] = Field(
+      default=None, description="""Evaluation config for the tuning job."""
+  )


class CreateTuningJobConfigDict(TypedDict, total=False):
@@ -9509,6 +10347,9 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
  export_last_checkpoint_only: Optional[bool]
  """If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT."""

+  pre_tuned_model_checkpoint_id: Optional[str]
+  """The optional checkpoint id of the pre-tuned model to use for tuning, if applicable."""
+
  adapter_size: Optional[AdapterSize]
  """Adapter size for tuning."""

@@ -9518,18 +10359,24 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
  learning_rate: Optional[float]
  """The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples."""

+  evaluation_config: Optional[EvaluationConfigDict]
+  """Evaluation config for the tuning job."""
+

CreateTuningJobConfigOrDict = Union[
    CreateTuningJobConfig, CreateTuningJobConfigDict
]


-class _CreateTuningJobParameters(_common.BaseModel):
+class _CreateTuningJobParametersPrivate(_common.BaseModel):
  """Supervised fine-tuning job creation parameters - optional fields."""

  base_model: Optional[str] = Field(
      default=None,
-      description="""The base model that is being tuned, e.g., "gemini-
+      description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
+  )
+  pre_tuned_model: Optional[PreTunedModel] = Field(
+      default=None, description="""The PreTunedModel that is being tuned."""
  )
  training_dataset: Optional[TuningDataset] = Field(
      default=None,
@@ -9540,11 +10387,14 @@ class _CreateTuningJobParameters(_common.BaseModel):
  )


-class _CreateTuningJobParametersDict(TypedDict, total=False):
+class _CreateTuningJobParametersPrivateDict(TypedDict, total=False):
  """Supervised fine-tuning job creation parameters - optional fields."""

  base_model: Optional[str]
-  """The base model that is being tuned, e.g., "gemini-
+  """The base model that is being tuned, e.g., "gemini-2.5-flash"."""
+
+  pre_tuned_model: Optional[PreTunedModelDict]
+  """The PreTunedModel that is being tuned."""

  training_dataset: Optional[TuningDatasetDict]
  """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
@@ -9553,8 +10403,8 @@ class _CreateTuningJobParametersDict(TypedDict, total=False):
  """Configuration for the tuning job."""


-_CreateTuningJobParametersOrDict = Union[
-    _CreateTuningJobParameters, _CreateTuningJobParametersDict
+_CreateTuningJobParametersPrivateOrDict = Union[
+    _CreateTuningJobParametersPrivate, _CreateTuningJobParametersPrivateDict
]

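Putting the tuning-side additions together, a hedged end-to-end sketch (assumes the existing client.tunings.tune entry point on a Vertex project; the model name, dataset URI, and checkpoint id are placeholders):

    from google import genai
    from google.genai import types

    client = genai.Client(vertexai=True, project='my-project', location='us-central1')

    job = client.tunings.tune(
        base_model='gemini-2.5-flash',
        training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
        config=types.CreateTuningJobConfig(
            epoch_count=2,
            pre_tuned_model_checkpoint_id='2',  # continuous tuning, if applicable
            evaluation_config=types.EvaluationConfig(
                metrics=[types.Metric(name='fluency')],
            ),
        ),
    )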
@@ -12342,95 +13192,6 @@ class LiveServerMessageDict(TypedDict, total=False):
LiveServerMessageOrDict = Union[LiveServerMessage, LiveServerMessageDict]


-class AutomaticActivityDetection(_common.BaseModel):
-  """Configures automatic detection of activity."""
-
-  disabled: Optional[bool] = Field(
-      default=None,
-      description="""If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals.""",
-  )
-  start_of_speech_sensitivity: Optional[StartSensitivity] = Field(
-      default=None,
-      description="""Determines how likely speech is to be detected.""",
-  )
-  end_of_speech_sensitivity: Optional[EndSensitivity] = Field(
-      default=None,
-      description="""Determines how likely detected speech is ended.""",
-  )
-  prefix_padding_ms: Optional[int] = Field(
-      default=None,
-      description="""The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives.""",
-  )
-  silence_duration_ms: Optional[int] = Field(
-      default=None,
-      description="""The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency.""",
-  )
-
-
-class AutomaticActivityDetectionDict(TypedDict, total=False):
-  """Configures automatic detection of activity."""
-
-  disabled: Optional[bool]
-  """If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals."""
-
-  start_of_speech_sensitivity: Optional[StartSensitivity]
-  """Determines how likely speech is to be detected."""
-
-  end_of_speech_sensitivity: Optional[EndSensitivity]
-  """Determines how likely detected speech is ended."""
-
-  prefix_padding_ms: Optional[int]
-  """The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives."""
-
-  silence_duration_ms: Optional[int]
-  """The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency."""
-
-
-AutomaticActivityDetectionOrDict = Union[
-    AutomaticActivityDetection, AutomaticActivityDetectionDict
-]
-
-
-class RealtimeInputConfig(_common.BaseModel):
-  """Marks the end of user activity.
-
-  This can only be sent if automatic (i.e. server-side) activity detection is
-  disabled.
-  """
-
-  automatic_activity_detection: Optional[AutomaticActivityDetection] = Field(
-      default=None,
-      description="""If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals.""",
-  )
-  activity_handling: Optional[ActivityHandling] = Field(
-      default=None, description="""Defines what effect activity has."""
-  )
-  turn_coverage: Optional[TurnCoverage] = Field(
-      default=None,
-      description="""Defines which input is included in the user's turn.""",
-  )
-
-
-class RealtimeInputConfigDict(TypedDict, total=False):
-  """Marks the end of user activity.
-
-  This can only be sent if automatic (i.e. server-side) activity detection is
-  disabled.
-  """
-
-  automatic_activity_detection: Optional[AutomaticActivityDetectionDict]
-  """If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals."""
-
-  activity_handling: Optional[ActivityHandling]
-  """Defines what effect activity has."""
-
-  turn_coverage: Optional[TurnCoverage]
-  """Defines which input is included in the user's turn."""
-
-
-RealtimeInputConfigOrDict = Union[RealtimeInputConfig, RealtimeInputConfigDict]
-
-
class SessionResumptionConfig(_common.BaseModel):
  """Configuration of session resumption mechanism.

@@ -12557,16 +13318,105 @@ class ProactivityConfig(_common.BaseModel):
  )


-class ProactivityConfigDict(TypedDict, total=False):
-  """Config for proactivity features."""
+class ProactivityConfigDict(TypedDict, total=False):
+  """Config for proactivity features."""
+
+  proactive_audio: Optional[bool]
+  """If enabled, the model can reject responding to the last prompt. For
+  example, this allows the model to ignore out of context speech or to stay
+  silent if the user did not make a request, yet."""
+
+
+ProactivityConfigOrDict = Union[ProactivityConfig, ProactivityConfigDict]
+
+
+class AutomaticActivityDetection(_common.BaseModel):
+  """Configures automatic detection of activity."""
+
+  disabled: Optional[bool] = Field(
+      default=None,
+      description="""If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals.""",
+  )
+  start_of_speech_sensitivity: Optional[StartSensitivity] = Field(
+      default=None,
+      description="""Determines how likely speech is to be detected.""",
+  )
+  end_of_speech_sensitivity: Optional[EndSensitivity] = Field(
+      default=None,
+      description="""Determines how likely detected speech is ended.""",
+  )
+  prefix_padding_ms: Optional[int] = Field(
+      default=None,
+      description="""The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives.""",
+  )
+  silence_duration_ms: Optional[int] = Field(
+      default=None,
+      description="""The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency.""",
+  )
+
+
+class AutomaticActivityDetectionDict(TypedDict, total=False):
+  """Configures automatic detection of activity."""
+
+  disabled: Optional[bool]
+  """If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals."""
+
+  start_of_speech_sensitivity: Optional[StartSensitivity]
+  """Determines how likely speech is to be detected."""
+
+  end_of_speech_sensitivity: Optional[EndSensitivity]
+  """Determines how likely detected speech is ended."""
+
+  prefix_padding_ms: Optional[int]
+  """The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives."""
+
+  silence_duration_ms: Optional[int]
+  """The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency."""
+
+
+AutomaticActivityDetectionOrDict = Union[
+    AutomaticActivityDetection, AutomaticActivityDetectionDict
+]
+
+
+class RealtimeInputConfig(_common.BaseModel):
+  """Marks the end of user activity.
+
+  This can only be sent if automatic (i.e. server-side) activity detection is
+  disabled.
+  """
+
+  automatic_activity_detection: Optional[AutomaticActivityDetection] = Field(
+      default=None,
+      description="""If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals.""",
+  )
+  activity_handling: Optional[ActivityHandling] = Field(
+      default=None, description="""Defines what effect activity has."""
+  )
+  turn_coverage: Optional[TurnCoverage] = Field(
+      default=None,
+      description="""Defines which input is included in the user's turn.""",
+  )
+
+
+class RealtimeInputConfigDict(TypedDict, total=False):
+  """Marks the end of user activity.
+
+  This can only be sent if automatic (i.e. server-side) activity detection is
+  disabled.
+  """
+
+  automatic_activity_detection: Optional[AutomaticActivityDetectionDict]
+  """If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals."""
+
+  activity_handling: Optional[ActivityHandling]
+  """Defines what effect activity has."""

-  proactive_audio: Optional[bool]
-  """If enabled, the model can reject responding to the last prompt. For
-  example, this allows the model to ignore out of context speech or to stay
-  silent if the user did not make a request, yet."""
+  turn_coverage: Optional[TurnCoverage]
+  """Defines which input is included in the user's turn."""


-ProactivityConfigOrDict = Union[ProactivityConfig, ProactivityConfigDict]
+RealtimeInputConfigOrDict = Union[RealtimeInputConfig, RealtimeInputConfigDict]


class LiveClientSetup(_common.BaseModel):
@@ -12881,6 +13731,44 @@ LiveClientRealtimeInputOrDict = Union[
]


+class LiveClientToolResponse(_common.BaseModel):
+  """Client generated response to a `ToolCall` received from the server.
+
+  Individual `FunctionResponse` objects are matched to the respective
+  `FunctionCall` objects by the `id` field.
+
+  Note that in the unary and server-streaming GenerateContent APIs function
+  calling happens by exchanging the `Content` parts, while in the bidi
+  GenerateContent APIs function calling happens over this dedicated set of
+  messages.
+  """
+
+  function_responses: Optional[list[FunctionResponse]] = Field(
+      default=None, description="""The response to the function calls."""
+  )
+
+
+class LiveClientToolResponseDict(TypedDict, total=False):
+  """Client generated response to a `ToolCall` received from the server.
+
+  Individual `FunctionResponse` objects are matched to the respective
+  `FunctionCall` objects by the `id` field.
+
+  Note that in the unary and server-streaming GenerateContent APIs function
+  calling happens by exchanging the `Content` parts, while in the bidi
+  GenerateContent APIs function calling happens over this dedicated set of
+  messages.
+  """
+
+  function_responses: Optional[list[FunctionResponseDict]]
+  """The response to the function calls."""
+
+
+LiveClientToolResponseOrDict = Union[
+    LiveClientToolResponse, LiveClientToolResponseDict
+]
+
+
if _is_pillow_image_imported:
  BlobImageUnion = Union[PIL_Image, Blob]
else:
@@ -12966,44 +13854,6 @@ LiveSendRealtimeInputParametersOrDict = Union[
]


-class LiveClientToolResponse(_common.BaseModel):
-  """Client generated response to a `ToolCall` received from the server.
-
-  Individual `FunctionResponse` objects are matched to the respective
-  `FunctionCall` objects by the `id` field.
-
-  Note that in the unary and server-streaming GenerateContent APIs function
-  calling happens by exchanging the `Content` parts, while in the bidi
-  GenerateContent APIs function calling happens over this dedicated set of
-  messages.
-  """
-
-  function_responses: Optional[list[FunctionResponse]] = Field(
-      default=None, description="""The response to the function calls."""
-  )
-
-
-class LiveClientToolResponseDict(TypedDict, total=False):
-  """Client generated response to a `ToolCall` received from the server.
-
-  Individual `FunctionResponse` objects are matched to the respective
-  `FunctionCall` objects by the `id` field.
-
-  Note that in the unary and server-streaming GenerateContent APIs function
-  calling happens by exchanging the `Content` parts, while in the bidi
-  GenerateContent APIs function calling happens over this dedicated set of
-  messages.
-  """
-
-  function_responses: Optional[list[FunctionResponseDict]]
-  """The response to the function calls."""
-
-
-LiveClientToolResponseOrDict = Union[
-    LiveClientToolResponse, LiveClientToolResponseDict
-]
-
-
class LiveClientMessage(_common.BaseModel):
  """Messages sent by the client in the API call."""

@@ -13885,3 +14735,250 @@ class CreateAuthTokenParametersDict(TypedDict, total=False):
CreateAuthTokenParametersOrDict = Union[
    CreateAuthTokenParameters, CreateAuthTokenParametersDict
]
+
+
+class CreateTuningJobParameters(_common.BaseModel):
+  """Supervised fine-tuning job creation parameters - optional fields."""
+
+  base_model: Optional[str] = Field(
+      default=None,
+      description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
+  )
+  training_dataset: Optional[TuningDataset] = Field(
+      default=None,
+      description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
+  )
+  config: Optional[CreateTuningJobConfig] = Field(
+      default=None, description="""Configuration for the tuning job."""
+  )
+
+
+class CreateTuningJobParametersDict(TypedDict, total=False):
+  """Supervised fine-tuning job creation parameters - optional fields."""
+
+  base_model: Optional[str]
+  """The base model that is being tuned, e.g., "gemini-2.5-flash"."""
+
+  training_dataset: Optional[TuningDatasetDict]
+  """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
+
+  config: Optional[CreateTuningJobConfigDict]
+  """Configuration for the tuning job."""
+
+
+CreateTuningJobParametersOrDict = Union[
+    CreateTuningJobParameters, CreateTuningJobParametersDict
+]
+
+
+class UserContent(Content):
+  """UserContent facilitates the creation of a Content object with a user role.
+
+  Example usages:
+
+
+  - Create a user Content object with a string:
+    user_content = UserContent("Why is the sky blue?")
+  - Create a user Content object with a file data Part object:
+    user_content = UserContent(Part.from_uri(file_uril="gs://bucket/file.txt",
+    mime_type="text/plain"))
+  - Create a user Content object with byte data Part object:
+    user_content = UserContent(Part.from_bytes(data=b"Hello, World!",
+    mime_type="text/plain"))
+
+  You can create a user Content object using other classmethods in the Part
+  class as well.
+  You can also create a user Content using a list of Part objects or strings.
+  """
+
+  role: Literal['user'] = Field(default='user', init=False, frozen=True)
+  parts: list[Part] = Field()
+
+  def __init__(
+      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
+  ):
+    from . import _transformers as t
+
+    super().__init__(parts=t.t_parts(parts=parts))
+
+
+class ModelContent(Content):
+  """ModelContent facilitates the creation of a Content object with a model role.
+
+  Example usages:
+
+
+  - Create a model Content object with a string:
+    model_content = ModelContent("Why is the sky blue?")
+  - Create a model Content object with a file data Part object:
+    model_content = ModelContent(Part.from_uri(file_uril="gs://bucket/file.txt",
+    mime_type="text/plain"))
+  - Create a model Content object with byte data Part object:
+    model_content = ModelContent(Part.from_bytes(data=b"Hello, World!",
+    mime_type="text/plain"))
+
+  You can create a model Content object using other classmethods in the Part
+  class as well.
+  You can also create a model Content using a list of Part objects or strings.
+  """
+
+  role: Literal['model'] = Field(default='model', init=False, frozen=True)
+  parts: list[Part] = Field()
+
+  def __init__(
+      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
+  ):
+    from . import _transformers as t
+
+    super().__init__(parts=t.t_parts(parts=parts))
+
+
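The UserContent and ModelContent helpers above mirror the usage shown in their docstrings; for example, building a short canned history:

    from google.genai import types

    history = [
        types.UserContent('Why is the sky blue?'),
        types.ModelContent('Rayleigh scattering favors shorter wavelengths.'),
    ]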
+class CustomOutputFormatConfig(_common.BaseModel):
+  """Config for custom output format."""
+
+  return_raw_output: Optional[bool] = Field(
+      default=None, description="""Optional. Whether to return raw output."""
+  )
+
+
+class CustomOutputFormatConfigDict(TypedDict, total=False):
+  """Config for custom output format."""
+
+  return_raw_output: Optional[bool]
+  """Optional. Whether to return raw output."""
+
+
+CustomOutputFormatConfigOrDict = Union[
+    CustomOutputFormatConfig, CustomOutputFormatConfigDict
+]
+
+
+class BleuSpec(_common.BaseModel):
+  """Spec for bleu metric."""
+
+  use_effective_order: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Whether to use_effective_order to compute bleu score.""",
+  )
+
+
+class BleuSpecDict(TypedDict, total=False):
+  """Spec for bleu metric."""
+
+  use_effective_order: Optional[bool]
+  """Optional. Whether to use_effective_order to compute bleu score."""
+
+
+BleuSpecOrDict = Union[BleuSpec, BleuSpecDict]
+
+
+class PairwiseMetricSpec(_common.BaseModel):
+  """Spec for pairwise metric."""
+
+  metric_prompt_template: Optional[str] = Field(
+      default=None,
+      description="""Required. Metric prompt template for pairwise metric.""",
+  )
+  baseline_response_field_name: Optional[str] = Field(
+      default=None,
+      description="""Optional. The field name of the baseline response.""",
+  )
+  candidate_response_field_name: Optional[str] = Field(
+      default=None,
+      description="""Optional. The field name of the candidate response.""",
+  )
+  custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
+      default=None,
+      description="""Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty.""",
+  )
+  system_instruction: Optional[str] = Field(
+      default=None,
+      description="""Optional. System instructions for pairwise metric.""",
+  )
+
+
+class PairwiseMetricSpecDict(TypedDict, total=False):
+  """Spec for pairwise metric."""
+
+  metric_prompt_template: Optional[str]
+  """Required. Metric prompt template for pairwise metric."""
+
+  baseline_response_field_name: Optional[str]
+  """Optional. The field name of the baseline response."""
+
+  candidate_response_field_name: Optional[str]
+  """Optional. The field name of the candidate response."""
+
+  custom_output_format_config: Optional[CustomOutputFormatConfigDict]
+  """Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty."""
+
+  system_instruction: Optional[str]
+  """Optional. System instructions for pairwise metric."""
+
+
+PairwiseMetricSpecOrDict = Union[PairwiseMetricSpec, PairwiseMetricSpecDict]
+
+
+class PointwiseMetricSpec(_common.BaseModel):
+  """Spec for pointwise metric."""
+
+  metric_prompt_template: Optional[str] = Field(
+      default=None,
+      description="""Required. Metric prompt template for pointwise metric.""",
+  )
+  custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
+      default=None,
+      description="""Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty.""",
+  )
+  system_instruction: Optional[str] = Field(
+      default=None,
+      description="""Optional. System instructions for pointwise metric.""",
+  )
+
+
+class PointwiseMetricSpecDict(TypedDict, total=False):
+  """Spec for pointwise metric."""
+
+  metric_prompt_template: Optional[str]
+  """Required. Metric prompt template for pointwise metric."""
+
+  custom_output_format_config: Optional[CustomOutputFormatConfigDict]
+  """Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty."""
+
+  system_instruction: Optional[str]
+  """Optional. System instructions for pointwise metric."""
+
+
+PointwiseMetricSpecOrDict = Union[PointwiseMetricSpec, PointwiseMetricSpecDict]
+
+
+class RougeSpec(_common.BaseModel):
+  """Spec for rouge metric."""
+
+  rouge_type: Optional[str] = Field(
+      default=None,
+      description="""Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum.""",
+  )
+  split_summaries: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Whether to split summaries while using rougeLsum.""",
+  )
+  use_stemmer: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Whether to use stemmer to compute rouge score.""",
+  )
+
+
+class RougeSpecDict(TypedDict, total=False):
+  """Spec for rouge metric."""
+
+  rouge_type: Optional[str]
+  """Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum."""
+
+  split_summaries: Optional[bool]
+  """Optional. Whether to split summaries while using rougeLsum."""
+
+  use_stemmer: Optional[bool]
+  """Optional. Whether to use stemmer to compute rouge score."""
+
+
+RougeSpecOrDict = Union[RougeSpec, RougeSpecDict]
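The evaluation metric specs added above are plain config models; a short illustrative sketch (values are placeholders):

    from google.genai import types

    rouge = types.RougeSpec(rouge_type='rougeL', use_stemmer=True)
    bleu = types.BleuSpec(use_effective_order=True)
    pointwise = types.PointwiseMetricSpec(
        metric_prompt_template='Score the response between 0 and 1: {response}',
        custom_output_format_config=types.CustomOutputFormatConfig(return_raw_output=True),
    )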