google-genai 1.28.0__py3-none-any.whl → 1.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -26,7 +26,7 @@ import types as builtin_types
  import typing
  from typing import Any, Callable, Literal, Optional, Sequence, Union, _UnionGenericAlias # type: ignore
  import pydantic
- from pydantic import Field
+ from pydantic import ConfigDict, Field, PrivateAttr, model_validator
  from typing_extensions import Self, TypedDict
  from . import _common
 
@@ -76,10 +76,20 @@ else:
  McpClientSession = None
  McpCallToolResult = None
 
+ if typing.TYPE_CHECKING:
+ import yaml
+ else:
+ try:
+ import yaml
+ except ImportError:
+ yaml = None
+
  logger = logging.getLogger('google_genai.types')
 
  T = typing.TypeVar('T', bound='GenerateContentResponse')
 
+ MetricSubclass = typing.TypeVar('MetricSubclass', bound='Metric')
+
 
  class Outcome(_common.CaseInSensitiveEnum):
  """Required. Outcome of the code execution."""
@@ -223,15 +233,6 @@ class ApiSpec(_common.CaseInSensitiveEnum):
  """Elastic search API spec."""
 
 
- class Environment(_common.CaseInSensitiveEnum):
- """Required. The environment being operated."""
-
- ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED'
- """Defaults to browser."""
- ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER'
- """Operates in a web browser."""
-
-
  class UrlRetrievalStatus(_common.CaseInSensitiveEnum):
  """Status of the url retrieval."""
 
@@ -241,6 +242,10 @@ class UrlRetrievalStatus(_common.CaseInSensitiveEnum):
  """Url retrieval is successful."""
  URL_RETRIEVAL_STATUS_ERROR = 'URL_RETRIEVAL_STATUS_ERROR'
  """Url retrieval is failed due to error."""
+ URL_RETRIEVAL_STATUS_PAYWALL = 'URL_RETRIEVAL_STATUS_PAYWALL'
+ """Url retrieval is failed because the content is behind paywall."""
+ URL_RETRIEVAL_STATUS_UNSAFE = 'URL_RETRIEVAL_STATUS_UNSAFE'
+ """Url retrieval is failed because the content is unsafe."""
 
 
  class FinishReason(_common.CaseInSensitiveEnum):
@@ -394,6 +399,17 @@ class JobState(_common.CaseInSensitiveEnum):
  """The job is partially succeeded, some results may be missing due to errors."""
 
 
+ class TuningMode(_common.CaseInSensitiveEnum):
+ """Tuning mode."""
+
+ TUNING_MODE_UNSPECIFIED = 'TUNING_MODE_UNSPECIFIED'
+ """Tuning mode is unspecified."""
+ TUNING_MODE_FULL = 'TUNING_MODE_FULL'
+ """Full fine-tuning mode."""
+ TUNING_MODE_PEFT_ADAPTER = 'TUNING_MODE_PEFT_ADAPTER'
+ """PEFT adapter tuning mode."""
+
+
  class AdapterSize(_common.CaseInSensitiveEnum):
  """Optional. Adapter size for tuning."""
 
@@ -444,6 +460,15 @@ class DynamicRetrievalConfigMode(_common.CaseInSensitiveEnum):
  """Run retrieval only when system decides it is necessary."""
 
 
+ class Environment(_common.CaseInSensitiveEnum):
+ """The environment being operated."""
+
+ ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED'
+ """Defaults to browser."""
+ ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER'
+ """Operates in a web browser."""
+
+
  class FunctionCallingConfigMode(_common.CaseInSensitiveEnum):
  """Config for the function calling config mode."""
 
@@ -672,6 +697,22 @@ class Scale(_common.CaseInSensitiveEnum):
  """B major or Ab minor."""
 
 
+ class MusicGenerationMode(_common.CaseInSensitiveEnum):
+ """The mode of music generation."""
+
+ MUSIC_GENERATION_MODE_UNSPECIFIED = 'MUSIC_GENERATION_MODE_UNSPECIFIED'
+ """Rely on the server default generation mode."""
+ QUALITY = 'QUALITY'
+ """Steer text prompts to regions of latent space with higher quality
+ music."""
+ DIVERSITY = 'DIVERSITY'
+ """Steer text prompts to regions of latent space with a larger
+ diversity of music."""
+ VOCALIZATION = 'VOCALIZATION'
+ """Steer text prompts to regions of latent space more likely to
+ generate music with vocals."""
+
+
  class LiveMusicPlaybackControl(_common.CaseInSensitiveEnum):
  """The playback control signal to apply to the music generation."""
 
@@ -2092,20 +2133,50 @@ class FunctionDeclaration(_common.BaseModel):
  from . import _automatic_function_calling_util
 
  parameters_properties = {}
+ parameters_json_schema = {}
  annotation_under_future = typing.get_type_hints(callable)
- for name, param in inspect.signature(callable).parameters.items():
- if param.kind in (
- inspect.Parameter.POSITIONAL_OR_KEYWORD,
- inspect.Parameter.KEYWORD_ONLY,
- inspect.Parameter.POSITIONAL_ONLY,
- ):
- # This snippet catches the case when type hints are stored as strings
- if isinstance(param.annotation, str):
- param = param.replace(annotation=annotation_under_future[name])
- schema = _automatic_function_calling_util._parse_schema_from_parameter(
- api_option, param, callable.__name__
- )
- parameters_properties[name] = schema
+ try:
+ for name, param in inspect.signature(callable).parameters.items():
+ if param.kind in (
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
+ inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_ONLY,
+ ):
+ param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
+ param, annotation_under_future, name
+ )
+ schema = (
+ _automatic_function_calling_util._parse_schema_from_parameter(
+ api_option, param, callable.__name__
+ )
+ )
+ parameters_properties[name] = schema
+ except ValueError:
+ parameters_properties = {}
+ for name, param in inspect.signature(callable).parameters.items():
+ if param.kind in (
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
+ inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_ONLY,
+ ):
+ try:
+ param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
+ param, annotation_under_future, name
+ )
+ param_schema_adapter = pydantic.TypeAdapter(
+ param.annotation,
+ config=pydantic.ConfigDict(arbitrary_types_allowed=True),
+ )
+ json_schema_dict = param_schema_adapter.json_schema()
+ json_schema_dict = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
+ json_schema_dict
+ )
+ parameters_json_schema[name] = json_schema_dict
+ except Exception as e:
+ _automatic_function_calling_util._raise_for_unsupported_param(
+ param, callable.__name__, e
+ )
+
  declaration = FunctionDeclaration(
  name=callable.__name__,
  description=inspect.cleandoc(callable.__doc__)
@@ -2123,6 +2194,8 @@ class FunctionDeclaration(_common.BaseModel):
  declaration.parameters
  )
  )
+ elif parameters_json_schema:
+ declaration.parameters_json_schema = parameters_json_schema
  # TODO: b/421991354 - Remove this check once the bug is fixed.
  if api_option == 'GEMINI_API':
  return declaration
@@ -2142,13 +2215,39 @@ class FunctionDeclaration(_common.BaseModel):
  return_value = return_value.replace(
  annotation=annotation_under_future['return']
  )
- declaration.response = (
- _automatic_function_calling_util._parse_schema_from_parameter(
- api_option,
- return_value,
- callable.__name__,
+ response_schema: Optional[Schema] = None
+ response_json_schema: Optional[Union[dict[str, Any], Schema]] = {}
+ try:
+ response_schema = (
+ _automatic_function_calling_util._parse_schema_from_parameter(
+ api_option,
+ return_value,
+ callable.__name__,
+ )
+ )
+ if response_schema.any_of is not None:
+ # To handle any_of, we need to use responseJsonSchema
+ response_json_schema = response_schema
+ response_schema = None
+ except ValueError:
+ try:
+ return_value_schema_adapter = pydantic.TypeAdapter(
+ return_value.annotation,
+ config=pydantic.ConfigDict(arbitrary_types_allowed=True),
  )
- )
+ response_json_schema = return_value_schema_adapter.json_schema()
+ response_json_schema = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
+ response_json_schema
+ )
+ except Exception as e:
+ _automatic_function_calling_util._raise_for_unsupported_param(
+ return_value, callable.__name__, e
+ )
+
+ if response_schema:
+ declaration.response = response_schema
+ elif response_json_schema:
+ declaration.response_json_schema = response_json_schema
  return declaration
 
  @classmethod
@@ -2159,7 +2258,16 @@ class FunctionDeclaration(_common.BaseModel):
  callable: Callable[..., Any],
  behavior: Optional[Behavior] = None,
  ) -> 'FunctionDeclaration':
- """Converts a Callable to a FunctionDeclaration based on the client."""
+ """Converts a Callable to a FunctionDeclaration based on the client.
+
+ Note: For best results prefer
+ [Google-style
+ docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods)
+ when describing arguments. This function does **not** parse argument
+ descriptions into the property description slots of the resulting structure.
+ Instead it sends the whole docstring in the top-level function description.
+ Google-style docstring are closest to what the model is trained on.
+ """
  if client.vertexai:
  return cls.from_callable_with_api_option(
  callable=callable, api_option='VERTEX_AI', behavior=behavior
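
Editor's note: the new docstring above recommends Google-style docstrings for callables handed to automatic function calling. A minimal sketch of what that looks like in practice; the get_weather function, its docstring, and the client setup are illustrative, not part of the package:

    from google import genai
    from google.genai import types

    def get_weather(city: str, unit: str = "celsius") -> dict:
        """Looks up current weather for a city.

        Args:
          city: Name of the city to look up.
          unit: Either "celsius" or "fahrenheit".
        """
        return {"city": city, "temperature": 21, "unit": unit}

    client = genai.Client()  # assumes API key / Vertex credentials are configured
    decl = types.FunctionDeclaration.from_callable(client=client, callable=get_weather)
    # The whole docstring lands in decl.description; per-argument descriptions
    # are not split out into the parameter schema.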
@@ -2247,6 +2355,11 @@ class GoogleSearch(_common.BaseModel):
  If customers set a start time, they must set an end time (and vice versa).
  """,
  )
+ exclude_domains: Optional[list[str]] = Field(
+ default=None,
+ description="""Optional. List of domains to be excluded from the search results.
+ The default limit is 2000 domains.""",
+ )
 
 
  class GoogleSearchDict(TypedDict, total=False):
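
A small sketch of how the new exclude_domains field can be supplied when grounding with Google Search; the model id and domain list are illustrative, the field itself is the one added above:

    from google import genai
    from google.genai import types

    client = genai.Client()
    response = client.models.generate_content(
        model="gemini-2.5-flash",  # illustrative model id
        contents="Summarize recent coverage of the topic.",
        config=types.GenerateContentConfig(
            tools=[types.Tool(google_search=types.GoogleSearch(
                exclude_domains=["example.com"],  # drop these domains from results
            ))],
        ),
    )
    print(response.text)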
@@ -2257,6 +2370,10 @@ class GoogleSearchDict(TypedDict, total=False):
  If customers set a start time, they must set an end time (and vice versa).
  """
 
+ exclude_domains: Optional[list[str]]
+ """Optional. List of domains to be excluded from the search results.
+ The default limit is 2000 domains."""
+
 
  GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]
 
@@ -2313,13 +2430,17 @@ GoogleSearchRetrievalOrDict = Union[
  class EnterpriseWebSearch(_common.BaseModel):
  """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance."""
 
- pass
+ exclude_domains: Optional[list[str]] = Field(
+ default=None,
+ description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""",
+ )
 
 
  class EnterpriseWebSearchDict(TypedDict, total=False):
  """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance."""
 
- pass
+ exclude_domains: Optional[list[str]]
+ """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains."""
 
 
  EnterpriseWebSearchOrDict = Union[EnterpriseWebSearch, EnterpriseWebSearchDict]
@@ -2529,6 +2650,24 @@ class UrlContextDict(TypedDict, total=False):
  UrlContextOrDict = Union[UrlContext, UrlContextDict]
 
 
+ class ToolComputerUse(_common.BaseModel):
+ """Tool to support computer use."""
+
+ environment: Optional[Environment] = Field(
+ default=None, description="""Required. The environment being operated."""
+ )
+
+
+ class ToolComputerUseDict(TypedDict, total=False):
+ """Tool to support computer use."""
+
+ environment: Optional[Environment]
+ """Required. The environment being operated."""
+
+
+ ToolComputerUseOrDict = Union[ToolComputerUse, ToolComputerUseDict]
+
+
  class ApiAuthApiKeyConfig(_common.BaseModel):
  """The API secret."""
 
@@ -3089,24 +3228,6 @@ class ToolCodeExecutionDict(TypedDict, total=False):
  ToolCodeExecutionOrDict = Union[ToolCodeExecution, ToolCodeExecutionDict]
 
 
- class ToolComputerUse(_common.BaseModel):
- """Tool to support computer use."""
-
- environment: Optional[Environment] = Field(
- default=None, description="""Required. The environment being operated."""
- )
-
-
- class ToolComputerUseDict(TypedDict, total=False):
- """Tool to support computer use."""
-
- environment: Optional[Environment]
- """Required. The environment being operated."""
-
-
- ToolComputerUseOrDict = Union[ToolComputerUse, ToolComputerUseDict]
-
-
  class Tool(_common.BaseModel):
  """Tool details of a tool that the model may use to generate a response."""
 
@@ -3141,13 +3262,15 @@ class Tool(_common.BaseModel):
  default=None,
  description="""Optional. Tool to support URL context retrieval.""",
  )
- code_execution: Optional[ToolCodeExecution] = Field(
+ computer_use: Optional[ToolComputerUse] = Field(
  default=None,
- description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""",
+ description="""Optional. Tool to support the model interacting directly with the
+ computer. If enabled, it automatically populates computer-use specific
+ Function Declarations.""",
  )
- computer_use: Optional[ToolComputerUse] = Field(
+ code_execution: Optional[ToolCodeExecution] = Field(
  default=None,
- description="""Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations.""",
+ description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""",
  )
 
 
@@ -3178,12 +3301,14 @@ class ToolDict(TypedDict, total=False):
  url_context: Optional[UrlContextDict]
  """Optional. Tool to support URL context retrieval."""
 
+ computer_use: Optional[ToolComputerUseDict]
+ """Optional. Tool to support the model interacting directly with the
+ computer. If enabled, it automatically populates computer-use specific
+ Function Declarations."""
+
  code_execution: Optional[ToolCodeExecutionDict]
  """Optional. CodeExecution tool type. Enables the model to execute code as part of generation."""
 
- computer_use: Optional[ToolComputerUseDict]
- """Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations."""
-
 
  ToolOrDict = Union[Tool, ToolDict]
  if _is_mcp_imported:
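
With ToolComputerUse now defined ahead of Tool and wired into Tool.computer_use, a hedged sketch of enabling it; availability is model- and account-dependent, and the values here are illustrative:

    from google.genai import types

    config = types.GenerateContentConfig(
        tools=[types.Tool(
            computer_use=types.ToolComputerUse(
                environment=types.Environment.ENVIRONMENT_BROWSER,
            ),
        )],
    )
    # Pass `config` to client.models.generate_content(...) as usual; the model
    # then receives the computer-use specific function declarations automatically.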
@@ -3676,19 +3801,25 @@ class FileDict(TypedDict, total=False):
 
  FileOrDict = Union[File, FileDict]
 
+
  if _is_pillow_image_imported:
- PartUnion = Union[File, Part, PIL_Image, str]
+ PartUnion = Union[str, PIL_Image, File, Part]
  else:
- PartUnion = Union[File, Part, str] # type: ignore[misc]
+ PartUnion = Union[str, File, Part] # type: ignore[misc]
 
 
- PartUnionDict = Union[PartUnion, PartDict]
+ if _is_pillow_image_imported:
+ PartUnionDict = Union[str, PIL_Image, File, FileDict, Part, PartDict]
+ else:
+ PartUnionDict = Union[str, File, FileDict, Part, PartDict] # type: ignore[misc]
 
 
- ContentUnion = Union[Content, list[PartUnion], PartUnion]
+ ContentUnion = Union[Content, PartUnion, list[PartUnion]]
 
 
- ContentUnionDict = Union[ContentUnion, ContentDict]
+ ContentUnionDict = Union[
+ Content, ContentDict, PartUnionDict, list[PartUnionDict]
+ ]
 
 
  class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel):
@@ -3764,10 +3895,10 @@ GenerationConfigRoutingConfigOrDict = Union[
  ]
 
 
- SpeechConfigUnion = Union[SpeechConfig, str]
+ SpeechConfigUnion = Union[str, SpeechConfig]
 
 
- SpeechConfigUnionDict = Union[SpeechConfigUnion, SpeechConfigDict]
+ SpeechConfigUnionDict = Union[str, SpeechConfig, SpeechConfigDict]
 
 
  class GenerateContentConfig(_common.BaseModel):
@@ -4160,10 +4291,10 @@ GenerateContentConfigOrDict = Union[
  ]
 
 
- ContentListUnion = Union[list[ContentUnion], ContentUnion]
+ ContentListUnion = Union[ContentUnion, list[ContentUnion]]
 
 
- ContentListUnionDict = Union[list[ContentUnionDict], ContentUnionDict]
+ ContentListUnionDict = Union[ContentUnionDict, list[ContentUnionDict]]
 
 
  class _GenerateContentParameters(_common.BaseModel):
@@ -4401,6 +4532,171 @@ class UrlContextMetadataDict(TypedDict, total=False):
4401
4532
  UrlContextMetadataOrDict = Union[UrlContextMetadata, UrlContextMetadataDict]
4402
4533
 
4403
4534
 
4535
+ class GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution(_common.BaseModel):
4536
+ """Author attribution for a photo or review."""
4537
+
4538
+ display_name: Optional[str] = Field(
4539
+ default=None, description="""Name of the author of the Photo or Review."""
4540
+ )
4541
+ photo_uri: Optional[str] = Field(
4542
+ default=None,
4543
+ description="""Profile photo URI of the author of the Photo or Review.""",
4544
+ )
4545
+ uri: Optional[str] = Field(
4546
+ default=None, description="""URI of the author of the Photo or Review."""
4547
+ )
4548
+
4549
+
4550
+ class GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict(
4551
+ TypedDict, total=False
4552
+ ):
4553
+ """Author attribution for a photo or review."""
4554
+
4555
+ display_name: Optional[str]
4556
+ """Name of the author of the Photo or Review."""
4557
+
4558
+ photo_uri: Optional[str]
4559
+ """Profile photo URI of the author of the Photo or Review."""
4560
+
4561
+ uri: Optional[str]
4562
+ """URI of the author of the Photo or Review."""
4563
+
4564
+
4565
+ GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionOrDict = Union[
4566
+ GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution,
4567
+ GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict,
4568
+ ]
4569
+
4570
+
4571
+ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel):
4572
+ """Encapsulates a review snippet."""
4573
+
4574
+ author_attribution: Optional[
4575
+ GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution
4576
+ ] = Field(default=None, description="""This review's author.""")
4577
+ flag_content_uri: Optional[str] = Field(
4578
+ default=None,
4579
+ description="""A link where users can flag a problem with the review.""",
4580
+ )
4581
+ google_maps_uri: Optional[str] = Field(
4582
+ default=None, description="""A link to show the review on Google Maps."""
4583
+ )
4584
+ relative_publish_time_description: Optional[str] = Field(
4585
+ default=None,
4586
+ description="""A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.""",
4587
+ )
4588
+ review: Optional[str] = Field(
4589
+ default=None,
4590
+ description="""A reference representing this place review which may be used to look up this place review again.""",
4591
+ )
4592
+
4593
+
4594
+ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict(
4595
+ TypedDict, total=False
4596
+ ):
4597
+ """Encapsulates a review snippet."""
4598
+
4599
+ author_attribution: Optional[
4600
+ GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict
4601
+ ]
4602
+ """This review's author."""
4603
+
4604
+ flag_content_uri: Optional[str]
4605
+ """A link where users can flag a problem with the review."""
4606
+
4607
+ google_maps_uri: Optional[str]
4608
+ """A link to show the review on Google Maps."""
4609
+
4610
+ relative_publish_time_description: Optional[str]
4611
+ """A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country."""
4612
+
4613
+ review: Optional[str]
4614
+ """A reference representing this place review which may be used to look up this place review again."""
4615
+
4616
+
4617
+ GroundingChunkMapsPlaceAnswerSourcesReviewSnippetOrDict = Union[
4618
+ GroundingChunkMapsPlaceAnswerSourcesReviewSnippet,
4619
+ GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict,
4620
+ ]
4621
+
4622
+
4623
+ class GroundingChunkMapsPlaceAnswerSources(_common.BaseModel):
4624
+ """Sources used to generate the place answer."""
4625
+
4626
+ flag_content_uri: Optional[str] = Field(
4627
+ default=None,
4628
+ description="""A link where users can flag a problem with the generated answer.""",
4629
+ )
4630
+ review_snippets: Optional[
4631
+ list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippet]
4632
+ ] = Field(
4633
+ default=None,
4634
+ description="""Snippets of reviews that are used to generate the answer.""",
4635
+ )
4636
+
4637
+
4638
+ class GroundingChunkMapsPlaceAnswerSourcesDict(TypedDict, total=False):
4639
+ """Sources used to generate the place answer."""
4640
+
4641
+ flag_content_uri: Optional[str]
4642
+ """A link where users can flag a problem with the generated answer."""
4643
+
4644
+ review_snippets: Optional[
4645
+ list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict]
4646
+ ]
4647
+ """Snippets of reviews that are used to generate the answer."""
4648
+
4649
+
4650
+ GroundingChunkMapsPlaceAnswerSourcesOrDict = Union[
4651
+ GroundingChunkMapsPlaceAnswerSources,
4652
+ GroundingChunkMapsPlaceAnswerSourcesDict,
4653
+ ]
4654
+
4655
+
4656
+ class GroundingChunkMaps(_common.BaseModel):
4657
+ """Chunk from Google Maps."""
4658
+
4659
+ place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSources] = Field(
4660
+ default=None,
4661
+ description="""Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content.""",
4662
+ )
4663
+ place_id: Optional[str] = Field(
4664
+ default=None,
4665
+ description="""This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place.""",
4666
+ )
4667
+ text: Optional[str] = Field(
4668
+ default=None, description="""Text of the chunk."""
4669
+ )
4670
+ title: Optional[str] = Field(
4671
+ default=None, description="""Title of the chunk."""
4672
+ )
4673
+ uri: Optional[str] = Field(
4674
+ default=None, description="""URI reference of the chunk."""
4675
+ )
4676
+
4677
+
4678
+ class GroundingChunkMapsDict(TypedDict, total=False):
4679
+ """Chunk from Google Maps."""
4680
+
4681
+ place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSourcesDict]
4682
+ """Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content."""
4683
+
4684
+ place_id: Optional[str]
4685
+ """This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place."""
4686
+
4687
+ text: Optional[str]
4688
+ """Text of the chunk."""
4689
+
4690
+ title: Optional[str]
4691
+ """Title of the chunk."""
4692
+
4693
+ uri: Optional[str]
4694
+ """URI reference of the chunk."""
4695
+
4696
+
4697
+ GroundingChunkMapsOrDict = Union[GroundingChunkMaps, GroundingChunkMapsDict]
4698
+
4699
+
4404
4700
  class RagChunkPageSpan(_common.BaseModel):
4405
4701
  """Represents where the chunk starts and ends in the document."""
4406
4702
 
@@ -4455,6 +4751,10 @@ RagChunkOrDict = Union[RagChunk, RagChunkDict]
  class GroundingChunkRetrievedContext(_common.BaseModel):
  """Chunk from context retrieved by the retrieval tools."""
 
+ document_name: Optional[str] = Field(
+ default=None,
+ description="""Output only. The full document name for the referenced Vertex AI Search document.""",
+ )
  rag_chunk: Optional[RagChunk] = Field(
  default=None,
  description="""Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool.""",
@@ -4473,6 +4773,9 @@ class GroundingChunkRetrievedContext(_common.BaseModel):
  class GroundingChunkRetrievedContextDict(TypedDict, total=False):
  """Chunk from context retrieved by the retrieval tools."""
 
+ document_name: Optional[str]
+ """Output only. The full document name for the referenced Vertex AI Search document."""
+
  rag_chunk: Optional[RagChunkDict]
  """Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool."""
 
@@ -4524,6 +4827,9 @@ GroundingChunkWebOrDict = Union[GroundingChunkWeb, GroundingChunkWebDict]
  class GroundingChunk(_common.BaseModel):
  """Grounding chunk."""
 
+ maps: Optional[GroundingChunkMaps] = Field(
+ default=None, description="""Grounding chunk from Google Maps."""
+ )
  retrieved_context: Optional[GroundingChunkRetrievedContext] = Field(
  default=None,
  description="""Grounding chunk from context retrieved by the retrieval tools.""",
@@ -4536,6 +4842,9 @@ class GroundingChunk(_common.BaseModel):
  class GroundingChunkDict(TypedDict, total=False):
  """Grounding chunk."""
 
+ maps: Optional[GroundingChunkMapsDict]
+ """Grounding chunk from Google Maps."""
+
  retrieved_context: Optional[GroundingChunkRetrievedContextDict]
  """Grounding chunk from context retrieved by the retrieval tools."""
 
@@ -4667,6 +4976,10 @@ SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict]
  class GroundingMetadata(_common.BaseModel):
  """Metadata returned to client when grounding is enabled."""
 
+ google_maps_widget_context_token: Optional[str] = Field(
+ default=None,
+ description="""Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding.""",
+ )
  grounding_chunks: Optional[list[GroundingChunk]] = Field(
  default=None,
  description="""List of supporting references retrieved from specified grounding source.""",
@@ -4694,6 +5007,9 @@ class GroundingMetadata(_common.BaseModel):
  class GroundingMetadataDict(TypedDict, total=False):
  """Metadata returned to client when grounding is enabled."""
 
+ google_maps_widget_context_token: Optional[str]
+ """Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding."""
+
  grounding_chunks: Optional[list[GroundingChunkDict]]
  """List of supporting references retrieved from specified grounding source."""
 
@@ -5118,11 +5434,6 @@ class GenerateContentResponse(_common.BaseModel):
  description="""Timestamp when the request is made to the server.
  """,
  )
- response_id: Optional[str] = Field(
- default=None,
- description="""Identifier for each response.
- """,
- )
  model_version: Optional[str] = Field(
  default=None,
  description="""Output only. The model version used to generate the response.""",
@@ -5131,6 +5442,10 @@ class GenerateContentResponse(_common.BaseModel):
  default=None,
  description="""Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""",
  )
+ response_id: Optional[str] = Field(
+ default=None,
+ description="""Output only. response_id is used to identify each response. It is the encoding of the event_id.""",
+ )
  usage_metadata: Optional[GenerateContentResponseUsageMetadata] = Field(
  default=None, description="""Usage metadata about the response(s)."""
  )
@@ -5377,16 +5692,15 @@ class GenerateContentResponseDict(TypedDict, total=False):
  """Timestamp when the request is made to the server.
  """
 
- response_id: Optional[str]
- """Identifier for each response.
- """
-
  model_version: Optional[str]
  """Output only. The model version used to generate the response."""
 
  prompt_feedback: Optional[GenerateContentResponsePromptFeedbackDict]
  """Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."""
 
+ response_id: Optional[str]
+ """Output only. response_id is used to identify each response. It is the encoding of the event_id."""
+
  usage_metadata: Optional[GenerateContentResponseUsageMetadataDict]
  """Usage metadata about the response(s)."""
 
@@ -6729,55 +7043,253 @@ UpscaleImageResponseOrDict = Union[
6729
7043
  ]
6730
7044
 
6731
7045
 
6732
- class GetModelConfig(_common.BaseModel):
6733
- """Optional parameters for models.get method."""
7046
+ class ProductImage(_common.BaseModel):
7047
+ """An image of the product."""
6734
7048
 
6735
- http_options: Optional[HttpOptions] = Field(
6736
- default=None, description="""Used to override HTTP request options."""
7049
+ product_image: Optional[Image] = Field(
7050
+ default=None,
7051
+ description="""An image of the product to be recontextualized.""",
6737
7052
  )
6738
7053
 
6739
7054
 
6740
- class GetModelConfigDict(TypedDict, total=False):
6741
- """Optional parameters for models.get method."""
7055
+ class ProductImageDict(TypedDict, total=False):
7056
+ """An image of the product."""
6742
7057
 
6743
- http_options: Optional[HttpOptionsDict]
6744
- """Used to override HTTP request options."""
7058
+ product_image: Optional[ImageDict]
7059
+ """An image of the product to be recontextualized."""
6745
7060
 
6746
7061
 
6747
- GetModelConfigOrDict = Union[GetModelConfig, GetModelConfigDict]
7062
+ ProductImageOrDict = Union[ProductImage, ProductImageDict]
6748
7063
 
6749
7064
 
6750
- class _GetModelParameters(_common.BaseModel):
7065
+ class RecontextImageSource(_common.BaseModel):
7066
+ """A set of source input(s) for image recontextualization."""
6751
7067
 
6752
- model: Optional[str] = Field(default=None, description="""""")
6753
- config: Optional[GetModelConfig] = Field(
6754
- default=None, description="""Optional parameters for the request."""
7068
+ prompt: Optional[str] = Field(
7069
+ default=None,
7070
+ description="""A text prompt for guiding the model during image
7071
+ recontextualization. Not supported for Virtual Try-On.""",
7072
+ )
7073
+ person_image: Optional[Image] = Field(
7074
+ default=None,
7075
+ description="""Image of the person or subject who will be wearing the
7076
+ product(s).""",
7077
+ )
7078
+ product_images: Optional[list[ProductImage]] = Field(
7079
+ default=None, description="""A list of product images."""
6755
7080
  )
6756
7081
 
6757
7082
 
6758
- class _GetModelParametersDict(TypedDict, total=False):
7083
+ class RecontextImageSourceDict(TypedDict, total=False):
7084
+ """A set of source input(s) for image recontextualization."""
6759
7085
 
6760
- model: Optional[str]
6761
- """"""
7086
+ prompt: Optional[str]
7087
+ """A text prompt for guiding the model during image
7088
+ recontextualization. Not supported for Virtual Try-On."""
6762
7089
 
6763
- config: Optional[GetModelConfigDict]
6764
- """Optional parameters for the request."""
7090
+ person_image: Optional[ImageDict]
7091
+ """Image of the person or subject who will be wearing the
7092
+ product(s)."""
6765
7093
 
7094
+ product_images: Optional[list[ProductImageDict]]
7095
+ """A list of product images."""
6766
7096
 
6767
- _GetModelParametersOrDict = Union[_GetModelParameters, _GetModelParametersDict]
6768
7097
 
7098
+ RecontextImageSourceOrDict = Union[
7099
+ RecontextImageSource, RecontextImageSourceDict
7100
+ ]
6769
7101
 
6770
- class Endpoint(_common.BaseModel):
6771
- """An endpoint where you deploy models."""
6772
7102
 
6773
- name: Optional[str] = Field(
6774
- default=None, description="""Resource name of the endpoint."""
7103
+ class RecontextImageConfig(_common.BaseModel):
7104
+ """Configuration for recontextualizing an image."""
7105
+
7106
+ http_options: Optional[HttpOptions] = Field(
7107
+ default=None, description="""Used to override HTTP request options."""
6775
7108
  )
6776
- deployed_model_id: Optional[str] = Field(
6777
- default=None,
6778
- description="""ID of the model that's deployed to the endpoint.""",
7109
+ number_of_images: Optional[int] = Field(
7110
+ default=None, description="""Number of images to generate."""
6779
7111
  )
6780
-
7112
+ base_steps: Optional[int] = Field(
7113
+ default=None,
7114
+ description="""The number of sampling steps. A higher value has better image
7115
+ quality, while a lower value has better latency.""",
7116
+ )
7117
+ output_gcs_uri: Optional[str] = Field(
7118
+ default=None,
7119
+ description="""Cloud Storage URI used to store the generated images.""",
7120
+ )
7121
+ seed: Optional[int] = Field(
7122
+ default=None, description="""Random seed for image generation."""
7123
+ )
7124
+ safety_filter_level: Optional[SafetyFilterLevel] = Field(
7125
+ default=None, description="""Filter level for safety filtering."""
7126
+ )
7127
+ person_generation: Optional[PersonGeneration] = Field(
7128
+ default=None,
7129
+ description="""Whether allow to generate person images, and restrict to specific
7130
+ ages.""",
7131
+ )
7132
+ output_mime_type: Optional[str] = Field(
7133
+ default=None, description="""MIME type of the generated image."""
7134
+ )
7135
+ output_compression_quality: Optional[int] = Field(
7136
+ default=None,
7137
+ description="""Compression quality of the generated image (for ``image/jpeg``
7138
+ only).""",
7139
+ )
7140
+ enhance_prompt: Optional[bool] = Field(
7141
+ default=None, description="""Whether to use the prompt rewriting logic."""
7142
+ )
7143
+
7144
+
7145
+ class RecontextImageConfigDict(TypedDict, total=False):
7146
+ """Configuration for recontextualizing an image."""
7147
+
7148
+ http_options: Optional[HttpOptionsDict]
7149
+ """Used to override HTTP request options."""
7150
+
7151
+ number_of_images: Optional[int]
7152
+ """Number of images to generate."""
7153
+
7154
+ base_steps: Optional[int]
7155
+ """The number of sampling steps. A higher value has better image
7156
+ quality, while a lower value has better latency."""
7157
+
7158
+ output_gcs_uri: Optional[str]
7159
+ """Cloud Storage URI used to store the generated images."""
7160
+
7161
+ seed: Optional[int]
7162
+ """Random seed for image generation."""
7163
+
7164
+ safety_filter_level: Optional[SafetyFilterLevel]
7165
+ """Filter level for safety filtering."""
7166
+
7167
+ person_generation: Optional[PersonGeneration]
7168
+ """Whether allow to generate person images, and restrict to specific
7169
+ ages."""
7170
+
7171
+ output_mime_type: Optional[str]
7172
+ """MIME type of the generated image."""
7173
+
7174
+ output_compression_quality: Optional[int]
7175
+ """Compression quality of the generated image (for ``image/jpeg``
7176
+ only)."""
7177
+
7178
+ enhance_prompt: Optional[bool]
7179
+ """Whether to use the prompt rewriting logic."""
7180
+
7181
+
7182
+ RecontextImageConfigOrDict = Union[
7183
+ RecontextImageConfig, RecontextImageConfigDict
7184
+ ]
7185
+
7186
+
7187
+ class _RecontextImageParameters(_common.BaseModel):
7188
+ """The parameters for recontextualizing an image."""
7189
+
7190
+ model: Optional[str] = Field(
7191
+ default=None,
7192
+ description="""ID of the model to use. For a list of models, see `Google models
7193
+ <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
7194
+ )
7195
+ source: Optional[RecontextImageSource] = Field(
7196
+ default=None,
7197
+ description="""A set of source input(s) for image recontextualization.""",
7198
+ )
7199
+ config: Optional[RecontextImageConfig] = Field(
7200
+ default=None,
7201
+ description="""Configuration for image recontextualization.""",
7202
+ )
7203
+
7204
+
7205
+ class _RecontextImageParametersDict(TypedDict, total=False):
7206
+ """The parameters for recontextualizing an image."""
7207
+
7208
+ model: Optional[str]
7209
+ """ID of the model to use. For a list of models, see `Google models
7210
+ <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""
7211
+
7212
+ source: Optional[RecontextImageSourceDict]
7213
+ """A set of source input(s) for image recontextualization."""
7214
+
7215
+ config: Optional[RecontextImageConfigDict]
7216
+ """Configuration for image recontextualization."""
7217
+
7218
+
7219
+ _RecontextImageParametersOrDict = Union[
7220
+ _RecontextImageParameters, _RecontextImageParametersDict
7221
+ ]
7222
+
7223
+
7224
+ class RecontextImageResponse(_common.BaseModel):
7225
+ """The output images response."""
7226
+
7227
+ generated_images: Optional[list[GeneratedImage]] = Field(
7228
+ default=None, description="""List of generated images."""
7229
+ )
7230
+
7231
+
7232
+ class RecontextImageResponseDict(TypedDict, total=False):
7233
+ """The output images response."""
7234
+
7235
+ generated_images: Optional[list[GeneratedImageDict]]
7236
+ """List of generated images."""
7237
+
7238
+
7239
+ RecontextImageResponseOrDict = Union[
7240
+ RecontextImageResponse, RecontextImageResponseDict
7241
+ ]
7242
+
7243
+
7244
+ class GetModelConfig(_common.BaseModel):
7245
+ """Optional parameters for models.get method."""
7246
+
7247
+ http_options: Optional[HttpOptions] = Field(
7248
+ default=None, description="""Used to override HTTP request options."""
7249
+ )
7250
+
7251
+
7252
+ class GetModelConfigDict(TypedDict, total=False):
7253
+ """Optional parameters for models.get method."""
7254
+
7255
+ http_options: Optional[HttpOptionsDict]
7256
+ """Used to override HTTP request options."""
7257
+
7258
+
7259
+ GetModelConfigOrDict = Union[GetModelConfig, GetModelConfigDict]
7260
+
7261
+
7262
+ class _GetModelParameters(_common.BaseModel):
7263
+
7264
+ model: Optional[str] = Field(default=None, description="""""")
7265
+ config: Optional[GetModelConfig] = Field(
7266
+ default=None, description="""Optional parameters for the request."""
7267
+ )
7268
+
7269
+
7270
+ class _GetModelParametersDict(TypedDict, total=False):
7271
+
7272
+ model: Optional[str]
7273
+ """"""
7274
+
7275
+ config: Optional[GetModelConfigDict]
7276
+ """Optional parameters for the request."""
7277
+
7278
+
7279
+ _GetModelParametersOrDict = Union[_GetModelParameters, _GetModelParametersDict]
7280
+
7281
+
7282
+ class Endpoint(_common.BaseModel):
7283
+ """An endpoint where you deploy models."""
7284
+
7285
+ name: Optional[str] = Field(
7286
+ default=None, description="""Resource name of the endpoint."""
7287
+ )
7288
+ deployed_model_id: Optional[str] = Field(
7289
+ default=None,
7290
+ description="""ID of the model that's deployed to the endpoint.""",
7291
+ )
7292
+
6781
7293
 
6782
7294
  class EndpointDict(TypedDict, total=False):
6783
7295
  """An endpoint where you deploy models."""
@@ -7157,7 +7669,7 @@ class GenerationConfigThinkingConfig(_common.BaseModel):
  )
  thinking_budget: Optional[int] = Field(
  default=None,
- description="""Optional. Indicates the thinking budget in tokens. This is only applied when enable_thinking is true.""",
+ description="""Optional. Indicates the thinking budget in tokens.""",
  )
 
 
@@ -7168,7 +7680,7 @@ class GenerationConfigThinkingConfigDict(TypedDict, total=False):
  """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""
 
  thinking_budget: Optional[int]
- """Optional. Indicates the thinking budget in tokens. This is only applied when enable_thinking is true."""
+ """Optional. Indicates the thinking budget in tokens."""
 
 
  GenerationConfigThinkingConfigOrDict = Union[
@@ -8080,7 +8592,7 @@ class TunedModel(_common.BaseModel):
 
  model: Optional[str] = Field(
  default=None,
- description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}`.""",
+ description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""",
  )
  endpoint: Optional[str] = Field(
  default=None,
@@ -8097,7 +8609,7 @@ class TunedModel(_common.BaseModel):
  class TunedModelDict(TypedDict, total=False):
 
  model: Optional[str]
- """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}`."""
+ """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`"""
 
  endpoint: Optional[str]
  """Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`."""
@@ -8111,6 +8623,274 @@ class TunedModelDict(TypedDict, total=False):
8111
8623
  TunedModelOrDict = Union[TunedModel, TunedModelDict]
8112
8624
 
8113
8625
 
8626
+ class GcsDestination(_common.BaseModel):
8627
+ """The Google Cloud Storage location where the output is to be written to."""
8628
+
8629
+ output_uri_prefix: Optional[str] = Field(
8630
+ default=None,
8631
+ description="""Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist.""",
8632
+ )
8633
+
8634
+ @pydantic.model_validator(mode='after')
8635
+ def _validate_gcs_path(self) -> 'GcsDestination':
8636
+ if self.output_uri_prefix and not self.output_uri_prefix.startswith(
8637
+ 'gs://'
8638
+ ):
8639
+ raise ValueError(
8640
+ 'output_uri_prefix must be a valid GCS path starting with "gs://".'
8641
+ )
8642
+ return self
8643
+
8644
+
8645
+ class GcsDestinationDict(TypedDict, total=False):
8646
+ """The Google Cloud Storage location where the output is to be written to."""
8647
+
8648
+ output_uri_prefix: Optional[str]
8649
+ """Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist."""
8650
+
8651
+
8652
+ GcsDestinationOrDict = Union[GcsDestination, GcsDestinationDict]
8653
+
8654
+
8655
+ class OutputConfig(_common.BaseModel):
8656
+ """Config for evaluation output."""
8657
+
8658
+ gcs_destination: Optional[GcsDestination] = Field(
8659
+ default=None,
8660
+ description="""Cloud storage destination for evaluation output.""",
8661
+ )
8662
+
8663
+
8664
+ class OutputConfigDict(TypedDict, total=False):
8665
+ """Config for evaluation output."""
8666
+
8667
+ gcs_destination: Optional[GcsDestinationDict]
8668
+ """Cloud storage destination for evaluation output."""
8669
+
8670
+
8671
+ OutputConfigOrDict = Union[OutputConfig, OutputConfigDict]
8672
+
8673
+
8674
+ class AutoraterConfig(_common.BaseModel):
8675
+ """Autorater config used for evaluation."""
8676
+
8677
+ sampling_count: Optional[int] = Field(
8678
+ default=None,
8679
+ description="""Number of samples for each instance in the dataset.
8680
+ If not specified, the default is 4. Minimum value is 1, maximum value
8681
+ is 32.""",
8682
+ )
8683
+ flip_enabled: Optional[bool] = Field(
8684
+ default=None,
8685
+ description="""Optional. Default is true. Whether to flip the candidate and baseline
8686
+ responses. This is only applicable to the pairwise metric. If enabled, also
8687
+ provide PairwiseMetricSpec.candidate_response_field_name and
8688
+ PairwiseMetricSpec.baseline_response_field_name. When rendering
8689
+ PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
8690
+ fields will be flipped for half of the samples to reduce bias.""",
8691
+ )
8692
+ autorater_model: Optional[str] = Field(
8693
+ default=None,
8694
+ description="""The fully qualified name of the publisher model or tuned autorater
8695
+ endpoint to use.
8696
+
8697
+ Publisher model format:
8698
+ `projects/{project}/locations/{location}/publishers/*/models/*`
8699
+
8700
+ Tuned model endpoint format:
8701
+ `projects/{project}/locations/{location}/endpoints/{endpoint}`""",
8702
+ )
8703
+
8704
+
8705
+ class AutoraterConfigDict(TypedDict, total=False):
8706
+ """Autorater config used for evaluation."""
8707
+
8708
+ sampling_count: Optional[int]
8709
+ """Number of samples for each instance in the dataset.
8710
+ If not specified, the default is 4. Minimum value is 1, maximum value
8711
+ is 32."""
8712
+
8713
+ flip_enabled: Optional[bool]
8714
+ """Optional. Default is true. Whether to flip the candidate and baseline
8715
+ responses. This is only applicable to the pairwise metric. If enabled, also
8716
+ provide PairwiseMetricSpec.candidate_response_field_name and
8717
+ PairwiseMetricSpec.baseline_response_field_name. When rendering
8718
+ PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
8719
+ fields will be flipped for half of the samples to reduce bias."""
8720
+
8721
+ autorater_model: Optional[str]
8722
+ """The fully qualified name of the publisher model or tuned autorater
8723
+ endpoint to use.
8724
+
8725
+ Publisher model format:
8726
+ `projects/{project}/locations/{location}/publishers/*/models/*`
8727
+
8728
+ Tuned model endpoint format:
8729
+ `projects/{project}/locations/{location}/endpoints/{endpoint}`"""
8730
+
8731
+
8732
+ AutoraterConfigOrDict = Union[AutoraterConfig, AutoraterConfigDict]
8733
+
8734
+
8735
+ class Metric(_common.BaseModel):
8736
+ """The metric used for evaluation."""
8737
+
8738
+ name: Optional[str] = Field(
8739
+ default=None, description="""The name of the metric."""
8740
+ )
8741
+ custom_function: Optional[Callable[..., Any]] = Field(
8742
+ default=None,
8743
+ description="""The custom function that defines the end-to-end logic for metric computation.""",
8744
+ )
8745
+ prompt_template: Optional[str] = Field(
8746
+ default=None, description="""The prompt template for the metric."""
8747
+ )
8748
+ judge_model_system_instruction: Optional[str] = Field(
8749
+ default=None,
8750
+ description="""The system instruction for the judge model.""",
8751
+ )
8752
+ return_raw_output: Optional[bool] = Field(
8753
+ default=None,
8754
+ description="""Whether to return the raw output from the judge model.""",
8755
+ )
8756
+ parse_and_reduce_fn: Optional[Callable[..., Any]] = Field(
8757
+ default=None,
8758
+ description="""The parse and reduce function for the judge model.""",
8759
+ )
8760
+ aggregate_summary_fn: Optional[Callable[..., Any]] = Field(
8761
+ default=None,
8762
+ description="""The aggregate summary function for the judge model.""",
8763
+ )
8764
+
8765
+ # Allow extra fields to support metric-specific config fields.
8766
+ model_config = ConfigDict(extra='allow')
8767
+
8768
+ _is_predefined: bool = PrivateAttr(default=False)
8769
+ """A boolean indicating whether the metric is predefined."""
8770
+
8771
+ _config_source: Optional[str] = PrivateAttr(default=None)
8772
+ """An optional string indicating the source of the metric configuration."""
8773
+
8774
+ _version: Optional[str] = PrivateAttr(default=None)
8775
+ """An optional string indicating the version of the metric."""
8776
+
8777
+ @model_validator(mode='after') # type: ignore[arg-type]
8778
+ @classmethod
8779
+ def validate_name(cls, model: 'Metric') -> 'Metric':
8780
+ if not model.name:
8781
+ raise ValueError('Metric name cannot be empty.')
8782
+ model.name = model.name.lower()
8783
+ return model
8784
+
8785
+ def to_yaml_file(self, file_path: str, version: Optional[str] = None) -> None:
8786
+ """Dumps the metric object to a YAML file.
8787
+
8788
+ Args:
8789
+ file_path: The path to the YAML file.
8790
+ version: Optional version string to include in the YAML output.
8791
+
8792
+ Raises:
8793
+ ImportError: If the pyyaml library is not installed.
8794
+ """
8795
+ if yaml is None:
8796
+ raise ImportError(
8797
+ 'YAML serialization requires the pyyaml library. Please install'
8798
+ " it using 'pip install google-cloud-aiplatform[evaluation]'."
8799
+ )
8800
+
8801
+ fields_to_exclude_callables = set()
8802
+ for field_name, field_info in self.model_fields.items():
8803
+ annotation = field_info.annotation
8804
+ origin = typing.get_origin(annotation)
8805
+
8806
+ is_field_callable_type = False
8807
+ if annotation is Callable or origin is Callable: # type: ignore[comparison-overlap]
8808
+ is_field_callable_type = True
8809
+ elif origin is Union:
8810
+ args = typing.get_args(annotation)
8811
+ if any(
8812
+ arg is Callable or typing.get_origin(arg) is Callable
8813
+ for arg in args
8814
+ ):
8815
+ is_field_callable_type = True
8816
+
8817
+ if is_field_callable_type:
8818
+ fields_to_exclude_callables.add(field_name)
8819
+
8820
+ data_to_dump = self.model_dump(
8821
+ exclude_unset=True,
8822
+ exclude_none=True,
8823
+ mode='json',
8824
+ exclude=fields_to_exclude_callables
8825
+ if fields_to_exclude_callables
8826
+ else None,
8827
+ )
8828
+
8829
+ if version:
8830
+ data_to_dump['version'] = version
8831
+
8832
+ with open(file_path, 'w', encoding='utf-8') as f:
8833
+ yaml.dump(data_to_dump, f, sort_keys=False, allow_unicode=True)
8834
+
8835
+
8836
+ class MetricDict(TypedDict, total=False):
8837
+ """The metric used for evaluation."""
8838
+
8839
+ name: Optional[str]
8840
+ """The name of the metric."""
8841
+
8842
+ custom_function: Optional[Callable[..., Any]]
8843
+ """The custom function that defines the end-to-end logic for metric computation."""
8844
+
8845
+ prompt_template: Optional[str]
8846
+ """The prompt template for the metric."""
8847
+
8848
+ judge_model_system_instruction: Optional[str]
8849
+ """The system instruction for the judge model."""
8850
+
8851
+ return_raw_output: Optional[bool]
8852
+ """Whether to return the raw output from the judge model."""
8853
+
8854
+ parse_and_reduce_fn: Optional[Callable[..., Any]]
8855
+ """The parse and reduce function for the judge model."""
8856
+
8857
+ aggregate_summary_fn: Optional[Callable[..., Any]]
8858
+ """The aggregate summary function for the judge model."""
8859
+
8860
+
8861
+ MetricOrDict = Union[Metric, MetricDict]
8862
+
8863
+
8864
+ class EvaluationConfig(_common.BaseModel):
8865
+ """Evaluation config for tuning."""
8866
+
8867
+ metrics: Optional[list[Metric]] = Field(
8868
+ default=None, description="""The metrics used for evaluation."""
8869
+ )
8870
+ output_config: Optional[OutputConfig] = Field(
8871
+ default=None, description="""Config for evaluation output."""
8872
+ )
8873
+ autorater_config: Optional[AutoraterConfig] = Field(
8874
+ default=None, description="""Autorater config for evaluation."""
8875
+ )
8876
+
8877
+
8878
+ class EvaluationConfigDict(TypedDict, total=False):
8879
+ """Evaluation config for tuning."""
8880
+
8881
+ metrics: Optional[list[MetricDict]]
8882
+ """The metrics used for evaluation."""
8883
+
8884
+ output_config: Optional[OutputConfigDict]
8885
+ """Config for evaluation output."""
8886
+
8887
+ autorater_config: Optional[AutoraterConfigDict]
8888
+ """Autorater config for evaluation."""
8889
+
8890
+
8891
+ EvaluationConfigOrDict = Union[EvaluationConfig, EvaluationConfigDict]
8892
+
8893
+
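
A short sketch of the new evaluation types defined above, including the gs:// check that GcsDestination now enforces; bucket names and metric values are illustrative, and to_yaml_file requires pyyaml:

    from google.genai import types

    types.GcsDestination(output_uri_prefix="gs://my-bucket/eval/")  # accepted
    # types.GcsDestination(output_uri_prefix="/tmp/eval")  # raises ValueError: not a gs:// path

    metric = types.Metric(
        name="Fluency",  # the validator lowercases this to "fluency"
        prompt_template="Rate the fluency of: {response}",
    )
    eval_config = types.EvaluationConfig(
        metrics=[metric],
        output_config=types.OutputConfig(
            gcs_destination=types.GcsDestination(output_uri_prefix="gs://my-bucket/eval/"),
        ),
    )
    metric.to_yaml_file("fluency.yaml")  # raises ImportError if pyyaml is missing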
8114
8894
  class GoogleRpcStatus(_common.BaseModel):
8115
8895
  """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs.
8116
8896
 
@@ -8156,19 +8936,60 @@ class GoogleRpcStatusDict(TypedDict, total=False):
  GoogleRpcStatusOrDict = Union[GoogleRpcStatus, GoogleRpcStatusDict]
 
 
+ class PreTunedModel(_common.BaseModel):
+ """A pre-tuned model for continuous tuning."""
+
+ base_model: Optional[str] = Field(
+ default=None,
+ description="""Output only. The name of the base model this PreTunedModel was tuned from.""",
+ )
+ checkpoint_id: Optional[str] = Field(
+ default=None,
+ description="""Optional. The source checkpoint id. If not specified, the default checkpoint will be used.""",
+ )
+ tuned_model_name: Optional[str] = Field(
+ default=None,
+ description="""The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`""",
+ )
+
+
+ class PreTunedModelDict(TypedDict, total=False):
+ """A pre-tuned model for continuous tuning."""
+
+ base_model: Optional[str]
+ """Output only. The name of the base model this PreTunedModel was tuned from."""
+
+ checkpoint_id: Optional[str]
+ """Optional. The source checkpoint id. If not specified, the default checkpoint will be used."""
+
+ tuned_model_name: Optional[str]
+ """The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`"""
+
+
+ PreTunedModelOrDict = Union[PreTunedModel, PreTunedModelDict]
+
+
  class SupervisedHyperParameters(_common.BaseModel):
  """Hyperparameters for SFT."""
 
  adapter_size: Optional[AdapterSize] = Field(
  default=None, description="""Optional. Adapter size for tuning."""
  )
+ batch_size: Optional[int] = Field(
+ default=None,
+ description="""Optional. Batch size for tuning. This feature is only available for open source models.""",
+ )
  epoch_count: Optional[int] = Field(
  default=None,
  description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
  )
+ learning_rate: Optional[float] = Field(
+ default=None,
+ description="""Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models.""",
+ )
  learning_rate_multiplier: Optional[float] = Field(
  default=None,
- description="""Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`.""",
+ description="""Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models.""",
  )
 
 
@@ -8178,11 +8999,17 @@ class SupervisedHyperParametersDict(TypedDict, total=False):
8178
8999
  adapter_size: Optional[AdapterSize]
8179
9000
  """Optional. Adapter size for tuning."""
8180
9001
 
9002
+ batch_size: Optional[int]
9003
+ """Optional. Batch size for tuning. This feature is only available for open source models."""
9004
+
8181
9005
  epoch_count: Optional[int]
8182
9006
  """Optional. Number of complete passes the model makes over the entire training dataset during training."""
8183
9007
 
9008
+ learning_rate: Optional[float]
9009
+ """Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models."""
9010
+
8184
9011
  learning_rate_multiplier: Optional[float]
8185
- """Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`."""
9012
+ """Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models."""
8186
9013
 
8187
9014
 
8188
9015
  SupervisedHyperParametersOrDict = Union[
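Editor's note: the new batch_size and learning_rate fields are documented as open-source-model only, while learning_rate_multiplier remains the 1P path and is mutually exclusive with learning_rate. A minimal sketch of the open-source variant; all values are illustrative.

from google.genai import types

hyper_parameters = types.SupervisedHyperParameters(
    epoch_count=3,
    batch_size=8,        # open source models only
    learning_rate=2e-4,  # mutually exclusive with learning_rate_multiplier
)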
@@ -8204,6 +9031,9 @@ class SupervisedTuningSpec(_common.BaseModel):
8204
9031
  default=None,
8205
9032
  description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
8206
9033
  )
9034
+ tuning_mode: Optional[TuningMode] = Field(
9035
+ default=None, description="""Tuning mode."""
9036
+ )
8207
9037
  validation_dataset_uri: Optional[str] = Field(
8208
9038
  default=None,
8209
9039
  description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
@@ -8222,6 +9052,9 @@ class SupervisedTuningSpecDict(TypedDict, total=False):
8222
9052
  training_dataset_uri: Optional[str]
8223
9053
  """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""
8224
9054
 
9055
+ tuning_mode: Optional[TuningMode]
9056
+ """Tuning mode."""
9057
+
8225
9058
  validation_dataset_uri: Optional[str]
8226
9059
  """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""
8227
9060
 
@@ -8421,6 +9254,132 @@ DistillationDataStatsOrDict = Union[
8421
9254
  ]
8422
9255
 
8423
9256
 
9257
+ class GeminiPreferenceExampleCompletion(_common.BaseModel):
9258
+ """Completion and its preference score."""
9259
+
9260
+ completion: Optional[Content] = Field(
9261
+ default=None,
9262
+ description="""Single turn completion for the given prompt.""",
9263
+ )
9264
+ score: Optional[float] = Field(
9265
+ default=None, description="""The score for the given completion."""
9266
+ )
9267
+
9268
+
9269
+ class GeminiPreferenceExampleCompletionDict(TypedDict, total=False):
9270
+ """Completion and its preference score."""
9271
+
9272
+ completion: Optional[ContentDict]
9273
+ """Single turn completion for the given prompt."""
9274
+
9275
+ score: Optional[float]
9276
+ """The score for the given completion."""
9277
+
9278
+
9279
+ GeminiPreferenceExampleCompletionOrDict = Union[
9280
+ GeminiPreferenceExampleCompletion, GeminiPreferenceExampleCompletionDict
9281
+ ]
9282
+
9283
+
9284
+ class GeminiPreferenceExample(_common.BaseModel):
9285
+ """Input example for preference optimization."""
9286
+
9287
+ completions: Optional[list[GeminiPreferenceExampleCompletion]] = Field(
9288
+ default=None, description="""List of completions for a given prompt."""
9289
+ )
9290
+ contents: Optional[list[Content]] = Field(
9291
+ default=None,
9292
+ description="""Multi-turn contents that represents the Prompt.""",
9293
+ )
9294
+
9295
+
9296
+ class GeminiPreferenceExampleDict(TypedDict, total=False):
9297
+ """Input example for preference optimization."""
9298
+
9299
+ completions: Optional[list[GeminiPreferenceExampleCompletionDict]]
9300
+ """List of completions for a given prompt."""
9301
+
9302
+ contents: Optional[list[ContentDict]]
9303
+ """Multi-turn contents that represents the Prompt."""
9304
+
9305
+
9306
+ GeminiPreferenceExampleOrDict = Union[
9307
+ GeminiPreferenceExample, GeminiPreferenceExampleDict
9308
+ ]
9309
+
9310
+
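Editor's note: a hedged sketch of assembling one preference-optimization example from these new types. It assumes the existing Content and Part models accept the role/parts and text fields used below; the prompt text and scores are purely illustrative.

from google.genai import types

example = types.GeminiPreferenceExample(
    contents=[
        types.Content(role='user', parts=[types.Part(text='Summarize the meeting notes.')]),
    ],
    completions=[
        types.GeminiPreferenceExampleCompletion(
            completion=types.Content(role='model', parts=[types.Part(text='Short summary.')]),
            score=0.9,  # higher score marks the preferred completion
        ),
        types.GeminiPreferenceExampleCompletion(
            completion=types.Content(role='model', parts=[types.Part(text='Long, rambling summary.')]),
            score=0.2,
        ),
    ],
)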
9311
+ class PreferenceOptimizationDataStats(_common.BaseModel):
9312
+ """Statistics computed for datasets used for preference optimization."""
9313
+
9314
+ score_variance_per_example_distribution: Optional[DatasetDistribution] = (
9315
+ Field(
9316
+ default=None,
9317
+ description="""Output only. Dataset distributions for scores variance per example.""",
9318
+ )
9319
+ )
9320
+ scores_distribution: Optional[DatasetDistribution] = Field(
9321
+ default=None,
9322
+ description="""Output only. Dataset distributions for scores.""",
9323
+ )
9324
+ total_billable_token_count: Optional[int] = Field(
9325
+ default=None,
9326
+ description="""Output only. Number of billable tokens in the tuning dataset.""",
9327
+ )
9328
+ tuning_dataset_example_count: Optional[int] = Field(
9329
+ default=None,
9330
+ description="""Output only. Number of examples in the tuning dataset.""",
9331
+ )
9332
+ tuning_step_count: Optional[int] = Field(
9333
+ default=None,
9334
+ description="""Output only. Number of tuning steps for this Tuning Job.""",
9335
+ )
9336
+ user_dataset_examples: Optional[list[GeminiPreferenceExample]] = Field(
9337
+ default=None,
9338
+ description="""Output only. Sample user examples in the training dataset.""",
9339
+ )
9340
+ user_input_token_distribution: Optional[DatasetDistribution] = Field(
9341
+ default=None,
9342
+ description="""Output only. Dataset distributions for the user input tokens.""",
9343
+ )
9344
+ user_output_token_distribution: Optional[DatasetDistribution] = Field(
9345
+ default=None,
9346
+ description="""Output only. Dataset distributions for the user output tokens.""",
9347
+ )
9348
+
9349
+
9350
+ class PreferenceOptimizationDataStatsDict(TypedDict, total=False):
9351
+ """Statistics computed for datasets used for preference optimization."""
9352
+
9353
+ score_variance_per_example_distribution: Optional[DatasetDistributionDict]
9354
+ """Output only. Dataset distributions for scores variance per example."""
9355
+
9356
+ scores_distribution: Optional[DatasetDistributionDict]
9357
+ """Output only. Dataset distributions for scores."""
9358
+
9359
+ total_billable_token_count: Optional[int]
9360
+ """Output only. Number of billable tokens in the tuning dataset."""
9361
+
9362
+ tuning_dataset_example_count: Optional[int]
9363
+ """Output only. Number of examples in the tuning dataset."""
9364
+
9365
+ tuning_step_count: Optional[int]
9366
+ """Output only. Number of tuning steps for this Tuning Job."""
9367
+
9368
+ user_dataset_examples: Optional[list[GeminiPreferenceExampleDict]]
9369
+ """Output only. Sample user examples in the training dataset."""
9370
+
9371
+ user_input_token_distribution: Optional[DatasetDistributionDict]
9372
+ """Output only. Dataset distributions for the user input tokens."""
9373
+
9374
+ user_output_token_distribution: Optional[DatasetDistributionDict]
9375
+ """Output only. Dataset distributions for the user output tokens."""
9376
+
9377
+
9378
+ PreferenceOptimizationDataStatsOrDict = Union[
9379
+ PreferenceOptimizationDataStats, PreferenceOptimizationDataStatsDict
9380
+ ]
9381
+
9382
+
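Editor's note: every field on PreferenceOptimizationDataStats is output only, so in practice these stats are read back rather than constructed. A hedged sketch, assuming a TuningJob fetched elsewhere exposes them through its tuning_data_stats field (the corresponding TuningDataStats addition appears further down in this diff).

# `job` is assumed to be a types.TuningJob returned by the tunings API.
stats = job.tuning_data_stats
if stats is not None and stats.preference_optimization_data_stats is not None:
    po_stats = stats.preference_optimization_data_stats
    print(po_stats.tuning_dataset_example_count, po_stats.tuning_step_count)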
8424
9383
  class SupervisedTuningDatasetDistributionDatasetBucket(_common.BaseModel):
8425
9384
  """Dataset bucket used to create a histogram for the distribution given a population of values."""
8426
9385
 
@@ -8652,6 +9611,12 @@ class TuningDataStats(_common.BaseModel):
8652
9611
  distillation_data_stats: Optional[DistillationDataStats] = Field(
8653
9612
  default=None, description="""Output only. Statistics for distillation."""
8654
9613
  )
9614
+ preference_optimization_data_stats: Optional[
9615
+ PreferenceOptimizationDataStats
9616
+ ] = Field(
9617
+ default=None,
9618
+ description="""Output only. Statistics for preference optimization.""",
9619
+ )
8655
9620
  supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field(
8656
9621
  default=None, description="""The SFT Tuning data stats."""
8657
9622
  )
@@ -8663,127 +9628,47 @@ class TuningDataStatsDict(TypedDict, total=False):
8663
9628
  distillation_data_stats: Optional[DistillationDataStatsDict]
8664
9629
  """Output only. Statistics for distillation."""
8665
9630
 
9631
+ preference_optimization_data_stats: Optional[
9632
+ PreferenceOptimizationDataStatsDict
9633
+ ]
9634
+ """Output only. Statistics for preference optimization."""
9635
+
8666
9636
  supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict]
8667
9637
  """The SFT Tuning data stats."""
8668
9638
 
8669
9639
 
8670
- TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict]
8671
-
8672
-
8673
- class EncryptionSpec(_common.BaseModel):
8674
- """Represents a customer-managed encryption key spec that can be applied to a top-level resource."""
8675
-
8676
- kms_key_name: Optional[str] = Field(
8677
- default=None,
8678
- description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""",
8679
- )
8680
-
8681
-
8682
- class EncryptionSpecDict(TypedDict, total=False):
8683
- """Represents a customer-managed encryption key spec that can be applied to a top-level resource."""
8684
-
8685
- kms_key_name: Optional[str]
8686
- """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created."""
8687
-
8688
-
8689
- EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict]
8690
-
8691
-
8692
- class PartnerModelTuningSpec(_common.BaseModel):
8693
- """Tuning spec for Partner models."""
8694
-
8695
- hyper_parameters: Optional[dict[str, Any]] = Field(
8696
- default=None,
8697
- description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""",
8698
- )
8699
- training_dataset_uri: Optional[str] = Field(
8700
- default=None,
8701
- description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
8702
- )
8703
- validation_dataset_uri: Optional[str] = Field(
8704
- default=None,
8705
- description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
8706
- )
8707
-
8708
-
8709
- class PartnerModelTuningSpecDict(TypedDict, total=False):
8710
- """Tuning spec for Partner models."""
8711
-
8712
- hyper_parameters: Optional[dict[str, Any]]
8713
- """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model."""
8714
-
8715
- training_dataset_uri: Optional[str]
8716
- """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
8717
-
8718
- validation_dataset_uri: Optional[str]
8719
- """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""
8720
-
8721
-
8722
- PartnerModelTuningSpecOrDict = Union[
8723
- PartnerModelTuningSpec, PartnerModelTuningSpecDict
8724
- ]
8725
-
8726
-
8727
- class DistillationHyperParameters(_common.BaseModel):
8728
- """Hyperparameters for Distillation."""
8729
-
8730
- adapter_size: Optional[AdapterSize] = Field(
8731
- default=None, description="""Optional. Adapter size for distillation."""
8732
- )
8733
- epoch_count: Optional[int] = Field(
8734
- default=None,
8735
- description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
8736
- )
8737
- learning_rate_multiplier: Optional[float] = Field(
8738
- default=None,
8739
- description="""Optional. Multiplier for adjusting the default learning rate.""",
8740
- )
8741
-
8742
-
8743
- class DistillationHyperParametersDict(TypedDict, total=False):
8744
- """Hyperparameters for Distillation."""
9640
+ TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict]
8745
9641
 
8746
- adapter_size: Optional[AdapterSize]
8747
- """Optional. Adapter size for distillation."""
8748
9642
 
8749
- epoch_count: Optional[int]
8750
- """Optional. Number of complete passes the model makes over the entire training dataset during training."""
9643
+ class EncryptionSpec(_common.BaseModel):
9644
+ """Represents a customer-managed encryption key spec that can be applied to a top-level resource."""
8751
9645
 
8752
- learning_rate_multiplier: Optional[float]
8753
- """Optional. Multiplier for adjusting the default learning rate."""
9646
+ kms_key_name: Optional[str] = Field(
9647
+ default=None,
9648
+ description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""",
9649
+ )
8754
9650
 
8755
9651
 
8756
- DistillationHyperParametersOrDict = Union[
8757
- DistillationHyperParameters, DistillationHyperParametersDict
8758
- ]
9652
+ class EncryptionSpecDict(TypedDict, total=False):
9653
+ """Represents a customer-managed encryption key spec that can be applied to a top-level resource."""
9654
+
9655
+ kms_key_name: Optional[str]
9656
+ """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created."""
8759
9657
 
8760
9658
 
8761
- class DistillationSpec(_common.BaseModel):
8762
- """Tuning Spec for Distillation."""
9659
+ EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict]
8763
9660
 
8764
- base_teacher_model: Optional[str] = Field(
8765
- default=None,
8766
- description="""The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models).""",
8767
- )
8768
- hyper_parameters: Optional[DistillationHyperParameters] = Field(
8769
- default=None,
8770
- description="""Optional. Hyperparameters for Distillation.""",
8771
- )
8772
- pipeline_root_directory: Optional[str] = Field(
8773
- default=None,
8774
- description="""Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts.""",
8775
- )
8776
- student_model: Optional[str] = Field(
9661
+
9662
+ class PartnerModelTuningSpec(_common.BaseModel):
9663
+ """Tuning spec for Partner models."""
9664
+
9665
+ hyper_parameters: Optional[dict[str, Any]] = Field(
8777
9666
  default=None,
8778
- description="""The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead.""",
9667
+ description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""",
8779
9668
  )
8780
9669
  training_dataset_uri: Optional[str] = Field(
8781
9670
  default=None,
8782
- description="""Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
8783
- )
8784
- tuned_teacher_model_source: Optional[str] = Field(
8785
- default=None,
8786
- description="""The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`.""",
9671
+ description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
8787
9672
  )
8788
9673
  validation_dataset_uri: Optional[str] = Field(
8789
9674
  default=None,
@@ -8791,32 +9676,22 @@ class DistillationSpec(_common.BaseModel):
8791
9676
  )
8792
9677
 
8793
9678
 
8794
- class DistillationSpecDict(TypedDict, total=False):
8795
- """Tuning Spec for Distillation."""
8796
-
8797
- base_teacher_model: Optional[str]
8798
- """The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models)."""
8799
-
8800
- hyper_parameters: Optional[DistillationHyperParametersDict]
8801
- """Optional. Hyperparameters for Distillation."""
8802
-
8803
- pipeline_root_directory: Optional[str]
8804
- """Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts."""
9679
+ class PartnerModelTuningSpecDict(TypedDict, total=False):
9680
+ """Tuning spec for Partner models."""
8805
9681
 
8806
- student_model: Optional[str]
8807
- """The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead."""
9682
+ hyper_parameters: Optional[dict[str, Any]]
9683
+ """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model."""
8808
9684
 
8809
9685
  training_dataset_uri: Optional[str]
8810
- """Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
8811
-
8812
- tuned_teacher_model_source: Optional[str]
8813
- """The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`."""
9686
+ """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
8814
9687
 
8815
9688
  validation_dataset_uri: Optional[str]
8816
9689
  """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""
8817
9690
 
8818
9691
 
8819
- DistillationSpecOrDict = Union[DistillationSpec, DistillationSpecDict]
9692
+ PartnerModelTuningSpecOrDict = Union[
9693
+ PartnerModelTuningSpec, PartnerModelTuningSpecDict
9694
+ ]
8820
9695
 
8821
9696
 
8822
9697
  class TuningJob(_common.BaseModel):
@@ -8865,6 +9740,9 @@ class TuningJob(_common.BaseModel):
8865
9740
  default=None,
8866
9741
  description="""Output only. The tuned model resources associated with this TuningJob.""",
8867
9742
  )
9743
+ pre_tuned_model: Optional[PreTunedModel] = Field(
9744
+ default=None, description="""The pre-tuned model for continuous tuning."""
9745
+ )
8868
9746
  supervised_tuning_spec: Optional[SupervisedTuningSpec] = Field(
8869
9747
  default=None, description="""Tuning Spec for Supervised Fine Tuning."""
8870
9748
  )
@@ -8880,8 +9758,12 @@ class TuningJob(_common.BaseModel):
8880
9758
  default=None,
8881
9759
  description="""Tuning Spec for open sourced and third party Partner models.""",
8882
9760
  )
8883
- distillation_spec: Optional[DistillationSpec] = Field(
8884
- default=None, description="""Tuning Spec for Distillation."""
9761
+ evaluation_config: Optional[EvaluationConfig] = Field(
9762
+ default=None, description=""""""
9763
+ )
9764
+ custom_base_model: Optional[str] = Field(
9765
+ default=None,
9766
+ description="""Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models.""",
8885
9767
  )
8886
9768
  experiment: Optional[str] = Field(
8887
9769
  default=None,
@@ -8891,16 +9773,14 @@ class TuningJob(_common.BaseModel):
8891
9773
  default=None,
8892
9774
  description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""",
8893
9775
  )
9776
+ output_uri: Optional[str] = Field(
9777
+ default=None,
9778
+ description="""Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models.""",
9779
+ )
8894
9780
  pipeline_job: Optional[str] = Field(
8895
9781
  default=None,
8896
9782
  description="""Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""",
8897
9783
  )
8898
- satisfies_pzi: Optional[bool] = Field(
8899
- default=None, description="""Output only. Reserved for future use."""
8900
- )
8901
- satisfies_pzs: Optional[bool] = Field(
8902
- default=None, description="""Output only. Reserved for future use."""
8903
- )
8904
9784
  service_account: Optional[str] = Field(
8905
9785
  default=None,
8906
9786
  description="""The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account.""",
@@ -8957,6 +9837,9 @@ class TuningJobDict(TypedDict, total=False):
8957
9837
  tuned_model: Optional[TunedModelDict]
8958
9838
  """Output only. The tuned model resources associated with this TuningJob."""
8959
9839
 
9840
+ pre_tuned_model: Optional[PreTunedModelDict]
9841
+ """The pre-tuned model for continuous tuning."""
9842
+
8960
9843
  supervised_tuning_spec: Optional[SupervisedTuningSpecDict]
8961
9844
  """Tuning Spec for Supervised Fine Tuning."""
8962
9845
 
@@ -8969,8 +9852,11 @@ class TuningJobDict(TypedDict, total=False):
8969
9852
  partner_model_tuning_spec: Optional[PartnerModelTuningSpecDict]
8970
9853
  """Tuning Spec for open sourced and third party Partner models."""
8971
9854
 
8972
- distillation_spec: Optional[DistillationSpecDict]
8973
- """Tuning Spec for Distillation."""
9855
+ evaluation_config: Optional[EvaluationConfigDict]
9856
+ """"""
9857
+
9858
+ custom_base_model: Optional[str]
9859
+ """Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models."""
8974
9860
 
8975
9861
  experiment: Optional[str]
8976
9862
  """Output only. The Experiment associated with this TuningJob."""
@@ -8978,15 +9864,12 @@ class TuningJobDict(TypedDict, total=False):
8978
9864
  labels: Optional[dict[str, str]]
8979
9865
  """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels."""
8980
9866
 
9867
+ output_uri: Optional[str]
9868
+ """Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models."""
9869
+
8981
9870
  pipeline_job: Optional[str]
8982
9871
  """Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`."""
8983
9872
 
8984
- satisfies_pzi: Optional[bool]
8985
- """Output only. Reserved for future use."""
8986
-
8987
- satisfies_pzs: Optional[bool]
8988
- """Output only. Reserved for future use."""
8989
-
8990
9873
  service_account: Optional[str]
8991
9874
  """The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account."""
8992
9875
 
@@ -9192,6 +10075,10 @@ class CreateTuningJobConfig(_common.BaseModel):
9192
10075
  default=None,
9193
10076
  description="""If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT.""",
9194
10077
  )
10078
+ pre_tuned_model_checkpoint_id: Optional[str] = Field(
10079
+ default=None,
10080
+ description="""The optional checkpoint id of the pre-tuned model to use for tuning, if applicable.""",
10081
+ )
9195
10082
  adapter_size: Optional[AdapterSize] = Field(
9196
10083
  default=None, description="""Adapter size for tuning."""
9197
10084
  )
@@ -9203,6 +10090,9 @@ class CreateTuningJobConfig(_common.BaseModel):
9203
10090
  default=None,
9204
10091
  description="""The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""",
9205
10092
  )
10093
+ evaluation_config: Optional[EvaluationConfig] = Field(
10094
+ default=None, description="""Evaluation config for the tuning job."""
10095
+ )
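Editor's note: a hedged sketch of passing the new CreateTuningJobConfig fields through client.tunings.tune. It assumes TuningDataset takes a gcs_uri and that tune() accepts these keyword arguments as in earlier releases; every value is a placeholder.

from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')

job = client.tunings.tune(
    base_model='gemini-2.5-flash',
    training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
    config=types.CreateTuningJobConfig(
        epoch_count=3,
        pre_tuned_model_checkpoint_id='2',           # continue from a specific checkpoint
        evaluation_config=types.EvaluationConfig(),  # new evaluation hook, left empty here
    ),
)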
9206
10096
 
9207
10097
 
9208
10098
  class CreateTuningJobConfigDict(TypedDict, total=False):
@@ -9229,6 +10119,9 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
9229
10119
  export_last_checkpoint_only: Optional[bool]
9230
10120
  """If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT."""
9231
10121
 
10122
+ pre_tuned_model_checkpoint_id: Optional[str]
10123
+ """The optional checkpoint id of the pre-tuned model to use for tuning, if applicable."""
10124
+
9232
10125
  adapter_size: Optional[AdapterSize]
9233
10126
  """Adapter size for tuning."""
9234
10127
 
@@ -9238,18 +10131,24 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
9238
10131
  learning_rate: Optional[float]
9239
10132
  """The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples."""
9240
10133
 
10134
+ evaluation_config: Optional[EvaluationConfigDict]
10135
+ """Evaluation config for the tuning job."""
10136
+
9241
10137
 
9242
10138
  CreateTuningJobConfigOrDict = Union[
9243
10139
  CreateTuningJobConfig, CreateTuningJobConfigDict
9244
10140
  ]
9245
10141
 
9246
10142
 
9247
- class _CreateTuningJobParameters(_common.BaseModel):
10143
+ class _CreateTuningJobParametersPrivate(_common.BaseModel):
9248
10144
  """Supervised fine-tuning job creation parameters - optional fields."""
9249
10145
 
9250
10146
  base_model: Optional[str] = Field(
9251
10147
  default=None,
9252
- description="""The base model that is being tuned, e.g., "gemini-1.0-pro-002".""",
10148
+ description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
10149
+ )
10150
+ pre_tuned_model: Optional[PreTunedModel] = Field(
10151
+ default=None, description="""The PreTunedModel that is being tuned."""
9253
10152
  )
9254
10153
  training_dataset: Optional[TuningDataset] = Field(
9255
10154
  default=None,
@@ -9260,11 +10159,14 @@ class _CreateTuningJobParameters(_common.BaseModel):
9260
10159
  )
9261
10160
 
9262
10161
 
9263
- class _CreateTuningJobParametersDict(TypedDict, total=False):
10162
+ class _CreateTuningJobParametersPrivateDict(TypedDict, total=False):
9264
10163
  """Supervised fine-tuning job creation parameters - optional fields."""
9265
10164
 
9266
10165
  base_model: Optional[str]
9267
- """The base model that is being tuned, e.g., "gemini-1.0-pro-002"."""
10166
+ """The base model that is being tuned, e.g., "gemini-2.5-flash"."""
10167
+
10168
+ pre_tuned_model: Optional[PreTunedModelDict]
10169
+ """The PreTunedModel that is being tuned."""
9268
10170
 
9269
10171
  training_dataset: Optional[TuningDatasetDict]
9270
10172
  """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
@@ -9273,8 +10175,8 @@ class _CreateTuningJobParametersDict(TypedDict, total=False):
9273
10175
  """Configuration for the tuning job."""
9274
10176
 
9275
10177
 
9276
- _CreateTuningJobParametersOrDict = Union[
9277
- _CreateTuningJobParameters, _CreateTuningJobParametersDict
10178
+ _CreateTuningJobParametersPrivateOrDict = Union[
10179
+ _CreateTuningJobParametersPrivate, _CreateTuningJobParametersPrivateDict
9278
10180
  ]
9279
10181
 
9280
10182
 
@@ -10765,6 +11667,9 @@ _DeleteBatchJobParametersOrDict = Union[
10765
11667
  class DeleteResourceJob(_common.BaseModel):
10766
11668
  """The return value of delete operation."""
10767
11669
 
11670
+ sdk_http_response: Optional[HttpResponse] = Field(
11671
+ default=None, description="""Used to retain the full HTTP response."""
11672
+ )
10768
11673
  name: Optional[str] = Field(default=None, description="""""")
10769
11674
  done: Optional[bool] = Field(default=None, description="""""")
10770
11675
  error: Optional[JobError] = Field(default=None, description="""""")
@@ -10773,6 +11678,9 @@ class DeleteResourceJob(_common.BaseModel):
10773
11678
  class DeleteResourceJobDict(TypedDict, total=False):
10774
11679
  """The return value of delete operation."""
10775
11680
 
11681
+ sdk_http_response: Optional[HttpResponseDict]
11682
+ """Used to retain the full HTTP response."""
11683
+
10776
11684
  name: Optional[str]
10777
11685
  """"""
10778
11686
 
@@ -12594,13 +13502,55 @@ LiveClientRealtimeInputOrDict = Union[
12594
13502
  LiveClientRealtimeInput, LiveClientRealtimeInputDict
12595
13503
  ]
12596
13504
 
13505
+
13506
+ class LiveClientToolResponse(_common.BaseModel):
13507
+ """Client generated response to a `ToolCall` received from the server.
13508
+
13509
+ Individual `FunctionResponse` objects are matched to the respective
13510
+ `FunctionCall` objects by the `id` field.
13511
+
13512
+ Note that in the unary and server-streaming GenerateContent APIs function
13513
+ calling happens by exchanging the `Content` parts, while in the bidi
13514
+ GenerateContent APIs function calling happens over this dedicated set of
13515
+ messages.
13516
+ """
13517
+
13518
+ function_responses: Optional[list[FunctionResponse]] = Field(
13519
+ default=None, description="""The response to the function calls."""
13520
+ )
13521
+
13522
+
13523
+ class LiveClientToolResponseDict(TypedDict, total=False):
13524
+ """Client generated response to a `ToolCall` received from the server.
13525
+
13526
+ Individual `FunctionResponse` objects are matched to the respective
13527
+ `FunctionCall` objects by the `id` field.
13528
+
13529
+ Note that in the unary and server-streaming GenerateContent APIs function
13530
+ calling happens by exchanging the `Content` parts, while in the bidi
13531
+ GenerateContent APIs function calling happens over this dedicated set of
13532
+ messages.
13533
+ """
13534
+
13535
+ function_responses: Optional[list[FunctionResponseDict]]
13536
+ """The response to the function calls."""
13537
+
13538
+
13539
+ LiveClientToolResponseOrDict = Union[
13540
+ LiveClientToolResponse, LiveClientToolResponseDict
13541
+ ]
13542
+
13543
+
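Editor's note: the class itself is unchanged; this hunk only relocates it ahead of the Blob/PIL union definitions below (the old definition is removed further down). A minimal construction sketch, assuming FunctionResponse keeps its id/name/response fields; the call id and payload are placeholders.

from google.genai import types

tool_response = types.LiveClientToolResponse(
    function_responses=[
        types.FunctionResponse(
            id='function-call-1',  # matches the id of the server's FunctionCall
            name='get_weather',
            response={'temperature_c': 21},
        ),
    ],
)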
12597
13544
  if _is_pillow_image_imported:
12598
- BlobImageUnion = Union[Blob, PIL_Image]
13545
+ BlobImageUnion = Union[PIL_Image, Blob]
12599
13546
  else:
12600
13547
  BlobImageUnion = Blob # type: ignore[misc]
12601
13548
 
12602
13549
 
12603
- BlobImageUnionDict = Union[BlobImageUnion, BlobDict]
13550
+ if _is_pillow_image_imported:
13551
+ BlobImageUnionDict = Union[PIL_Image, Blob, BlobDict]
13552
+ else:
13553
+ BlobImageUnionDict = Union[Blob, BlobDict] # type: ignore[misc]
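Editor's note: the reordering puts PIL_Image ahead of Blob in the union, and BlobImageUnionDict now spells out the PIL branch explicitly instead of nesting BlobImageUnion. A typing-level sketch, assuming Pillow is installed; the image and blob below are throwaway values.

import PIL.Image
from google.genai import types

image = PIL.Image.new('RGB', (8, 8))
blob = types.Blob(mime_type='image/png', data=b'\x00' * 16)

frame: types.BlobImageUnion = image   # PIL branch is tried first
frame = blob                          # a plain Blob is still accepted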
12604
13554
 
12605
13555
 
12606
13556
  class LiveSendRealtimeInputParameters(_common.BaseModel):
@@ -12676,44 +13626,6 @@ LiveSendRealtimeInputParametersOrDict = Union[
12676
13626
  ]
12677
13627
 
12678
13628
 
12679
- class LiveClientToolResponse(_common.BaseModel):
12680
- """Client generated response to a `ToolCall` received from the server.
12681
-
12682
- Individual `FunctionResponse` objects are matched to the respective
12683
- `FunctionCall` objects by the `id` field.
12684
-
12685
- Note that in the unary and server-streaming GenerateContent APIs function
12686
- calling happens by exchanging the `Content` parts, while in the bidi
12687
- GenerateContent APIs function calling happens over this dedicated set of
12688
- messages.
12689
- """
12690
-
12691
- function_responses: Optional[list[FunctionResponse]] = Field(
12692
- default=None, description="""The response to the function calls."""
12693
- )
12694
-
12695
-
12696
- class LiveClientToolResponseDict(TypedDict, total=False):
12697
- """Client generated response to a `ToolCall` received from the server.
12698
-
12699
- Individual `FunctionResponse` objects are matched to the respective
12700
- `FunctionCall` objects by the `id` field.
12701
-
12702
- Note that in the unary and server-streaming GenerateContent APIs function
12703
- calling happens by exchanging the `Content` parts, while in the bidi
12704
- GenerateContent APIs function calling happens over this dedicated set of
12705
- messages.
12706
- """
12707
-
12708
- function_responses: Optional[list[FunctionResponseDict]]
12709
- """The response to the function calls."""
12710
-
12711
-
12712
- LiveClientToolResponseOrDict = Union[
12713
- LiveClientToolResponse, LiveClientToolResponseDict
12714
- ]
12715
-
12716
-
12717
13629
  class LiveClientMessage(_common.BaseModel):
12718
13630
  """Messages sent by the client in the API call."""
12719
13631
 
@@ -13120,6 +14032,10 @@ class LiveMusicGenerationConfig(_common.BaseModel):
13120
14032
  default=None,
13121
14033
  description="""Whether the audio output should contain only bass and drums.""",
13122
14034
  )
14035
+ music_generation_mode: Optional[MusicGenerationMode] = Field(
14036
+ default=None,
14037
+ description="""The mode of music generation. Default mode is QUALITY.""",
14038
+ )
13123
14039
 
13124
14040
 
13125
14041
  class LiveMusicGenerationConfigDict(TypedDict, total=False):
@@ -13163,6 +14079,9 @@ class LiveMusicGenerationConfigDict(TypedDict, total=False):
13163
14079
  only_bass_and_drums: Optional[bool]
13164
14080
  """Whether the audio output should contain only bass and drums."""
13165
14081
 
14082
+ music_generation_mode: Optional[MusicGenerationMode]
14083
+ """The mode of music generation. Default mode is QUALITY."""
14084
+
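Editor's note: a hedged sketch of the new music_generation_mode knob. The MusicGenerationMode members are not shown in this diff, so the QUALITY member below is inferred from the docstring's stated default and should be treated as an assumption.

from google.genai import types

music_config = types.LiveMusicGenerationConfig(
    music_generation_mode=types.MusicGenerationMode.QUALITY,  # assumed member name
)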
13166
14085
 
13167
14086
  LiveMusicGenerationConfigOrDict = Union[
13168
14087
  LiveMusicGenerationConfig, LiveMusicGenerationConfigDict
@@ -13588,3 +14507,189 @@ class CreateAuthTokenParametersDict(TypedDict, total=False):
13588
14507
  CreateAuthTokenParametersOrDict = Union[
13589
14508
  CreateAuthTokenParameters, CreateAuthTokenParametersDict
13590
14509
  ]
14510
+
14511
+
14512
+ class CreateTuningJobParameters(_common.BaseModel):
14513
+ """Supervised fine-tuning job creation parameters - optional fields."""
14514
+
14515
+ base_model: Optional[str] = Field(
14516
+ default=None,
14517
+ description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
14518
+ )
14519
+ training_dataset: Optional[TuningDataset] = Field(
14520
+ default=None,
14521
+ description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
14522
+ )
14523
+ config: Optional[CreateTuningJobConfig] = Field(
14524
+ default=None, description="""Configuration for the tuning job."""
14525
+ )
14526
+
14527
+
14528
+ class CreateTuningJobParametersDict(TypedDict, total=False):
14529
+ """Supervised fine-tuning job creation parameters - optional fields."""
14530
+
14531
+ base_model: Optional[str]
14532
+ """The base model that is being tuned, e.g., "gemini-2.5-flash"."""
14533
+
14534
+ training_dataset: Optional[TuningDatasetDict]
14535
+ """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
14536
+
14537
+ config: Optional[CreateTuningJobConfigDict]
14538
+ """Configuration for the tuning job."""
14539
+
14540
+
14541
+ CreateTuningJobParametersOrDict = Union[
14542
+ CreateTuningJobParameters, CreateTuningJobParametersDict
14543
+ ]
14544
+
14545
+
14546
+ class CustomOutputFormatConfig(_common.BaseModel):
14547
+ """Config for custom output format."""
14548
+
14549
+ return_raw_output: Optional[bool] = Field(
14550
+ default=None, description="""Optional. Whether to return raw output."""
14551
+ )
14552
+
14553
+
14554
+ class CustomOutputFormatConfigDict(TypedDict, total=False):
14555
+ """Config for custom output format."""
14556
+
14557
+ return_raw_output: Optional[bool]
14558
+ """Optional. Whether to return raw output."""
14559
+
14560
+
14561
+ CustomOutputFormatConfigOrDict = Union[
14562
+ CustomOutputFormatConfig, CustomOutputFormatConfigDict
14563
+ ]
14564
+
14565
+
14566
+ class BleuSpec(_common.BaseModel):
14567
+ """Spec for bleu metric."""
14568
+
14569
+ use_effective_order: Optional[bool] = Field(
14570
+ default=None,
14571
+ description="""Optional. Whether to use_effective_order to compute bleu score.""",
14572
+ )
14573
+
14574
+
14575
+ class BleuSpecDict(TypedDict, total=False):
14576
+ """Spec for bleu metric."""
14577
+
14578
+ use_effective_order: Optional[bool]
14579
+ """Optional. Whether to use_effective_order to compute bleu score."""
14580
+
14581
+
14582
+ BleuSpecOrDict = Union[BleuSpec, BleuSpecDict]
14583
+
14584
+
14585
+ class PairwiseMetricSpec(_common.BaseModel):
14586
+ """Spec for pairwise metric."""
14587
+
14588
+ metric_prompt_template: Optional[str] = Field(
14589
+ default=None,
14590
+ description="""Required. Metric prompt template for pairwise metric.""",
14591
+ )
14592
+ baseline_response_field_name: Optional[str] = Field(
14593
+ default=None,
14594
+ description="""Optional. The field name of the baseline response.""",
14595
+ )
14596
+ candidate_response_field_name: Optional[str] = Field(
14597
+ default=None,
14598
+ description="""Optional. The field name of the candidate response.""",
14599
+ )
14600
+ custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
14601
+ default=None,
14602
+ description="""Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty.""",
14603
+ )
14604
+ system_instruction: Optional[str] = Field(
14605
+ default=None,
14606
+ description="""Optional. System instructions for pairwise metric.""",
14607
+ )
14608
+
14609
+
14610
+ class PairwiseMetricSpecDict(TypedDict, total=False):
14611
+ """Spec for pairwise metric."""
14612
+
14613
+ metric_prompt_template: Optional[str]
14614
+ """Required. Metric prompt template for pairwise metric."""
14615
+
14616
+ baseline_response_field_name: Optional[str]
14617
+ """Optional. The field name of the baseline response."""
14618
+
14619
+ candidate_response_field_name: Optional[str]
14620
+ """Optional. The field name of the candidate response."""
14621
+
14622
+ custom_output_format_config: Optional[CustomOutputFormatConfigDict]
14623
+ """Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty."""
14624
+
14625
+ system_instruction: Optional[str]
14626
+ """Optional. System instructions for pairwise metric."""
14627
+
14628
+
14629
+ PairwiseMetricSpecOrDict = Union[PairwiseMetricSpec, PairwiseMetricSpecDict]
14630
+
14631
+
14632
+ class PointwiseMetricSpec(_common.BaseModel):
14633
+ """Spec for pointwise metric."""
14634
+
14635
+ metric_prompt_template: Optional[str] = Field(
14636
+ default=None,
14637
+ description="""Required. Metric prompt template for pointwise metric.""",
14638
+ )
14639
+ custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
14640
+ default=None,
14641
+ description="""Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty.""",
14642
+ )
14643
+ system_instruction: Optional[str] = Field(
14644
+ default=None,
14645
+ description="""Optional. System instructions for pointwise metric.""",
14646
+ )
14647
+
14648
+
14649
+ class PointwiseMetricSpecDict(TypedDict, total=False):
14650
+ """Spec for pointwise metric."""
14651
+
14652
+ metric_prompt_template: Optional[str]
14653
+ """Required. Metric prompt template for pointwise metric."""
14654
+
14655
+ custom_output_format_config: Optional[CustomOutputFormatConfigDict]
14656
+ """Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty."""
14657
+
14658
+ system_instruction: Optional[str]
14659
+ """Optional. System instructions for pointwise metric."""
14660
+
14661
+
14662
+ PointwiseMetricSpecOrDict = Union[PointwiseMetricSpec, PointwiseMetricSpecDict]
14663
+
14664
+
14665
+ class RougeSpec(_common.BaseModel):
14666
+ """Spec for rouge metric."""
14667
+
14668
+ rouge_type: Optional[str] = Field(
14669
+ default=None,
14670
+ description="""Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum.""",
14671
+ )
14672
+ split_summaries: Optional[bool] = Field(
14673
+ default=None,
14674
+ description="""Optional. Whether to split summaries while using rougeLsum.""",
14675
+ )
14676
+ use_stemmer: Optional[bool] = Field(
14677
+ default=None,
14678
+ description="""Optional. Whether to use stemmer to compute rouge score.""",
14679
+ )
14680
+
14681
+
14682
+ class RougeSpecDict(TypedDict, total=False):
14683
+ """Spec for rouge metric."""
14684
+
14685
+ rouge_type: Optional[str]
14686
+ """Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum."""
14687
+
14688
+ split_summaries: Optional[bool]
14689
+ """Optional. Whether to split summaries while using rougeLsum."""
14690
+
14691
+ use_stemmer: Optional[bool]
14692
+ """Optional. Whether to use stemmer to compute rouge score."""
14693
+
14694
+
14695
+ RougeSpecOrDict = Union[RougeSpec, RougeSpecDict]
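Editor's note: these spec types appear to mirror the Vertex evaluation service's metric specs. A hedged sketch of constructing a few of them together; the prompt templates and field names are placeholders, and nothing here is wired into a request.

from google.genai import types

bleu = types.BleuSpec(use_effective_order=True)

rouge = types.RougeSpec(rouge_type='rougeLsum', split_summaries=True, use_stemmer=True)

pointwise = types.PointwiseMetricSpec(
    metric_prompt_template='Rate the response for fluency on a 1-5 scale:\n{response}',
    system_instruction='You are a strict grader.',
)

pairwise = types.PairwiseMetricSpec(
    metric_prompt_template='Which response better answers the prompt?\n{baseline}\n{candidate}',
    baseline_response_field_name='baseline',
    candidate_response_field_name='candidate',
    # Return the autorater's raw text instead of the parsed pairwise_choice/explanation.
    custom_output_format_config=types.CustomOutputFormatConfig(return_raw_output=True),
)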