google-genai 1.28.0__py3-none-any.whl → 1.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -241,6 +241,10 @@ class UrlRetrievalStatus(_common.CaseInSensitiveEnum):
  """Url retrieval is successful."""
  URL_RETRIEVAL_STATUS_ERROR = 'URL_RETRIEVAL_STATUS_ERROR'
  """Url retrieval is failed due to error."""
+ URL_RETRIEVAL_STATUS_PAYWALL = 'URL_RETRIEVAL_STATUS_PAYWALL'
+ """Url retrieval is failed because the content is behind paywall."""
+ URL_RETRIEVAL_STATUS_UNSAFE = 'URL_RETRIEVAL_STATUS_UNSAFE'
+ """Url retrieval is failed because the content is unsafe."""


  class FinishReason(_common.CaseInSensitiveEnum):
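
A minimal usage sketch for the two new statuses (not part of the package itself): it assumes `response` came from a generate_content call that used the url_context tool, and relies on the existing UrlContextMetadata/UrlMetadata fields rather than anything added in this hunk.

from google.genai import types

def report_url_retrieval_issues(response: types.GenerateContentResponse) -> None:
    # Hedged sketch: walks candidates' URL context metadata and reports the
    # two statuses introduced above.
    for candidate in response.candidates or []:
        if candidate.url_context_metadata is None:
            continue
        for meta in candidate.url_context_metadata.url_metadata or []:
            if meta.url_retrieval_status == types.UrlRetrievalStatus.URL_RETRIEVAL_STATUS_PAYWALL:
                print(f'{meta.retrieved_url}: content is behind a paywall')
            elif meta.url_retrieval_status == types.UrlRetrievalStatus.URL_RETRIEVAL_STATUS_UNSAFE:
                print(f'{meta.retrieved_url}: content was flagged as unsafe')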
@@ -672,6 +676,22 @@ class Scale(_common.CaseInSensitiveEnum):
  """B major or Ab minor."""


+ class MusicGenerationMode(_common.CaseInSensitiveEnum):
+ """The mode of music generation."""
+
+ MUSIC_GENERATION_MODE_UNSPECIFIED = 'MUSIC_GENERATION_MODE_UNSPECIFIED'
+ """Rely on the server default generation mode."""
+ QUALITY = 'QUALITY'
+ """Steer text prompts to regions of latent space with higher quality
+ music."""
+ DIVERSITY = 'DIVERSITY'
+ """Steer text prompts to regions of latent space with a larger
+ diversity of music."""
+ VOCALIZATION = 'VOCALIZATION'
+ """Steer text prompts to regions of latent space more likely to
+ generate music with vocals."""
+
+
  class LiveMusicPlaybackControl(_common.CaseInSensitiveEnum):
  """The playback control signal to apply to the music generation."""

@@ -2092,20 +2112,50 @@ class FunctionDeclaration(_common.BaseModel):
  from . import _automatic_function_calling_util

  parameters_properties = {}
+ parameters_json_schema = {}
  annotation_under_future = typing.get_type_hints(callable)
- for name, param in inspect.signature(callable).parameters.items():
- if param.kind in (
- inspect.Parameter.POSITIONAL_OR_KEYWORD,
- inspect.Parameter.KEYWORD_ONLY,
- inspect.Parameter.POSITIONAL_ONLY,
- ):
- # This snippet catches the case when type hints are stored as strings
- if isinstance(param.annotation, str):
- param = param.replace(annotation=annotation_under_future[name])
- schema = _automatic_function_calling_util._parse_schema_from_parameter(
- api_option, param, callable.__name__
- )
- parameters_properties[name] = schema
+ try:
+ for name, param in inspect.signature(callable).parameters.items():
+ if param.kind in (
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
+ inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_ONLY,
+ ):
+ param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
+ param, annotation_under_future, name
+ )
+ schema = (
+ _automatic_function_calling_util._parse_schema_from_parameter(
+ api_option, param, callable.__name__
+ )
+ )
+ parameters_properties[name] = schema
+ except ValueError:
+ parameters_properties = {}
+ for name, param in inspect.signature(callable).parameters.items():
+ if param.kind in (
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
+ inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_ONLY,
+ ):
+ try:
+ param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
+ param, annotation_under_future, name
+ )
+ param_schema_adapter = pydantic.TypeAdapter(
+ param.annotation,
+ config=pydantic.ConfigDict(arbitrary_types_allowed=True),
+ )
+ json_schema_dict = param_schema_adapter.json_schema()
+ json_schema_dict = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
+ json_schema_dict
+ )
+ parameters_json_schema[name] = json_schema_dict
+ except Exception as e:
+ _automatic_function_calling_util._raise_for_unsupported_param(
+ param, callable.__name__, e
+ )
+
  declaration = FunctionDeclaration(
  name=callable.__name__,
  description=inspect.cleandoc(callable.__doc__)
@@ -2123,6 +2173,8 @@ class FunctionDeclaration(_common.BaseModel):
  declaration.parameters
  )
  )
+ elif parameters_json_schema:
+ declaration.parameters_json_schema = parameters_json_schema
  # TODO: b/421991354 - Remove this check once the bug is fixed.
  if api_option == 'GEMINI_API':
  return declaration
@@ -2142,13 +2194,39 @@ class FunctionDeclaration(_common.BaseModel):
  return_value = return_value.replace(
  annotation=annotation_under_future['return']
  )
- declaration.response = (
- _automatic_function_calling_util._parse_schema_from_parameter(
- api_option,
- return_value,
- callable.__name__,
+ response_schema: Optional[Schema] = None
+ response_json_schema: Optional[Union[dict[str, Any], Schema]] = {}
+ try:
+ response_schema = (
+ _automatic_function_calling_util._parse_schema_from_parameter(
+ api_option,
+ return_value,
+ callable.__name__,
+ )
+ )
+ if response_schema.any_of is not None:
+ # To handle any_of, we need to use responseJsonSchema
+ response_json_schema = response_schema
+ response_schema = None
+ except ValueError:
+ try:
+ return_value_schema_adapter = pydantic.TypeAdapter(
+ return_value.annotation,
+ config=pydantic.ConfigDict(arbitrary_types_allowed=True),
  )
- )
+ response_json_schema = return_value_schema_adapter.json_schema()
+ response_json_schema = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
+ response_json_schema
+ )
+ except Exception as e:
+ _automatic_function_calling_util._raise_for_unsupported_param(
+ return_value, callable.__name__, e
+ )
+
+ if response_schema:
+ declaration.response = response_schema
+ elif response_json_schema:
+ declaration.response_json_schema = response_json_schema
  return declaration

  @classmethod
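
The hunks above add the same fallback for parameters and return values: when _parse_schema_from_parameter raises ValueError for an annotation it cannot express as a Schema, the declaration is filled from a pydantic-generated JSON schema (parameters_json_schema / response_json_schema) instead. A minimal sketch of that fallback in isolation, using a hypothetical fixed-length tuple annotation:

import pydantic

# Hypothetical annotation that the Schema parser would reject.
annotation = tuple[int, str]

adapter = pydantic.TypeAdapter(
    annotation,
    config=pydantic.ConfigDict(arbitrary_types_allowed=True),
)
json_schema = adapter.json_schema()
# The SDK additionally post-processes fixed-length tuple schemas via
# _add_unevaluated_items_to_fixed_len_tuple_schema before storing the result.
print(json_schema)  # -> {'maxItems': 2, 'minItems': 2, 'prefixItems': [...], 'type': 'array'}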
@@ -3676,19 +3754,25 @@ class FileDict(TypedDict, total=False):

  FileOrDict = Union[File, FileDict]

+
  if _is_pillow_image_imported:
- PartUnion = Union[File, Part, PIL_Image, str]
+ PartUnion = Union[str, PIL_Image, File, Part]
  else:
- PartUnion = Union[File, Part, str] # type: ignore[misc]
+ PartUnion = Union[str, File, Part] # type: ignore[misc]


- PartUnionDict = Union[PartUnion, PartDict]
+ if _is_pillow_image_imported:
+ PartUnionDict = Union[str, PIL_Image, File, FileDict, Part, PartDict]
+ else:
+ PartUnionDict = Union[str, File, FileDict, Part, PartDict] # type: ignore[misc]


- ContentUnion = Union[Content, list[PartUnion], PartUnion]
+ ContentUnion = Union[Content, PartUnion, list[PartUnion]]


- ContentUnionDict = Union[ContentUnion, ContentDict]
+ ContentUnionDict = Union[
+ Content, ContentDict, PartUnionDict, list[PartUnionDict]
+ ]


  class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel):
@@ -3764,10 +3848,10 @@ GenerationConfigRoutingConfigOrDict = Union[
  ]


- SpeechConfigUnion = Union[SpeechConfig, str]
+ SpeechConfigUnion = Union[str, SpeechConfig]


- SpeechConfigUnionDict = Union[SpeechConfigUnion, SpeechConfigDict]
+ SpeechConfigUnionDict = Union[str, SpeechConfig, SpeechConfigDict]


  class GenerateContentConfig(_common.BaseModel):
@@ -4160,10 +4244,10 @@ GenerateContentConfigOrDict = Union[
  ]


- ContentListUnion = Union[list[ContentUnion], ContentUnion]
+ ContentListUnion = Union[ContentUnion, list[ContentUnion]]


- ContentListUnionDict = Union[list[ContentUnionDict], ContentUnionDict]
+ ContentListUnionDict = Union[ContentUnionDict, list[ContentUnionDict]]


  class _GenerateContentParameters(_common.BaseModel):
@@ -5118,11 +5202,6 @@ class GenerateContentResponse(_common.BaseModel):
  description="""Timestamp when the request is made to the server.
  """,
  )
- response_id: Optional[str] = Field(
- default=None,
- description="""Identifier for each response.
- """,
- )
  model_version: Optional[str] = Field(
  default=None,
  description="""Output only. The model version used to generate the response.""",
@@ -5131,6 +5210,10 @@ class GenerateContentResponse(_common.BaseModel):
  default=None,
  description="""Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""",
  )
+ response_id: Optional[str] = Field(
+ default=None,
+ description="""Output only. response_id is used to identify each response. It is the encoding of the event_id.""",
+ )
  usage_metadata: Optional[GenerateContentResponseUsageMetadata] = Field(
  default=None, description="""Usage metadata about the response(s)."""
  )
@@ -5377,16 +5460,15 @@ class GenerateContentResponseDict(TypedDict, total=False):
  """Timestamp when the request is made to the server.
  """

- response_id: Optional[str]
- """Identifier for each response.
- """
-
  model_version: Optional[str]
  """Output only. The model version used to generate the response."""

  prompt_feedback: Optional[GenerateContentResponsePromptFeedbackDict]
  """Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."""

+ response_id: Optional[str]
+ """Output only. response_id is used to identify each response. It is the encoding of the event_id."""
+
  usage_metadata: Optional[GenerateContentResponseUsageMetadataDict]
  """Usage metadata about the response(s)."""
@@ -6729,6 +6811,204 @@ UpscaleImageResponseOrDict = Union[
  ]


+ class ProductImage(_common.BaseModel):
+ """An image of the product."""
+
+ product_image: Optional[Image] = Field(
+ default=None,
+ description="""An image of the product to be recontextualized.""",
+ )
+
+
+ class ProductImageDict(TypedDict, total=False):
+ """An image of the product."""
+
+ product_image: Optional[ImageDict]
+ """An image of the product to be recontextualized."""
+
+
+ ProductImageOrDict = Union[ProductImage, ProductImageDict]
+
+
+ class RecontextImageSource(_common.BaseModel):
+ """A set of source input(s) for image recontextualization."""
+
+ prompt: Optional[str] = Field(
+ default=None,
+ description="""A text prompt for guiding the model during image
+ recontextualization. Not supported for Virtual Try-On.""",
+ )
+ person_image: Optional[Image] = Field(
+ default=None,
+ description="""Image of the person or subject who will be wearing the
+ product(s).""",
+ )
+ product_images: Optional[list[ProductImage]] = Field(
+ default=None, description="""A list of product images."""
+ )
+
+
+ class RecontextImageSourceDict(TypedDict, total=False):
+ """A set of source input(s) for image recontextualization."""
+
+ prompt: Optional[str]
+ """A text prompt for guiding the model during image
+ recontextualization. Not supported for Virtual Try-On."""
+
+ person_image: Optional[ImageDict]
+ """Image of the person or subject who will be wearing the
+ product(s)."""
+
+ product_images: Optional[list[ProductImageDict]]
+ """A list of product images."""
+
+
+ RecontextImageSourceOrDict = Union[
+ RecontextImageSource, RecontextImageSourceDict
+ ]
+
+
+ class RecontextImageConfig(_common.BaseModel):
+ """Configuration for recontextualizing an image."""
+
+ http_options: Optional[HttpOptions] = Field(
+ default=None, description="""Used to override HTTP request options."""
+ )
+ number_of_images: Optional[int] = Field(
+ default=None, description="""Number of images to generate."""
+ )
+ base_steps: Optional[int] = Field(
+ default=None,
+ description="""The number of sampling steps. A higher value has better image
+ quality, while a lower value has better latency.""",
+ )
+ output_gcs_uri: Optional[str] = Field(
+ default=None,
+ description="""Cloud Storage URI used to store the generated images.""",
+ )
+ seed: Optional[int] = Field(
+ default=None, description="""Random seed for image generation."""
+ )
+ safety_filter_level: Optional[SafetyFilterLevel] = Field(
+ default=None, description="""Filter level for safety filtering."""
+ )
+ person_generation: Optional[PersonGeneration] = Field(
+ default=None,
+ description="""Whether allow to generate person images, and restrict to specific
+ ages.""",
+ )
+ output_mime_type: Optional[str] = Field(
+ default=None, description="""MIME type of the generated image."""
+ )
+ output_compression_quality: Optional[int] = Field(
+ default=None,
+ description="""Compression quality of the generated image (for ``image/jpeg``
+ only).""",
+ )
+ enhance_prompt: Optional[bool] = Field(
+ default=None, description="""Whether to use the prompt rewriting logic."""
+ )
+
+
+ class RecontextImageConfigDict(TypedDict, total=False):
+ """Configuration for recontextualizing an image."""
+
+ http_options: Optional[HttpOptionsDict]
+ """Used to override HTTP request options."""
+
+ number_of_images: Optional[int]
+ """Number of images to generate."""
+
+ base_steps: Optional[int]
+ """The number of sampling steps. A higher value has better image
+ quality, while a lower value has better latency."""
+
+ output_gcs_uri: Optional[str]
+ """Cloud Storage URI used to store the generated images."""
+
+ seed: Optional[int]
+ """Random seed for image generation."""
+
+ safety_filter_level: Optional[SafetyFilterLevel]
+ """Filter level for safety filtering."""
+
+ person_generation: Optional[PersonGeneration]
+ """Whether allow to generate person images, and restrict to specific
+ ages."""
+
+ output_mime_type: Optional[str]
+ """MIME type of the generated image."""
+
+ output_compression_quality: Optional[int]
+ """Compression quality of the generated image (for ``image/jpeg``
+ only)."""
+
+ enhance_prompt: Optional[bool]
+ """Whether to use the prompt rewriting logic."""
+
+
+ RecontextImageConfigOrDict = Union[
+ RecontextImageConfig, RecontextImageConfigDict
+ ]
+
+
+ class _RecontextImageParameters(_common.BaseModel):
+ """The parameters for recontextualizing an image."""
+
+ model: Optional[str] = Field(
+ default=None,
+ description="""ID of the model to use. For a list of models, see `Google models
+ <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
+ )
+ source: Optional[RecontextImageSource] = Field(
+ default=None,
+ description="""A set of source input(s) for image recontextualization.""",
+ )
+ config: Optional[RecontextImageConfig] = Field(
+ default=None,
+ description="""Configuration for image recontextualization.""",
+ )
+
+
+ class _RecontextImageParametersDict(TypedDict, total=False):
+ """The parameters for recontextualizing an image."""
+
+ model: Optional[str]
+ """ID of the model to use. For a list of models, see `Google models
+ <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""
+
+ source: Optional[RecontextImageSourceDict]
+ """A set of source input(s) for image recontextualization."""
+
+ config: Optional[RecontextImageConfigDict]
+ """Configuration for image recontextualization."""
+
+
+ _RecontextImageParametersOrDict = Union[
+ _RecontextImageParameters, _RecontextImageParametersDict
+ ]
+
+
+ class RecontextImageResponse(_common.BaseModel):
+ """The output images response."""
+
+ generated_images: Optional[list[GeneratedImage]] = Field(
+ default=None, description="""List of generated images."""
+ )
+
+
+ class RecontextImageResponseDict(TypedDict, total=False):
+ """The output images response."""
+
+ generated_images: Optional[list[GeneratedImageDict]]
+ """List of generated images."""
+
+
+ RecontextImageResponseOrDict = Union[
+ RecontextImageResponse, RecontextImageResponseDict
+ ]
+
+
  class GetModelConfig(_common.BaseModel):
  """Optional parameters for models.get method."""
@@ -10765,6 +11045,9 @@ _DeleteBatchJobParametersOrDict = Union[
  class DeleteResourceJob(_common.BaseModel):
  """The return value of delete operation."""

+ sdk_http_response: Optional[HttpResponse] = Field(
+ default=None, description="""Used to retain the full HTTP response."""
+ )
  name: Optional[str] = Field(default=None, description="""""")
  done: Optional[bool] = Field(default=None, description="""""")
  error: Optional[JobError] = Field(default=None, description="""""")
@@ -10773,6 +11056,9 @@ class DeleteResourceJob(_common.BaseModel):
  class DeleteResourceJobDict(TypedDict, total=False):
  """The return value of delete operation."""

+ sdk_http_response: Optional[HttpResponseDict]
+ """Used to retain the full HTTP response."""
+
  name: Optional[str]
  """"""
@@ -12594,13 +12880,17 @@ LiveClientRealtimeInputOrDict = Union[
  LiveClientRealtimeInput, LiveClientRealtimeInputDict
  ]

+
  if _is_pillow_image_imported:
- BlobImageUnion = Union[Blob, PIL_Image]
+ BlobImageUnion = Union[PIL_Image, Blob]
  else:
  BlobImageUnion = Blob # type: ignore[misc]


- BlobImageUnionDict = Union[BlobImageUnion, BlobDict]
+ if _is_pillow_image_imported:
+ BlobImageUnionDict = Union[PIL_Image, Blob, BlobDict]
+ else:
+ BlobImageUnionDict = Union[Blob, BlobDict] # type: ignore[misc]


  class LiveSendRealtimeInputParameters(_common.BaseModel):
@@ -13120,6 +13410,10 @@ class LiveMusicGenerationConfig(_common.BaseModel):
  default=None,
  description="""Whether the audio output should contain only bass and drums.""",
  )
+ music_generation_mode: Optional[MusicGenerationMode] = Field(
+ default=None,
+ description="""The mode of music generation. Default mode is QUALITY.""",
+ )


  class LiveMusicGenerationConfigDict(TypedDict, total=False):
@@ -13163,6 +13457,9 @@ class LiveMusicGenerationConfigDict(TypedDict, total=False):
  only_bass_and_drums: Optional[bool]
  """Whether the audio output should contain only bass and drums."""

+ music_generation_mode: Optional[MusicGenerationMode]
+ """The mode of music generation. Default mode is QUALITY."""
+

  LiveMusicGenerationConfigOrDict = Union[
  LiveMusicGenerationConfig, LiveMusicGenerationConfigDict
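
Together with the MusicGenerationMode enum added earlier in this diff, this lets callers steer live music generation toward quality, diversity, or vocalization. A minimal sketch, assuming `session` is an already-connected live music session (for example from client.aio.live.music.connect(...), which is not part of this diff):

from google.genai import types

async def set_mode(session) -> None:
    # Hedged sketch: `session` is assumed to be an active live music session.
    config = types.LiveMusicGenerationConfig(
        bpm=120,
        music_generation_mode=types.MusicGenerationMode.DIVERSITY,
    )
    await session.set_music_generation_config(config=config)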
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
  # limitations under the License.
  #

- __version__ = '1.28.0' # x-release-please-version
+ __version__ = '1.29.0' # x-release-please-version
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.28.0
+ Version: 1.29.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
@@ -25,7 +25,7 @@ Requires-Dist: google-auth<3.0.0,>=2.14.1
  Requires-Dist: httpx<1.0.0,>=0.28.1
  Requires-Dist: pydantic<3.0.0,>=2.0.0
  Requires-Dist: requests<3.0.0,>=2.28.1
- Requires-Dist: tenacity<9.0.0,>=8.2.3
+ Requires-Dist: tenacity<9.2.0,>=8.2.3
  Requires-Dist: websockets<15.1.0,>=13.0.0
  Requires-Dist: typing-extensions<5.0.0,>=4.11.0
  Provides-Extra: aiohttp
@@ -1,35 +1,35 @@
  google/genai/__init__.py,sha256=SKz_9WQKA3R4OpJIDJlgssVfizLNDG2tuWtOD9pxrPE,729
  google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
- google/genai/_api_client.py,sha256=xleLwNglnLnRUVOZeKrdSyBjcZNIqjx2DX9djz904gk,57552
+ google/genai/_api_client.py,sha256=1KBUwi12eM3rhm-M0OStQiIVKD2PNdKJk66dlwNGKHE,60122
  google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
- google/genai/_automatic_function_calling_util.py,sha256=IJkPq2fT9pYxYm5Pbu5-e0nBoZKoZla7yT4_txWRKLs,10324
+ google/genai/_automatic_function_calling_util.py,sha256=Y_GyvIznYWLEga0rnmrvjlj-UYWbYYDmwjbSthaZXcM,11293
  google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
  google/genai/_common.py,sha256=SmBlz7AQZbKbT8KE5vsvK5Iz1OaRwHF6J51KDJBbjMo,19936
  google/genai/_extra_utils.py,sha256=6mxUnbwKkrAJ9zR8JP4nCKuA0F1BFqAc2t5mzdAONxk,20603
- google/genai/_live_converters.py,sha256=pIcEfsAxjV1zeWmxMNigDD7ETHiuutp-a2y0HQThdmU,100162
+ google/genai/_live_converters.py,sha256=mzQbhJ1dbo-H2rw48az7c4gISJP35e7_DdrgWSQTPOc,100532
  google/genai/_mcp_utils.py,sha256=HuWJ8FUjquv40Mf_QjcL5r5yXWrS-JjINsjlOSbbyAc,3870
  google/genai/_replay_api_client.py,sha256=MRUxUWCC9PoTk-NxsK3rGB8yw8cCrUa8ZeTVU3oEguw,21729
  google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
  google/genai/_tokens_converters.py,sha256=ClWTsgcqn91zSw_qTqLPTNSP1-_G8s-NlBCD8-DQniw,23803
  google/genai/_transformers.py,sha256=l7Rf4OvaDG_91jTrdCeHLBAm1J7_pyYwJW1oQM6Q9MI,37431
- google/genai/batches.py,sha256=a8X0wi8D6d7WC8873C2oVtk_NBYbd8xeI77GSkww4Nc,81094
+ google/genai/batches.py,sha256=f-baI1oLgPRnk1NnzQBJD58QuJAPw6dC-AMsWyfZrWI,81719
  google/genai/caches.py,sha256=isEzVYJgQVOjHf0XkFl86HOXzoYFXB-PgEVqwuo1V4s,64673
  google/genai/chats.py,sha256=0QdOUeWEYDQgAWBy1f7a3z3yY9S8tXSowUzNrzazzj4,16651
  google/genai/client.py,sha256=wXnfZBSv9p-yKtX_gabUrfBXoYHuqHhzK_VgwRttMgY,10777
- google/genai/errors.py,sha256=Cd3M1la1VgvRh0vLgOzL9x8fgXnw96A6tj-kfsS0wi8,5586
+ google/genai/errors.py,sha256=GlEvypbRgF3h5BxlocmWVf-S9kzERA_lwGLCMyAvpcY,5714
  google/genai/files.py,sha256=Z9CP2RLAZlZDE3zWXVNA2LF3x7wJTXOhNzDoSyHmr9k,40154
  google/genai/live.py,sha256=R7cAtesP5-Yp685H00EpgmzNsMNjERIR8L-m5e5kEMI,39469
  google/genai/live_music.py,sha256=3GG9nsto8Vhkohcs-4CPMS4DFp1ZtMuLYzHfvEPYAeg,6971
- google/genai/models.py,sha256=ci7dOuCGm8ky4m-rD7-_Ur9hzGUKWBofmlKZIA_BwLQ,242026
+ google/genai/models.py,sha256=tJq_aOIX6qU66lkIgPCdXA5Xartu-ceox3mqQbiTubQ,253997
  google/genai/operations.py,sha256=3eudPaItN6_JJKMWNT9lLIJLUGyAQfFK1xken7Rv8vQ,12814
  google/genai/pagers.py,sha256=m0SfWWn1EJs2k1On3DZx371qb8g2BRm_188ExsicIRc,7098
  google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
  google/genai/tokens.py,sha256=PvAIGl93bLCtc3bxaBS1-NE4et_xn4XBZ1bJn9ZYxP8,12502
  google/genai/tunings.py,sha256=ctSApLv-Lpjrkm2-JE8fQo332706EMXp9b9u552wNTE,49996
- google/genai/types.py,sha256=rIiMTLQQiRz7frgL3uchuXUCQciJEIMBV-B_1okILck,476572
- google/genai/version.py,sha256=YPf09ydfdYGYELmthoUhjWan0Yv6J2Cz6saNEHhqN-c,627
- google_genai-1.28.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- google_genai-1.28.0.dist-info/METADATA,sha256=_G5eO1nP7bswlUTGcuh46FEbSF_vLZ7Ir6G76ANNISk,43091
- google_genai-1.28.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- google_genai-1.28.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
- google_genai-1.28.0.dist-info/RECORD,,
+ google/genai/types.py,sha256=ySiXuZgdfLJKB3Ko4U0FdbIfRH5QHdCjPfHjYqyKSV0,486786
+ google/genai/version.py,sha256=5OGSscZC2ch3x_xtIREzcipDkeeB3GB-TjqER2xhRqY,627
+ google_genai-1.29.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ google_genai-1.29.0.dist-info/METADATA,sha256=RIa3Hr8eCgqM1gTB3uBPRrWucRxJNI0ptBH8mTzpFWI,43091
+ google_genai-1.29.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ google_genai-1.29.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+ google_genai-1.29.0.dist-info/RECORD,,