google-genai 1.41.0__py3-none-any.whl → 1.43.0__py3-none-any.whl
- google/genai/_api_client.py +2 -1
- google/genai/_common.py +213 -77
- google/genai/_live_converters.py +717 -3098
- google/genai/_replay_api_client.py +9 -5
- google/genai/_tokens_converters.py +23 -434
- google/genai/_transformers.py +42 -12
- google/genai/batches.py +125 -1054
- google/genai/caches.py +69 -847
- google/genai/errors.py +9 -2
- google/genai/files.py +12 -171
- google/genai/live.py +10 -11
- google/genai/live_music.py +24 -27
- google/genai/models.py +333 -1828
- google/genai/operations.py +6 -32
- google/genai/tokens.py +2 -12
- google/genai/tunings.py +18 -197
- google/genai/types.py +154 -3
- google/genai/version.py +1 -1
- {google_genai-1.41.0.dist-info → google_genai-1.43.0.dist-info}/METADATA +40 -38
- google_genai-1.43.0.dist-info/RECORD +39 -0
- google_genai-1.41.0.dist-info/RECORD +0 -39
- {google_genai-1.41.0.dist-info → google_genai-1.43.0.dist-info}/WHEEL +0 -0
- {google_genai-1.41.0.dist-info → google_genai-1.43.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.41.0.dist-info → google_genai-1.43.0.dist-info}/top_level.txt +0 -0
google/genai/types.py
CHANGED
@@ -1117,6 +1117,40 @@ class FunctionResponsePart(_common.BaseModel):
       default=None, description="""Optional. URI based data."""
   )
 
+  @classmethod
+  def from_bytes(cls, *, data: bytes, mime_type: str) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from bytes and mime type.
+
+    Args:
+      data (bytes): The bytes of the data
+      mime_type (str): mime_type: The MIME type of the data.
+    """
+    inline_data = FunctionResponseBlob(
+        data=data,
+        mime_type=mime_type,
+    )
+    return cls(inline_data=inline_data)
+
+  @classmethod
+  def from_uri(
+      cls, *, file_uri: str, mime_type: Optional[str] = None
+  ) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from a file uri.
+
+    Args:
+      file_uri (str): The uri of the file
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
+        the MIME type will be automatically determined.
+    """
+    if mime_type is None:
+      import mimetypes
+
+      mime_type, _ = mimetypes.guess_type(file_uri)
+      if not mime_type:
+        raise ValueError(f'Failed to determine mime type for file: {file_uri}')
+    file_data = FunctionResponseFileData(file_uri=file_uri, mime_type=mime_type)
+    return cls(file_data=file_data)
+
 
 class FunctionResponsePartDict(TypedDict, total=False):
   """A datatype containing media that is part of a `FunctionResponse` message.
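The two constructors added here mirror the existing `Part.from_bytes` / `Part.from_uri` helpers. A minimal usage sketch (the payload bytes and URI below are placeholders, not from the diff):

```python
from google.genai import types

# From raw bytes with an explicit MIME type.
audio_part = types.FunctionResponsePart.from_bytes(
    data=b'...binary audio...', mime_type='audio/wav'  # placeholder bytes
)

# From a URI; when mime_type is omitted it is guessed from the file
# extension via mimetypes, and a ValueError is raised if the guess fails.
image_part = types.FunctionResponsePart.from_uri(file_uri='gs://my-bucket/chart.png')
```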
@@ -1274,7 +1308,7 @@ class Part(_common.BaseModel):
 
     Args:
       file_uri (str): The uri of the file
-      mime_type (str): mime_type: The MIME type of the
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
         the MIME type will be automatically determined.
     """
     if mime_type is None:
@@ -1305,9 +1339,15 @@ class Part(_common.BaseModel):
 
   @classmethod
   def from_function_response(
-      cls,
+      cls,
+      *,
+      name: str,
+      response: dict[str, Any],
+      parts: Optional[list[FunctionResponsePart]] = None,
   ) -> 'Part':
-    function_response = FunctionResponse(
+    function_response = FunctionResponse(
+        name=name, response=response, parts=parts
+    )
     return cls(function_response=function_response)
 
   @classmethod
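With the keyword-only signature, a function response can now carry media parts alongside the JSON payload. A hedged sketch (the function name and values are illustrative):

```python
from google.genai import types

part = types.Part.from_function_response(
    name='get_current_weather',        # illustrative function name
    response={'result': 'sunny'},
    parts=[
        types.FunctionResponsePart.from_bytes(
            data=b'...chart image bytes...', mime_type='image/png'  # placeholder
        )
    ],
)
```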
@@ -2783,6 +2823,10 @@ class GoogleMaps(_common.BaseModel):
       default=None,
       description="""Optional. Auth config for the Google Maps tool.""",
   )
+  enable_widget: Optional[bool] = Field(
+      default=None,
+      description="""Optional. If true, include the widget context token in the response.""",
+  )
 
 
 class GoogleMapsDict(TypedDict, total=False):
@@ -2791,6 +2835,9 @@ class GoogleMapsDict(TypedDict, total=False):
 
   auth_config: Optional[AuthConfigDict]
   """Optional. Auth config for the Google Maps tool."""
 
+  enable_widget: Optional[bool]
+  """Optional. If true, include the widget context token in the response."""
+
 
 GoogleMapsOrDict = Union[GoogleMaps, GoogleMapsDict]
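Assuming the usual tool wiring, the new flag is set on the `GoogleMaps` tool config. A sketch:

```python
from google.genai import types

# Request the widget context token alongside Maps grounding results.
config = types.GenerateContentConfig(
    tools=[types.Tool(google_maps=types.GoogleMaps(enable_widget=True))],
)
```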
@@ -4810,6 +4857,12 @@ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel):
       default=None,
       description="""A reference representing this place review which may be used to look up this place review again.""",
   )
+  review_id: Optional[str] = Field(
+      default=None, description="""Id of the review referencing the place."""
+  )
+  title: Optional[str] = Field(
+      default=None, description="""Title of the review."""
+  )
 
 
 class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict(
@@ -4834,6 +4887,12 @@ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict(
 
   review: Optional[str]
   """A reference representing this place review which may be used to look up this place review again."""
 
+  review_id: Optional[str]
+  """Id of the review referencing the place."""
+
+  title: Optional[str]
+  """Title of the review."""
+
 
 GroundingChunkMapsPlaceAnswerSourcesReviewSnippetOrDict = Union[
     GroundingChunkMapsPlaceAnswerSourcesReviewSnippet,
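These review-snippet fields would be read off Maps grounding chunks; a sketch, with attribute paths inferred from the class names above (treat them as assumptions), given a Maps-grounded `response`:

```python
# Attribute names below are inferred from the model classes, not confirmed by
# the diff: GroundingChunk.maps -> place_answer_sources -> review_snippets.
for chunk in response.candidates[0].grounding_metadata.grounding_chunks or []:
    if chunk.maps and chunk.maps.place_answer_sources:
        for snippet in chunk.maps.place_answer_sources.review_snippets or []:
            print(snippet.review_id, snippet.title)
```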
@@ -5194,6 +5253,39 @@ class SearchEntryPointDict(TypedDict, total=False):
 SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict]
 
 
+class GroundingMetadataSourceFlaggingUri(_common.BaseModel):
+  """Source content flagging uri for a place or review.
+
+  This is currently populated only for Google Maps grounding.
+  """
+
+  flag_content_uri: Optional[str] = Field(
+      default=None,
+      description="""A link where users can flag a problem with the source (place or review).""",
+  )
+  source_id: Optional[str] = Field(
+      default=None, description="""Id of the place or review."""
+  )
+
+
+class GroundingMetadataSourceFlaggingUriDict(TypedDict, total=False):
+  """Source content flagging uri for a place or review.
+
+  This is currently populated only for Google Maps grounding.
+  """
+
+  flag_content_uri: Optional[str]
+  """A link where users can flag a problem with the source (place or review)."""
+
+  source_id: Optional[str]
+  """Id of the place or review."""
+
+
+GroundingMetadataSourceFlaggingUriOrDict = Union[
+    GroundingMetadataSourceFlaggingUri, GroundingMetadataSourceFlaggingUriDict
+]
+
+
 class GroundingMetadata(_common.BaseModel):
   """Metadata returned to client when grounding is enabled."""
 
@@ -5219,6 +5311,12 @@ class GroundingMetadata(_common.BaseModel):
       default=None,
       description="""Optional. Google search entry for the following-up web searches.""",
   )
+  source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUri]] = (
+      Field(
+          default=None,
+          description="""Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding.""",
+      )
+  )
   web_search_queries: Optional[list[str]] = Field(
       default=None,
       description="""Optional. Web search queries for the following-up web search.""",
@@ -5246,6 +5344,9 @@ class GroundingMetadataDict(TypedDict, total=False):
 
   search_entry_point: Optional[SearchEntryPointDict]
   """Optional. Google search entry for the following-up web searches."""
 
+  source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUriDict]]
+  """Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding."""
+
   web_search_queries: Optional[list[str]]
   """Optional. Web search queries for the following-up web search."""
 
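On the read side, the new list would be inspected from a grounded response; a sketch assuming `response` came from a Maps-grounded `generate_content` call:

```python
metadata = response.candidates[0].grounding_metadata
if metadata and metadata.source_flagging_uris:
    for flagging_uri in metadata.source_flagging_uris:
        print(flagging_uri.source_id, flagging_uri.flag_content_uri)
```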
@@ -6259,6 +6360,10 @@ class GenerateImagesConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   image_size: Optional[str] = Field(
       default=None,
       description="""The size of the largest dimension of the generated image.
@@ -6324,6 +6429,9 @@ class GenerateImagesConfigDict(TypedDict, total=False):
 
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   image_size: Optional[str]
   """The size of the largest dimension of the generated image.
   Supported sizes are 1K and 2K (not supported for Imagen 3 models)."""
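The same `labels` field is added to the edit, upscale, recontext, and segment configs below. A sketch of attaching billing labels to an image request (the model name and label keys are illustrative):

```python
from google.genai import types

response = client.models.generate_images(
    model='imagen-3.0-generate-002',  # illustrative model name
    prompt='A watercolor lighthouse at dusk',
    config=types.GenerateImagesConfig(
        number_of_images=1,
        labels={'team': 'imaging', 'env': 'dev'},  # user-defined billing labels
    ),
)
```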
@@ -6901,6 +7009,10 @@ class EditImageConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   edit_mode: Optional[EditMode] = Field(
       default=None,
       description="""Describes the editing mode for the request.""",
@@ -6967,6 +7079,9 @@ class EditImageConfigDict(TypedDict, total=False):
 
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   edit_mode: Optional[EditMode]
   """Describes the editing mode for the request."""
 
@@ -7082,6 +7197,10 @@ class _UpscaleImageAPIConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   number_of_images: Optional[int] = Field(default=None, description="""""")
   mode: Optional[str] = Field(default=None, description="""""")
 
@@ -7121,6 +7240,9 @@ class _UpscaleImageAPIConfigDict(TypedDict, total=False):
       output image will have be more different from the input image, but
       with finer details and less noise."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   number_of_images: Optional[int]
   """"""
 
@@ -7297,6 +7419,10 @@ class RecontextImageConfig(_common.BaseModel):
   enhance_prompt: Optional[bool] = Field(
       default=None, description="""Whether to use the prompt rewriting logic."""
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class RecontextImageConfigDict(TypedDict, total=False):
@@ -7338,6 +7464,9 @@ class RecontextImageConfigDict(TypedDict, total=False):
   enhance_prompt: Optional[bool]
   """Whether to use the prompt rewriting logic."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 RecontextImageConfigOrDict = Union[
     RecontextImageConfig, RecontextImageConfigDict
@@ -7488,6 +7617,10 @@ class SegmentImageConfig(_common.BaseModel):
       can be set to a decimal value between 0 and 255 non-inclusive.
       Set to -1 for no binary color thresholding.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class SegmentImageConfigDict(TypedDict, total=False):
@@ -7518,6 +7651,9 @@ class SegmentImageConfigDict(TypedDict, total=False):
       can be set to a decimal value between 0 and 255 non-inclusive.
      Set to -1 for no binary color thresholding."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 SegmentImageConfigOrDict = Union[SegmentImageConfig, SegmentImageConfigDict]
 
@@ -8980,6 +9116,7 @@ class GenerateVideosOperation(_common.BaseModel, Operation):
       cls, api_response: Any, is_vertex_ai: bool = False
   ) -> Self:
     """Instantiates a GenerateVideosOperation from an API response."""
+
     if is_vertex_ai:
       response_dict = _GenerateVideosOperation_from_vertex(api_response)
     else:
@@ -11629,6 +11766,10 @@ class InlinedRequest(_common.BaseModel):
       description="""Content of the request.
       """,
   )
+  metadata: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""The metadata to be associated with the request.""",
+  )
   config: Optional[GenerateContentConfig] = Field(
       default=None,
       description="""Configuration that contains optional model parameters.
@@ -11647,6 +11788,9 @@ class InlinedRequestDict(TypedDict, total=False):
   """Content of the request.
   """
 
+  metadata: Optional[dict[str, str]]
+  """The metadata to be associated with the request."""
+
   config: Optional[GenerateContentConfigDict]
   """Configuration that contains optional model parameters.
   """
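For inlined batch requests, the metadata map rides along with each request; a sketch modeled on the README's inlined-batch example (the metadata keys are illustrative):

```python
batch_job = client.batches.create(
    model='gemini-2.5-flash',
    src=[{
        'contents': [{
            'parts': [{'text': 'Why is the sky blue?'}],
            'role': 'user',
        }],
        'metadata': {'request_id': 'req-001'},  # new per-request metadata
    }],
)
```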
@@ -13000,6 +13144,10 @@ class UpscaleImageConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class UpscaleImageConfigDict(TypedDict, total=False):
@@ -13038,6 +13186,9 @@ class UpscaleImageConfigDict(TypedDict, total=False):
       output image will have be more different from the input image, but
       with finer details and less noise."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 UpscaleImageConfigOrDict = Union[UpscaleImageConfig, UpscaleImageConfigDict]
 
{google_genai-1.41.0.dist-info → google_genai-1.43.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.41.0
+Version: 1.43.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -296,7 +296,7 @@ See the 'Create a client' section above to initialize a client.
 
 ```python
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Why is the sky blue?'
 )
 print(response.text)
 ```
@@ -313,7 +313,7 @@ python code.
 ```python
 file = client.files.upload(file='a11.txt')
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=['Could you summarize this file?', file]
 )
 print(response.text)
@@ -617,7 +617,7 @@ print(async_pager[0])
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
@@ -651,7 +651,7 @@ def get_current_weather(location: str) -> str:
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )
@@ -667,7 +667,7 @@ as follows:
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -714,7 +714,7 @@ function = types.FunctionDeclaration(
 tool = types.Tool(function_declarations=[function])
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )
@@ -758,7 +758,7 @@ function_response_content = types.Content(
 )
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         user_prompt_content,
         function_call_content,
@@ -793,7 +793,7 @@ def get_current_weather(location: str) -> str:
     return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -823,7 +823,7 @@ def get_current_weather(location: str) -> str:
     return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -913,7 +913,7 @@ user_profile = {
 }
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me a random user profile.',
     config={
         'response_mime_type': 'application/json',
@@ -943,7 +943,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -957,7 +957,7 @@ print(response.text)
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -995,6 +995,8 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
 values as the response.
 
 ```python
+from enum import Enum
+
 class InstrumentEnum(Enum):
   PERCUSSION = 'Percussion'
   STRING = 'String'
@@ -1003,7 +1005,7 @@ class InstrumentEnum(Enum):
   KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'text/x.enum',
@@ -1029,7 +1031,7 @@ class InstrumentEnum(Enum):
   KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'application/json',
@@ -1048,7 +1050,7 @@ to you, rather than being returned as one chunk.
 
 ```python
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -1062,7 +1064,7 @@ you can use the `from_uri` class method to create a `Part` object.
 from google.genai import types
 
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_uri(
@@ -1086,7 +1088,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
     image_bytes = f.read()
 
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1105,7 +1107,7 @@ of `client.models.generate_content`
 
 ```python
 response = await client.aio.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
@@ -1116,7 +1118,7 @@ print(response.text)
 
 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -1125,7 +1127,7 @@ async for chunk in await client.aio.models.generate_content_stream(
 
 ```python
 response = client.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1137,7 +1139,7 @@ Compute tokens is only supported in Vertex AI.
 
 ```python
 response = client.models.compute_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1147,7 +1149,7 @@ print(response)
 
 ```python
 response = await client.aio.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1156,14 +1158,14 @@ print(response)
 #### Local Count Tokens
 
 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.count_tokens("What is your name?")
 ```
 
 #### Local Compute Tokens
 
 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.compute_tokens("What is your name?")
 ```
 
@@ -1376,7 +1378,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
 ### Send Message (Synchronous Non-Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 response = chat.send_message('tell me a story')
 print(response.text)
 response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1386,7 +1388,7 @@ print(response.text)
 ### Send Message (Synchronous Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 for chunk in chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
@@ -1394,7 +1396,7 @@ for chunk in chat.send_message_stream('tell me a story'):
 ### Send Message (Asynchronous Non-Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 response = await chat.send_message('tell me a story')
 print(response.text)
 ```
@@ -1402,7 +1404,7 @@ print(response.text)
 ### Send Message (Asynchronous Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 async for chunk in await chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
@@ -1461,7 +1463,7 @@ else:
 file_uris = [file1.uri, file2.uri]
 
 cached_content = client.caches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
@@ -1496,7 +1498,7 @@ cached_content = client.caches.get(name=cached_content.name)
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
@@ -1518,7 +1520,7 @@ section above to initialize a client.
 ```python
 from google.genai import types
 
-model = 'gemini-2.
+model = 'gemini-2.5-flash'
 training_dataset = types.TuningDataset(
     # or gcs_uri=my_vertex_multimodal_dataset
     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -1672,7 +1674,7 @@ Vertex AI:
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
 )
 
@@ -1684,7 +1686,7 @@ Gemini Developer API:
 ```python
 # Create a batch job with inlined requests
 batch_job = client.batches.create(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     src=[{
         "contents": [{
             "parts": [{
@@ -1699,7 +1701,7 @@ batch_job = client.batches.create(
 job
 ```
 
-In order to create a batch job with file name. Need to upload a
+In order to create a batch job with file name. Need to upload a json file.
 For example myrequests.json:
 
 ```
@@ -1712,14 +1714,14 @@ Then upload the file.
 ```python
 # Upload the file
 file = client.files.upload(
-    file='
-    config=types.UploadFileConfig(display_name='
+    file='myrequests.json',
+    config=types.UploadFileConfig(display_name='test-json')
 )
 
 # Create a batch job with file name
 batch_job = client.batches.create(
     model="gemini-2.0-flash",
-    src="files/
+    src="files/test-json",
 )
 ```
 