google-genai 1.40.0__tar.gz → 1.42.0__tar.gz
This diff compares the contents of two publicly available package versions as released to their public registry. It is provided for informational purposes only.
- {google_genai-1.40.0/google_genai.egg-info → google_genai-1.42.0}/PKG-INFO +40 -38
- {google_genai-1.40.0 → google_genai-1.42.0}/README.md +39 -37
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_api_client.py +2 -1
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_common.py +213 -77
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_extra_utils.py +72 -1
- google_genai-1.42.0/google/genai/_live_converters.py +1401 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_replay_api_client.py +8 -4
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_tokens_converters.py +20 -424
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_transformers.py +42 -12
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/batches.py +113 -1063
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/caches.py +67 -863
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/errors.py +9 -2
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/files.py +29 -268
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/live.py +10 -11
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/live_music.py +24 -27
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/models.py +322 -1835
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/operations.py +6 -32
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/tokens.py +2 -12
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/tunings.py +24 -197
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/types.py +187 -5
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/version.py +1 -1
- {google_genai-1.40.0 → google_genai-1.42.0/google_genai.egg-info}/PKG-INFO +40 -38
- {google_genai-1.40.0 → google_genai-1.42.0}/pyproject.toml +1 -1
- google_genai-1.40.0/google/genai/_live_converters.py +0 -3750
- {google_genai-1.40.0 → google_genai-1.42.0}/LICENSE +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/MANIFEST.in +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/__init__.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_adapters.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_api_module.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_automatic_function_calling_util.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_base_transformers.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_base_url.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_local_tokenizer_loader.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_mcp_utils.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_operations_converters.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/chats.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/client.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/local_tokenizer.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/pagers.py +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google/genai/py.typed +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google_genai.egg-info/SOURCES.txt +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google_genai.egg-info/requires.txt +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-1.40.0 → google_genai-1.42.0}/setup.cfg +0 -0
--- google_genai-1.40.0/google_genai.egg-info/PKG-INFO
+++ google_genai-1.42.0/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.40.0
+Version: 1.42.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -296,7 +296,7 @@ See the 'Create a client' section above to initialize a client.

 ```python
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Why is the sky blue?'
 )
 print(response.text)
 ```
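The snippets in these hunks assume a client created per the 'Create a client' section the hunk header references. For orientation, a minimal sketch (Gemini Developer API; the api_key value is a placeholder):

```python
# Minimal client setup assumed by the README snippets in this diff;
# 'YOUR_API_KEY' is a placeholder, not a real credential.
from google import genai

client = genai.Client(api_key='YOUR_API_KEY')

response = client.models.generate_content(
    model='gemini-2.5-flash', contents='Why is the sky blue?'
)
print(response.text)
```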
@@ -313,7 +313,7 @@ python code.

 ```python
 file = client.files.upload(file='a11.txt')
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=['Could you summarize this file?', file]
 )
 print(response.text)
@@ -617,7 +617,7 @@ print(async_pager[0])

 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
@@ -651,7 +651,7 @@ def get_current_weather(location: str) -> str:


 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )
@@ -667,7 +667,7 @@ as follows:

 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -714,7 +714,7 @@ function = types.FunctionDeclaration(

 tool = types.Tool(function_declarations=[function])

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )
@@ -758,7 +758,7 @@ function_response_content = types.Content(

 )

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         user_prompt_content,
         function_call_content,
@@ -793,7 +793,7 @@ def get_current_weather(location: str) -> str:

     return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -823,7 +823,7 @@ def get_current_weather(location: str) -> str:

     return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -913,7 +913,7 @@ user_profile = {

 }

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me a random user profile.',
     config={
         'response_mime_type': 'application/json',
@@ -943,7 +943,7 @@ class CountryInfo(BaseModel):


 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -957,7 +957,7 @@ print(response.text)

 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -995,6 +995,8 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum

 values as the response.

 ```python
+from enum import Enum
+
 class InstrumentEnum(Enum):
     PERCUSSION = 'Percussion'
     STRING = 'String'
@@ -1003,7 +1005,7 @@ class InstrumentEnum(Enum):

     KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'text/x.enum',
@@ -1029,7 +1031,7 @@ class InstrumentEnum(Enum):

     KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'application/json',
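With the `from enum import Enum` import the diff adds, the enum example becomes self-contained. A sketch under that assumption; only the enum members visible in the diff are listed, and `response_schema` is the SDK's usual companion to the `text/x.enum` mime type:

```python
# Self-contained version of the enum-response snippet after the added import.
from enum import Enum

from google import genai


class InstrumentEnum(Enum):
    PERCUSSION = 'Percussion'
    STRING = 'String'
    KEYBOARD = 'Keyboard'  # further members are elided in the diff


client = genai.Client(api_key='YOUR_API_KEY')  # placeholder key
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='What instrument plays multiple notes at once?',
    config={
        'response_mime_type': 'text/x.enum',
        'response_schema': InstrumentEnum,
    },
)
print(response.text)  # e.g. 'Keyboard'
```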
@@ -1048,7 +1050,7 @@ to you, rather than being returned as one chunk.

 ```python
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -1062,7 +1064,7 @@ you can use the `from_uri` class method to create a `Part` object.

 from google.genai import types

 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_uri(
@@ -1086,7 +1088,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:

     image_bytes = f.read()

 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1105,7 +1107,7 @@ of `client.models.generate_content`

 ```python
 response = await client.aio.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 )

 print(response.text)
@@ -1116,7 +1118,7 @@ print(response.text)

 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -1125,7 +1127,7 @@ async for chunk in await client.aio.models.generate_content_stream(

 ```python
 response = client.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1137,7 +1139,7 @@ Compute tokens is only supported in Vertex AI.

 ```python
 response = client.models.compute_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1147,7 +1149,7 @@ print(response)

 ```python
 response = await client.aio.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1156,14 +1158,14 @@ print(response)
 #### Local Count Tokens

 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.count_tokens("What is your name?")
 ```

 #### Local Compute Tokens

 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.compute_tokens("What is your name?")
 ```

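The LocalTokenizer hunks above count tokens offline, without an API call. A hedged usage sketch, assuming the result mirrors the remote count_tokens response shape:

```python
# Offline token counting with the updated model name. Assumption: the result
# exposes total_tokens, mirroring the remote count_tokens response.
from google import genai

tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
result = tokenizer.count_tokens('What is your name?')
print(result.total_tokens)
```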
@@ -1376,7 +1378,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing

 ### Send Message (Synchronous Non-Streaming)

 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 response = chat.send_message('tell me a story')
 print(response.text)
 response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1386,7 +1388,7 @@ print(response.text)

 ### Send Message (Synchronous Streaming)

 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 for chunk in chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
@@ -1394,7 +1396,7 @@ for chunk in chat.send_message_stream('tell me a story'):

 ### Send Message (Asynchronous Non-Streaming)

 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 response = await chat.send_message('tell me a story')
 print(response.text)
 ```
@@ -1402,7 +1404,7 @@ print(response.text)

 ### Send Message (Asynchronous Streaming)

 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 async for chunk in await chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
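The asynchronous chat snippets above assume a running event loop; a minimal driver sketch (the `client` is the one created earlier in the README):

```python
# Minimal event-loop driver for the async chat snippets; 'client' is the
# genai.Client instance created earlier.
import asyncio


async def main() -> None:
    chat = client.aio.chats.create(model='gemini-2.5-flash')
    async for chunk in await chat.send_message_stream('tell me a story'):
        print(chunk.text)


asyncio.run(main())
```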
@@ -1461,7 +1463,7 @@ else:

 file_uris = [file1.uri, file2.uri]

 cached_content = client.caches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
@@ -1496,7 +1498,7 @@ cached_content = client.caches.get(name=cached_content.name)

 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
@@ -1518,7 +1520,7 @@ section above to initialize a client.

 ```python
 from google.genai import types

-model = 'gemini-2.
+model = 'gemini-2.5-flash'
 training_dataset = types.TuningDataset(
     # or gcs_uri=my_vertex_multimodal_dataset
     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -1672,7 +1674,7 @@ Vertex AI:

 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
 )

@@ -1684,7 +1686,7 @@ Gemini Developer API:

 ```python
 # Create a batch job with inlined requests
 batch_job = client.batches.create(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     src=[{
         "contents": [{
             "parts": [{
@@ -1699,7 +1701,7 @@ batch_job = client.batches.create(

 job
 ```

-In order to create a batch job with file name. Need to upload a
+In order to create a batch job with file name. Need to upload a json file.
 For example myrequests.json:

 ```
@@ -1712,14 +1714,14 @@ Then upload the file.

 ```python
 # Upload the file
 file = client.files.upload(
-    file='
-    config=types.UploadFileConfig(display_name='
+    file='myrequests.json',
+    config=types.UploadFileConfig(display_name='test-json')
 )

 # Create a batch job with file name
 batch_job = client.batches.create(
     model="gemini-2.0-flash",
-    src="files/
+    src="files/test-json",
 )
 ```

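Batch jobs run asynchronously, so the job created above is typically polled until it leaves the running states. A hedged sketch: `client.batches.get` is the SDK's lookup call, and the `JOB_STATE_*` names follow the batches API convention:

```python
# Poll the batch job created above until it finishes. 'client' and
# 'batch_job' come from the preceding snippet; the JOB_STATE_* names
# follow the batches API convention.
import time

while batch_job.state.name in ('JOB_STATE_PENDING', 'JOB_STATE_RUNNING'):
    time.sleep(30)
    batch_job = client.batches.get(name=batch_job.name)

print(batch_job.state)
```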
--- google_genai-1.40.0/README.md
+++ google_genai-1.42.0/README.md

The 35 README.md hunks repeat the PKG-INFO hunks above verbatim: PKG-INFO embeds the README after a 37-line metadata header, so each README hunk sits exactly 37 lines earlier than its PKG-INFO counterpart (e.g. `@@ -259,7 +259,7 @@` in README.md corresponds to `@@ -296,7 +296,7 @@` in PKG-INFO, through `@@ -1675,14 +1677,14 @@` corresponding to `@@ -1712,14 +1714,14 @@`).
--- google_genai-1.40.0/google/genai/_api_client.py
+++ google_genai-1.42.0/google/genai/_api_client.py

@@ -80,7 +80,7 @@ if TYPE_CHECKING:

 logger = logging.getLogger('google_genai._api_client')
 CHUNK_SIZE = 8 * 1024 * 1024 # 8 MB chunk size
-READ_BUFFER_SIZE = 2**
+READ_BUFFER_SIZE = 2**22
 MAX_RETRY_COUNT = 3
 INITIAL_RETRY_DELAY = 1 # second
 DELAY_MULTIPLIER = 2
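For reference, `2**22` bytes is 4 MiB, half the 8 MiB `CHUNK_SIZE` defined just above it.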
@@ -489,6 +489,7 @@ def retry_args(options: Optional[HttpRetryOptions]) -> _common.StringDict:
       'retry': retry,
       'reraise': True,
       'wait': wait,
+      'before_sleep': tenacity.before_sleep_log(logger, logging.INFO),
   }

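The added `'before_sleep'` entry wires tenacity's logging hook into the retry arguments, so each back-off is logged before the sleep. A standalone sketch of the behavior (`flaky_call` is illustrative, not part of the SDK):

```python
# Demonstrates tenacity.before_sleep_log: before each back-off sleep, tenacity
# logs the attempt, the wait time, and the exception that triggered the retry.
# flaky_call is an illustrative stand-in, not part of google-genai.
import logging

import tenacity

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('google_genai._api_client')


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(ConnectionError),
    wait=tenacity.wait_exponential(multiplier=2, min=1),
    stop=tenacity.stop_after_attempt(3),
    reraise=True,
    before_sleep=tenacity.before_sleep_log(logger, logging.INFO),
)
def flaky_call() -> None:
    raise ConnectionError('simulated transient failure')


try:
    flaky_call()
except ConnectionError:
    pass  # two 'Retrying ...' log lines were emitted before the re-raise
```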