google-genai 1.30.0.tar.gz → 1.48.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-1.30.0 → google_genai-1.48.0}/PKG-INFO +303 -167
- google_genai-1.30.0/google_genai.egg-info/PKG-INFO → google_genai-1.48.0/README.md +295 -195
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_api_client.py +331 -242
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_automatic_function_calling_util.py +12 -0
- google_genai-1.48.0/google/genai/_base_transformers.py +26 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_common.py +222 -69
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_extra_utils.py +117 -12
- google_genai-1.48.0/google/genai/_live_converters.py +1429 -0
- google_genai-1.48.0/google/genai/_local_tokenizer_loader.py +223 -0
- google_genai-1.48.0/google/genai/_operations_converters.py +298 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_replay_api_client.py +36 -9
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_tokens_converters.py +148 -502
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_transformers.py +191 -100
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/batches.py +929 -1195
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/caches.py +363 -1107
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/client.py +90 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/errors.py +12 -2
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/files.py +61 -302
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/live.py +83 -32
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/live_music.py +24 -27
- google_genai-1.48.0/google/genai/local_tokenizer.py +395 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/models.py +2356 -3255
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/operations.py +126 -21
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/tokens.py +2 -12
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/tunings.py +578 -498
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/types.py +3466 -1608
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/version.py +1 -1
- google_genai-1.30.0/README.md → google_genai-1.48.0/google_genai.egg-info/PKG-INFO +331 -161
- {google_genai-1.30.0 → google_genai-1.48.0}/google_genai.egg-info/SOURCES.txt +5 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google_genai.egg-info/requires.txt +5 -1
- {google_genai-1.30.0 → google_genai-1.48.0}/pyproject.toml +18 -14
- google_genai-1.48.0/setup.cfg +10 -0
- google_genai-1.30.0/google/genai/_live_converters.py +0 -3521
- google_genai-1.30.0/setup.cfg +0 -4
- {google_genai-1.30.0 → google_genai-1.48.0}/LICENSE +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/MANIFEST.in +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/__init__.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_adapters.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_api_module.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_base_url.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_mcp_utils.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/chats.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/pagers.py +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google/genai/py.typed +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-1.30.0 → google_genai-1.48.0}/google_genai.egg-info/top_level.txt +0 -0

````diff
--- google_genai-1.30.0/PKG-INFO
+++ google_genai-1.48.0/PKG-INFO
@@ -1,35 +1,37 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.30.0
+Version: 1.48.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: anyio<5.0.0,>=4.8.0
 Requires-Dist: google-auth<3.0.0,>=2.14.1
 Requires-Dist: httpx<1.0.0,>=0.28.1
-Requires-Dist: pydantic<3.0.0,>=2.
+Requires-Dist: pydantic<3.0.0,>=2.9.0
 Requires-Dist: requests<3.0.0,>=2.28.1
 Requires-Dist: tenacity<9.2.0,>=8.2.3
 Requires-Dist: websockets<15.1.0,>=13.0.0
 Requires-Dist: typing-extensions<5.0.0,>=4.11.0
 Provides-Extra: aiohttp
 Requires-Dist: aiohttp<4.0.0; extra == "aiohttp"
+Provides-Extra: local-tokenizer
+Requires-Dist: sentencepiece>=0.2.0; extra == "local-tokenizer"
+Requires-Dist: protobuf; extra == "local-tokenizer"
 Dynamic: license-file
 
 # Google Gen AI SDK
````
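
The metadata hunk above declares a second optional extra alongside `aiohttp`: `local-tokenizer`, which pulls in `sentencepiece` and `protobuf`. A minimal sketch of opting into either extra at install time (standard pip extras syntax; these commands are not part of the diff itself):

```sh
# Install with the optional aiohttp transport.
pip install "google-genai[aiohttp]"

# Install with local tokenization support (sentencepiece + protobuf).
pip install "google-genai[local-tokenizer]"
```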

````diff
@@ -55,6 +57,12 @@ APIs.
 pip install google-genai
 ```
 
+<small>With `uv`:</small>
+
+```sh
+uv pip install google-genai
+```
+
 ## Imports
 
 ```python
@@ -113,6 +121,83 @@ from google import genai
 client = genai.Client()
 ```
 
+## Close a client
+
+Explicitly close the sync client to ensure that resources, such as the
+underlying HTTP connections, are properly cleaned up and closed.
+
+```python
+from google.genai import Client
+
+client = Client()
+response_1 = client.models.generate_content(
+    model=MODEL_ID,
+    contents='Hello',
+)
+response_2 = client.models.generate_content(
+    model=MODEL_ID,
+    contents='Ask a question',
+)
+# Close the sync client to release resources.
+client.close()
+```
+
+To explicitly close the async client:
+
+```python
+from google.genai import Client
+
+aclient = Client(
+    vertexai=True, project='my-project-id', location='us-central1'
+).aio
+response_1 = await aclient.models.generate_content(
+    model=MODEL_ID,
+    contents='Hello',
+)
+response_2 = await aclient.models.generate_content(
+    model=MODEL_ID,
+    contents='Ask a question',
+)
+# Close the async client to release resources.
+await aclient.aclose()
+```
+
+## Client context managers
+
+By using the sync client context manager, it will close the underlying
+sync client when exiting the with block.
+
+```python
+from google.genai import Client
+
+with Client() as client:
+  response_1 = client.models.generate_content(
+      model=MODEL_ID,
+      contents='Hello',
+  )
+  response_2 = client.models.generate_content(
+      model=MODEL_ID,
+      contents='Ask a question',
+  )
+```
+
+By using the async client context manager, it will close the underlying
+async client when exiting the with block.
+
+```python
+from google.genai import Client
+
+async with Client().aio as aclient:
+  response_1 = await aclient.models.generate_content(
+      model=MODEL_ID,
+      contents='Hello',
+  )
+  response_2 = await aclient.models.generate_content(
+      model=MODEL_ID,
+      contents='Ask a question',
+  )
+```
+
 ### API Selection
 
 By default, the SDK uses the beta API endpoints provided by Google to support
@@ -155,7 +240,6 @@ Additional args of `aiohttp.ClientSession.request()` ([see _RequestOptions args]
 through the following way:
 
 ```python
-
 http_options = types.HttpOptions(
     async_client_args={'cookies': ..., 'ssl': ...},
 )
````
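
The `async_client_args` example above keeps the README's own `...` placeholders for the `cookies` and `ssl` values. As a hedged sketch of what filling them in can look like, assuming a standard `ssl.SSLContext` built from the `client.pem` used elsewhere in this README:

```python
# Sketch only: the 'ssl' key comes from the diff above; the SSLContext
# value and the cafile path are illustrative assumptions.
import ssl

from google import genai
from google.genai import types

ssl_ctx = ssl.create_default_context(cafile='client.pem')

http_options = types.HttpOptions(
    async_client_args={'ssl': ssl_ctx},
)
client = genai.Client(http_options=http_options)
```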

````diff
@@ -169,7 +253,6 @@ Both httpx and aiohttp libraries use `urllib.request.getproxies` from
 environment variables. Before client initialization, you may set proxy (and
 optional SSL_CERT_FILE) by setting the environment variables:
 
-
 ```bash
 export HTTPS_PROXY='http://username:password@proxy_uri:port'
 export SSL_CERT_FILE='client.pem'
@@ -180,15 +263,31 @@ args to `httpx.Client()`. You may install `httpx[socks]` to use it.
 Then, you can pass it through the following way:
 
 ```python
-
 http_options = types.HttpOptions(
     client_args={'proxy': 'socks5://user:pass@host:port'},
-    async_client_args={'proxy': 'socks5://user:pass@host:port'}
+    async_client_args={'proxy': 'socks5://user:pass@host:port'},
 )
 
 client=Client(..., http_options=http_options)
 ```
 
+### Custom base url
+
+In some cases you might need a custom base url (for example, API gateway proxy
+server) and bypass some authentication checks for project, location, or API key.
+You may pass the custom base url like this:
+
+```python
+base_url = 'https://test-api-gateway-proxy.com'
+client = Client(
+    vertexai=True,  # Currently only vertexai=True is supported
+    http_options={
+        'base_url': base_url,
+        'headers': {'Authorization': 'Bearer test_token'},
+    },
+)
+```
+
 ## Types
 
 Parameter types can be specified as either dictionaries(`TypedDict`) or
@@ -202,15 +301,37 @@ See the 'Create a client' section above to initialize a client.
 
 ### Generate Content
 
-#### with text content
+#### with text content input (text output)
 
 ```python
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Why is the sky blue?'
 )
 print(response.text)
 ```
 
+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+  if part.inline_data:
+    generated_image = part.as_image()
+    generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.
 
@@ -223,7 +344,7 @@ python code.
 ```python
 file = client.files.upload(file='a11.txt')
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=['Could you summarize this file?', file]
 )
 print(response.text)
@@ -243,8 +364,8 @@ This is the canonical way to provide contents, SDK will not do any conversion.
 from google.genai import types
 
 contents = types.Content(
-
-
+    role='user',
+    parts=[types.Part.from_text(text='Why is the sky blue?')]
 )
 ```
 
@@ -252,10 +373,10 @@ SDK converts this to
 
 ```python
 [
-
-
-
-
+    types.Content(
+        role='user',
+        parts=[types.Part.from_text(text='Why is the sky blue?')]
+    )
 ]
 ```
 
@@ -269,11 +390,11 @@ The SDK will assume this is a text part, and it converts this into the following
 
 ```python
 [
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text(text='Why is the sky blue?')
+        ]
+    )
 ]
 ```
 
@@ -291,12 +412,12 @@ like the following:
 
 ```python
 [
-
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text(text='Why is the sky blue?'),
+            types.Part.from_text(text='Why is the cloud white?'),
+        ]
+    )
 ]
 ```
 
@@ -309,8 +430,8 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
 from google.genai import types
 
 contents = types.Part.from_function_call(
-
-
+    name='get_weather_by_location',
+    args={'location': 'Boston'}
 )
 ```
 
@@ -318,14 +439,14 @@ The SDK converts a function call part to a content with a `model` role:
 
 ```python
 [
-
-
-
-
-
-
-
-
+    types.ModelContent(
+        parts=[
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'Boston'}
+            )
+        ]
+    )
 ]
 ```
 
@@ -338,14 +459,14 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
 from google.genai import types
 
 contents = [
-
-
-
-
-
-
-
-
+    types.Part.from_function_call(
+        name='get_weather_by_location',
+        args={'location': 'Boston'}
+    ),
+    types.Part.from_function_call(
+        name='get_weather_by_location',
+        args={'location': 'New York'}
+    ),
 ]
 ```
 
@@ -353,18 +474,18 @@ The SDK converts a list of function call parts to the a content with a `model` r
 
 ```python
 [
-
-
-
-
-
-
-
-
-
-
-
-
+    types.ModelContent(
+        parts=[
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'Boston'}
+            ),
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'New York'}
+            )
+        ]
+    )
 ]
 ```
 
@@ -377,8 +498,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
 from google.genai import types
 
 contents = types.Part.from_uri(
-
-
+    file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+    mime_type: 'image/jpeg',
 )
 ```
 
@@ -386,12 +507,12 @@ The SDK converts all non function call parts into a content with a `user` role.
 
 ```python
 [
-
-
-
-
-
-
+    types.UserContent(parts=[
+        types.Part.from_uri(
+            file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+            mime_type: 'image/jpeg',
+        )
+    ])
 ]
 ```
 
@@ -401,11 +522,11 @@ The SDK converts all non function call parts into a content with a `user` role.
 from google.genai import types
 
 contents = [
-
-
-
-
-
+    types.Part.from_text('What is this image about?'),
+    types.Part.from_uri(
+        file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+        mime_type: 'image/jpeg',
+    )
 ]
 ```
 
@@ -413,15 +534,15 @@ The SDK will convert the list of parts into a content with a `user` role
 
 ```python
 [
-
-
-
-
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text('What is this image about?'),
+            types.Part.from_uri(
+                file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+                mime_type: 'image/jpeg',
+            )
+        ]
+    )
 ]
 ```
 
@@ -527,7 +648,7 @@ print(async_pager[0])
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
@@ -555,13 +676,13 @@ def get_current_weather(location: str) -> str:
   """Returns the current weather.
 
   Args:
-
+    location: The city and state, e.g. San Francisco, CA
   """
   return 'sunny'
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )
@@ -577,14 +698,14 @@ as follows:
 from google.genai import types
 
 response = client.models.generate_content(
-
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What is the weather like in Boston?',
+    config=types.GenerateContentConfig(
+        tools=[get_current_weather],
+        automatic_function_calling=types.AutomaticFunctionCallingConfig(
+            disable=True
+        ),
     ),
-    ),
 )
 ```
 
@@ -624,7 +745,7 @@ function = types.FunctionDeclaration(
 tool = types.Tool(function_declarations=[function])
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )
@@ -668,7 +789,7 @@ function_response_content = types.Content(
 )
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         user_prompt_content,
         function_call_content,
@@ -698,12 +819,12 @@ def get_current_weather(location: str) -> str:
   """Returns the current weather.
 
  Args:
-
+    location: The city and state, e.g. San Francisco, CA
   """
   return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -728,12 +849,12 @@ def get_current_weather(location: str) -> str:
   """Returns the current weather.
 
   Args:
-
+    location: The city and state, e.g. San Francisco, CA
   """
   return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -823,11 +944,11 @@ user_profile = {
 }
 
 response = client.models.generate_content(
-    model='gemini-2.
-    contents='Give me
+    model='gemini-2.5-flash',
+    contents='Give me a random user profile.',
     config={
         'response_mime_type': 'application/json',
-        'response_json_schema':
+        'response_json_schema': user_profile
    },
 )
 print(response.parsed)
````
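
The hunk above completes the truncated `response_json_schema` example, but the body of the `user_profile` schema lies outside the hunk. A minimal sketch with an assumed plain JSON Schema dict (the property names are illustrative, not taken from the diff):

```python
# Assumed shape for the user_profile JSON Schema referenced above.
user_profile = {
    'type': 'object',
    'properties': {
        'username': {'type': 'string'},
        'age': {'type': 'integer'},
    },
    'required': ['username', 'age'],
}

response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='Give me a random user profile.',
    config={
        'response_mime_type': 'application/json',
        'response_json_schema': user_profile,
    },
)
print(response.parsed)
```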

````diff
@@ -853,7 +974,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -867,7 +988,7 @@ print(response.text)
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -905,21 +1026,23 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
 values as the response.
 
 ```python
+from enum import Enum
+
 class InstrumentEnum(Enum):
-
-
-
-
-
+  PERCUSSION = 'Percussion'
+  STRING = 'String'
+  WOODWIND = 'Woodwind'
+  BRASS = 'Brass'
+  KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-
-
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'text/x.enum',
+        'response_schema': InstrumentEnum,
+    },
+)
 print(response.text)
 ```
 
@@ -932,20 +1055,20 @@ identical but in quotes.
 from enum import Enum
 
 class InstrumentEnum(Enum):
-
-
-
-
-
+  PERCUSSION = 'Percussion'
+  STRING = 'String'
+  WOODWIND = 'Woodwind'
+  BRASS = 'Brass'
+  KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-
-
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'application/json',
+        'response_schema': InstrumentEnum,
+    },
+)
 print(response.text)
 ```
 
@@ -958,7 +1081,7 @@ to you, rather than being returned as one chunk.
 
 ```python
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -972,7 +1095,7 @@ you can use the `from_uri` class method to create a `Part` object.
 from google.genai import types
 
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_uri(
@@ -996,7 +1119,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
     image_bytes = f.read()
 
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1015,7 +1138,7 @@ of `client.models.generate_content`
 
 ```python
 response = await client.aio.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
@@ -1023,10 +1146,9 @@ print(response.text)
 
 ### Generate Content (Asynchronous Streaming)
 
-
 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
@@ -1035,7 +1157,7 @@ async for chunk in await client.aio.models.generate_content_stream(
 
 ```python
 response = client.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1047,7 +1169,7 @@ Compute tokens is only supported in Vertex AI.
 
 ```python
 response = client.models.compute_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1057,17 +1179,31 @@ print(response)
 
 ```python
 response = await client.aio.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
 ```
 
+#### Local Count Tokens
+
+```python
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
+result = tokenizer.count_tokens("What is your name?")
+```
+
+#### Local Compute Tokens
+
+```python
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
+result = tokenizer.compute_tokens("What is your name?")
+```
+
 ### Embed Content
 
 ```python
 response = client.models.embed_content(
-    model='
+    model='gemini-embedding-001',
     contents='why is the sky blue?',
 )
 print(response)
````
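
The new Local Count Tokens and Local Compute Tokens sections added above run entirely offline and depend on the `local-tokenizer` extra introduced in this release's metadata. A slightly fuller sketch (the `print` calls are additions; the tokenizer API is the one shown in the hunk):

```python
# Requires: pip install "google-genai[local-tokenizer]"
from google import genai

tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')

# Count tokens locally, without a network round trip.
count_result = tokenizer.count_tokens('What is your name?')
print(count_result)

# Compute the individual tokens locally as well.
compute_result = tokenizer.compute_tokens('What is your name?')
print(compute_result)
```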

````diff
@@ -1078,7 +1214,7 @@ from google.genai import types
 
 # multiple contents with config
 response = client.models.embed_content(
-    model='
+    model='gemini-embedding-001',
     contents=['why is the sky blue?', 'What is your age?'],
     config=types.EmbedContentConfig(output_dimensionality=10),
 )
@@ -1272,7 +1408,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
 ### Send Message (Synchronous Non-Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 response = chat.send_message('tell me a story')
 print(response.text)
 response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1282,7 +1418,7 @@ print(response.text)
 ### Send Message (Synchronous Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 for chunk in chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
@@ -1290,7 +1426,7 @@ for chunk in chat.send_message_stream('tell me a story'):
 ### Send Message (Asynchronous Non-Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 response = await chat.send_message('tell me a story')
 print(response.text)
 ```
@@ -1298,7 +1434,7 @@ print(response.text)
 ### Send Message (Asynchronous Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 async for chunk in await chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
@@ -1308,7 +1444,7 @@ async for chunk in await chat.send_message_stream('tell me a story'):
 Files are only supported in Gemini Developer API. See the 'Create a client'
 section above to initialize a client.
 
-```
+```sh
 !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
 !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
 ```
@@ -1357,7 +1493,7 @@ else:
 file_uris = [file1.uri, file2.uri]
 
 cached_content = client.caches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
@@ -1392,7 +1528,7 @@ cached_content = client.caches.get(name=cached_content.name)
 from google.genai import types
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
@@ -1409,14 +1545,14 @@ section above to initialize a client.
 
 ### Tune
 
-- Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
+- Vertex AI supports tuning from GCS source or from a [Vertex AI Multimodal Dataset](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/datasets)
 
 ```python
 from google.genai import types
 
-model = 'gemini-2.
+model = 'gemini-2.5-flash'
 training_dataset = types.TuningDataset(
-
+    # or gcs_uri=my_vertex_multimodal_dataset
     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
 )
 ```
````
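
The tuning hunk above only updates the dataset definition; the job itself is started elsewhere in the README via `client.tunings.tune`. A hedged continuation under that assumption (the `CreateTuningJobConfig` field value is illustrative, not taken from the diff):

```python
# Sketch: launch a supervised tuning job from the dataset defined above.
from google.genai import types

tuning_job = client.tunings.tune(
    base_model=model,
    training_dataset=training_dataset,
    config=types.CreateTuningJobConfig(
        tuned_model_display_name='my tuning job',  # assumed display name
    ),
)
print(tuning_job.name)
```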

````diff
@@ -1568,11 +1704,11 @@ Vertex AI:
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
 )
 
-job
+print(job)
 ```
 
 Gemini Developer API:
@@ -1580,22 +1716,22 @@ Gemini Developer API:
 ```python
 # Create a batch job with inlined requests
 batch_job = client.batches.create(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     src=[{
-
-
-
+        "contents": [{
+            "parts": [{
+                "text": "Hello!",
+            }],
+            "role": "user",
         }],
-
-        }],
-        "config:": {"response_modalities": ["text"]},
+        "config": {"response_modalities": ["text"]},
     }],
 )
 
 job
 ```
 
-In order to create a batch job with file name. Need to upload a
+In order to create a batch job with file name. Need to upload a json file.
 For example myrequests.json:
 
 ```
@@ -1608,14 +1744,14 @@ Then upload the file.
 ```python
 # Upload the file
 file = client.files.upload(
-    file='
-    config=types.UploadFileConfig(display_name='
+    file='myrequests.json',
+    config=types.UploadFileConfig(display_name='test-json')
 )
 
 # Create a batch job with file name
 batch_job = client.batches.create(
     model="gemini-2.0-flash",
-    src="files/
+    src="files/test-json",
 )
 ```
 
@@ -1696,13 +1832,13 @@ To handle errors raised by the model service, the SDK provides this [APIError](h
 from google.genai import errors
 
 try:
-
-
-
-
+    client.models.generate_content(
+        model="invalid-model-name",
+        contents="What is your name?",
+    )
 except errors.APIError as e:
-
-
+    print(e.code) # 404
+    print(e.message)
 ```
 
 ## Extra Request Body
````