google-genai 1.7.0__py3-none-any.whl → 1.53.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +4 -2
- google/genai/_adapters.py +55 -0
- google/genai/_api_client.py +1301 -299
- google/genai/_api_module.py +1 -1
- google/genai/_automatic_function_calling_util.py +54 -33
- google/genai/_base_transformers.py +26 -0
- google/genai/_base_url.py +50 -0
- google/genai/_common.py +560 -59
- google/genai/_extra_utils.py +371 -38
- google/genai/_live_converters.py +1467 -0
- google/genai/_local_tokenizer_loader.py +214 -0
- google/genai/_mcp_utils.py +117 -0
- google/genai/_operations_converters.py +394 -0
- google/genai/_replay_api_client.py +204 -92
- google/genai/_test_api_client.py +1 -1
- google/genai/_tokens_converters.py +520 -0
- google/genai/_transformers.py +633 -233
- google/genai/batches.py +1733 -538
- google/genai/caches.py +678 -1012
- google/genai/chats.py +48 -38
- google/genai/client.py +142 -15
- google/genai/documents.py +532 -0
- google/genai/errors.py +141 -35
- google/genai/file_search_stores.py +1296 -0
- google/genai/files.py +312 -744
- google/genai/live.py +617 -367
- google/genai/live_music.py +197 -0
- google/genai/local_tokenizer.py +395 -0
- google/genai/models.py +3598 -3116
- google/genai/operations.py +201 -362
- google/genai/pagers.py +23 -7
- google/genai/py.typed +1 -0
- google/genai/tokens.py +362 -0
- google/genai/tunings.py +1274 -496
- google/genai/types.py +14535 -5454
- google/genai/version.py +2 -2
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/METADATA +736 -234
- google_genai-1.53.0.dist-info/RECORD +41 -0
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/WHEEL +1 -1
- google_genai-1.7.0.dist-info/RECORD +0 -27
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info/licenses}/LICENSE +0 -0
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/top_level.txt +0 -0

Diff of `{google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/METADATA`:

````diff
@@ -1,43 +1,65 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: google-genai
-Version: 1.7.0
+Version: 1.53.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: anyio<5.0.0,>=4.8.0
-Requires-Dist: google-auth<3.0.0,>=2.14.1
+Requires-Dist: google-auth[requests]<3.0.0,>=2.14.1
 Requires-Dist: httpx<1.0.0,>=0.28.1
-Requires-Dist: pydantic<3.0.0,>=2.
+Requires-Dist: pydantic<3.0.0,>=2.9.0
 Requires-Dist: requests<3.0.0,>=2.28.1
+Requires-Dist: tenacity<9.2.0,>=8.2.3
 Requires-Dist: websockets<15.1.0,>=13.0.0
 Requires-Dist: typing-extensions<5.0.0,>=4.11.0
+Provides-Extra: aiohttp
+Requires-Dist: aiohttp<3.13.3; extra == "aiohttp"
+Provides-Extra: local-tokenizer
+Requires-Dist: sentencepiece>=0.2.0; extra == "local-tokenizer"
+Requires-Dist: protobuf; extra == "local-tokenizer"
+Dynamic: license-file
 
 # Google Gen AI SDK
 
 [](https://pypi.org/project/google-genai/)
+
+[](https://pypistats.org/packages/google-genai)
 
 --------
 **Documentation:** https://googleapis.github.io/python-genai/
 
 -----
 
-Google Gen AI Python SDK provides an interface for developers to integrate
+Google Gen AI Python SDK provides an interface for developers to integrate
+Google's generative models into their Python applications. It supports the
+[Gemini Developer API](https://ai.google.dev/gemini-api/docs) and
+[Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)
+APIs.
+
+## Code Generation
+
+Generative models are often unaware of recent API and SDK updates and may suggest outdated or legacy code.
+
+We recommend using our Code Generation instructions [codegen_instructions.md](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md) when generating Google Gen AI SDK code to guide your model towards using the more recent SDK features.
+
+Copy and paste the instructions from [this file](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md)
+into your development environment to provide the model with the necessary
+context
 
 ## Installation
 
````

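The `codegen_instructions.md` file referenced in the Code Generation section above is a plain markdown document, so it can be pulled into a local context file with the standard library alone. A minimal sketch: only the URL comes from the README; the output filename is an illustrative choice.

```python
import urllib.request

# URL taken from the README above; the local filename is illustrative.
URL = 'https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md'

with urllib.request.urlopen(URL) as resp:
    instructions = resp.read().decode('utf-8')

# Save it next to your project so a coding model can load it as context.
with open('codegen_instructions.md', 'w', encoding='utf-8') as f:
    f.write(instructions)
```
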
````diff
@@ -45,6 +67,12 @@ Google Gen AI Python SDK provides an interface for developers to integrate Googl
 pip install google-genai
 ```
 
+<small>With `uv`:</small>
+
+```sh
+uv pip install google-genai
+```
+
 ## Imports
 
 ```python
@@ -58,31 +86,76 @@ Please run one of the following code blocks to create a client for
 different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)).
 
 ```python
+from google import genai
+
 # Only run this block for Gemini Developer API
 client = genai.Client(api_key='GEMINI_API_KEY')
 ```
 
 ```python
+from google import genai
+
 # Only run this block for Vertex AI API
 client = genai.Client(
     vertexai=True, project='your-project-id', location='us-central1'
 )
 ```
 
+## Using types
+
+All API methods support Pydantic types and dictionaries, which you can access
+from `google.genai.types`. You can import the types module with the following:
+
+```python
+from google.genai import types
+```
+
+Below is an example `generate_content()` call using types from the types module:
+
+```python
+response = client.models.generate_content(
+    model='gemini-2.0-flash-001',
+    contents=types.Part.from_text(text='Why is the sky blue?'),
+    config=types.GenerateContentConfig(
+        temperature=0,
+        top_p=0.95,
+        top_k=20,
+    ),
+)
+```
+
+Alternatively, you can accomplish the same request using dictionaries instead of
+types:
+
+```python
+response = client.models.generate_content(
+    model='gemini-2.0-flash-001',
+    contents={'text': 'Why is the sky blue?'},
+    config={
+        'temperature': 0,
+        'top_p': 0.95,
+        'top_k': 20,
+    },
+)
+```
+
 **(Optional) Using environment variables:**
 
 You can create a client by configuring the necessary environment variables.
 Configuration setup instructions depends on whether you're using the Gemini
 Developer API or the Gemini API in Vertex AI.
 
-**Gemini Developer API:** Set `
+**Gemini Developer API:** Set the `GEMINI_API_KEY` or `GOOGLE_API_KEY`.
+It will automatically be picked up by the client. It's recommended that you
+set only one of those variables, but if both are set, `GOOGLE_API_KEY` takes
+precedence.
 
 ```bash
-export
+export GEMINI_API_KEY='your-api-key'
 ```
 
-**Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`,
-and `GOOGLE_CLOUD_LOCATION`, as shown below:
+**Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`,
+`GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION`, as shown below:
 
 ```bash
 export GOOGLE_GENAI_USE_VERTEXAI=true
````

````diff
@@ -91,9 +164,88 @@ export GOOGLE_CLOUD_LOCATION='us-central1'
 ```
 
 ```python
+from google import genai
+
 client = genai.Client()
 ```
 
+## Close a client
+
+Explicitly close the sync client to ensure that resources, such as the
+underlying HTTP connections, are properly cleaned up and closed.
+
+```python
+from google.genai import Client
+
+client = Client()
+response_1 = client.models.generate_content(
+    model=MODEL_ID,
+    contents='Hello',
+)
+response_2 = client.models.generate_content(
+    model=MODEL_ID,
+    contents='Ask a question',
+)
+# Close the sync client to release resources.
+client.close()
+```
+
+To explicitly close the async client:
+
+```python
+from google.genai import Client
+
+aclient = Client(
+    vertexai=True, project='my-project-id', location='us-central1'
+).aio
+response_1 = await aclient.models.generate_content(
+    model=MODEL_ID,
+    contents='Hello',
+)
+response_2 = await aclient.models.generate_content(
+    model=MODEL_ID,
+    contents='Ask a question',
+)
+# Close the async client to release resources.
+await aclient.aclose()
+```
+
+## Client context managers
+
+By using the sync client context manager, it will close the underlying
+sync client when exiting the with block.
+
+```python
+from google.genai import Client
+
+with Client() as client:
+    response_1 = client.models.generate_content(
+        model=MODEL_ID,
+        contents='Hello',
+    )
+    response_2 = client.models.generate_content(
+        model=MODEL_ID,
+        contents='Ask a question',
+    )
+```
+
+By using the async client context manager, it will close the underlying
+async client when exiting the with block.
+
+```python
+from google.genai import Client
+
+async with Client().aio as aclient:
+    response_1 = await aclient.models.generate_content(
+        model=MODEL_ID,
+        contents='Hello',
+    )
+    response_2 = await aclient.models.generate_content(
+        model=MODEL_ID,
+        contents='Ask a question',
+    )
+```
+
 ### API Selection
 
 By default, the SDK uses the beta API endpoints provided by Google to support
````

````diff
@@ -104,6 +256,9 @@ To set the API version use `http_options`. For example, to set the API version
 to `v1` for Vertex AI:
 
 ```python
+from google import genai
+from google.genai import types
+
 client = genai.Client(
     vertexai=True,
     project='your-project-id',
@@ -115,12 +270,72 @@ client = genai.Client(
 To set the API version to `v1alpha` for the Gemini Developer API:
 
 ```python
+from google import genai
+from google.genai import types
+
 client = genai.Client(
     api_key='GEMINI_API_KEY',
     http_options=types.HttpOptions(api_version='v1alpha')
 )
 ```
 
+### Faster async client option: Aiohttp
+
+By default we use httpx for both sync and async client implementations. In order
+to have faster performance, you may install `google-genai[aiohttp]`. In Gen AI
+SDK we configure `trust_env=True` to match with the default behavior of httpx.
+Additional args of `aiohttp.ClientSession.request()` ([see _RequestOptions args](https://github.com/aio-libs/aiohttp/blob/v3.12.13/aiohttp/client.py#L170)) can be passed
+through the following way:
+
+```python
+http_options = types.HttpOptions(
+    async_client_args={'cookies': ..., 'ssl': ...},
+)
+
+client=Client(..., http_options=http_options)
+```
+
+### Proxy
+
+Both httpx and aiohttp libraries use `urllib.request.getproxies` from
+environment variables. Before client initialization, you may set proxy (and
+optional SSL_CERT_FILE) by setting the environment variables:
+
+```bash
+export HTTPS_PROXY='http://username:password@proxy_uri:port'
+export SSL_CERT_FILE='client.pem'
+```
+
+If you need `socks5` proxy, httpx [supports](https://www.python-httpx.org/advanced/proxies/#socks) `socks5` proxy if you pass it via
+args to `httpx.Client()`. You may install `httpx[socks]` to use it.
+Then, you can pass it through the following way:
+
+```python
+http_options = types.HttpOptions(
+    client_args={'proxy': 'socks5://user:pass@host:port'},
+    async_client_args={'proxy': 'socks5://user:pass@host:port'},
+)
+
+client=Client(..., http_options=http_options)
+```
+
+### Custom base url
+
+In some cases you might need a custom base url (for example, API gateway proxy
+server) and bypass some authentication checks for project, location, or API key.
+You may pass the custom base url like this:
+
+```python
+base_url = 'https://test-api-gateway-proxy.com'
+client = Client(
+    vertexai=True,  # Currently only vertexai=True is supported
+    http_options={
+        'base_url': base_url,
+        'headers': {'Authorization': 'Bearer test_token'},
+    },
+)
+```
+
 ## Types
 
 Parameter types can be specified as either dictionaries(`TypedDict`) or
````

````diff
@@ -129,19 +344,42 @@ Pydantic model types are available in the `types` module.
 
 ## Models
 
-The `client.models`
+The `client.models` module exposes model inferencing and model getters.
+See the 'Create a client' section above to initialize a client.
 
 ### Generate Content
 
-#### with text content
+#### with text content input (text output)
 
 ```python
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Why is the sky blue?'
 )
 print(response.text)
 ```
 
+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+    if part.inline_data:
+        generated_image = part.as_image()
+        generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.
 
@@ -154,7 +392,7 @@ python code.
 ```python
 file = client.files.upload(file='a11.txt')
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=['Could you summarize this file?', file]
 )
 print(response.text)
````

````diff
@@ -171,9 +409,11 @@ This is the canonical way to provide contents, SDK will not do any conversion.
 ##### Provide a `types.Content` instance
 
 ```python
+from google.genai import types
+
 contents = types.Content(
-
-
+    role='user',
+    parts=[types.Part.from_text(text='Why is the sky blue?')]
 )
 ```
 
@@ -181,10 +421,10 @@ SDK converts this to
 
 ```python
 [
-
-
-
-
+    types.Content(
+        role='user',
+        parts=[types.Part.from_text(text='Why is the sky blue?')]
+    )
 ]
 ```
 
@@ -198,11 +438,11 @@ The SDK will assume this is a text part, and it converts this into the following
 
 ```python
 [
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text(text='Why is the sky blue?')
+        ]
+    )
 ]
 ```
 
@@ -220,12 +460,12 @@ like the following:
 
 ```python
 [
-
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text(text='Why is the sky blue?'),
+            types.Part.from_text(text='Why is the cloud white?'),
+        ]
+    )
 ]
 ```
 
@@ -235,9 +475,11 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
 ##### Provide a function call part
 
 ```python
+from google.genai import types
+
 contents = types.Part.from_function_call(
-
-
+    name='get_weather_by_location',
+    args={'location': 'Boston'}
 )
 ```
 
@@ -245,14 +487,14 @@ The SDK converts a function call part to a content with a `model` role:
 
 ```python
 [
-
-
-
-
-
-
-
-
+    types.ModelContent(
+        parts=[
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'Boston'}
+            )
+        ]
+    )
 ]
 ```
 
@@ -262,15 +504,17 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
 ##### Provide a list of function call parts
 
 ```python
+from google.genai import types
+
 contents = [
-
-
-
-
-
-
-
-
+    types.Part.from_function_call(
+        name='get_weather_by_location',
+        args={'location': 'Boston'}
+    ),
+    types.Part.from_function_call(
+        name='get_weather_by_location',
+        args={'location': 'New York'}
+    ),
 ]
 ```
 
@@ -278,18 +522,18 @@ The SDK converts a list of function call parts to the a content with a `model` r
 
 ```python
 [
-
-
-
-
-
-
-
-
-
-
-
-
+    types.ModelContent(
+        parts=[
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'Boston'}
+            ),
+            types.Part.from_function_call(
+                name='get_weather_by_location',
+                args={'location': 'New York'}
+            )
+        ]
+    )
 ]
 ```
 
@@ -299,9 +543,11 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
 ##### Provide a non function call part
 
 ```python
+from google.genai import types
+
 contents = types.Part.from_uri(
-
-
+    file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+    mime_type: 'image/jpeg',
 )
 ```
 
@@ -309,24 +555,26 @@ The SDK converts all non function call parts into a content with a `user` role.
 
 ```python
 [
-
-
-
-
-
-
+    types.UserContent(parts=[
+        types.Part.from_uri(
+            file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+            mime_type: 'image/jpeg',
+        )
+    ])
 ]
 ```
 
 ##### Provide a list of non function call parts
 
 ```python
+from google.genai import types
+
 contents = [
-
-
-
-
-
+    types.Part.from_text('What is this image about?'),
+    types.Part.from_uri(
+        file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+        mime_type: 'image/jpeg',
+    )
 ]
 ```
 
@@ -334,15 +582,15 @@ The SDK will convert the list of parts into a content with a `user` role
 
 ```python
 [
-
-
-
-
-
-
-
-
-
+    types.UserContent(
+        parts=[
+            types.Part.from_text('What is this image about?'),
+            types.Part.from_uri(
+                file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+                mime_type: 'image/jpeg',
+            )
+        ]
+    )
 ]
 ```
````

````diff
@@ -360,11 +608,17 @@ If you put a list within a list, the inner list can only contain
 ### System Instructions and Other Configs
 
 The output of the model can be influenced by several optional settings
-available in generate_content's config parameter. For example,
-
-
+available in generate_content's config parameter. For example, increasing
+`max_output_tokens` is essential for longer model responses. To make a model more
+deterministic, lowering the `temperature` parameter reduces randomness, with
+values near 0 minimizing variability. Capabilities and parameter defaults for
+each model is shown in the
+[Vertex AI docs](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash)
+and [Gemini API docs](https://ai.google.dev/gemini-api/docs/models) respectively.
 
 ```python
+from google.genai import types
+
 response = client.models.generate_content(
     model='gemini-2.0-flash-001',
     contents='high',
@@ -377,31 +631,6 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
-### Typed Config
-
-All API methods support Pydantic types for parameters as well as
-dictionaries. You can get the type from `google.genai.types`.
-
-```python
-response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
-    contents=types.Part.from_text(text='Why is the sky blue?'),
-    config=types.GenerateContentConfig(
-        temperature=0,
-        top_p=0.95,
-        top_k=20,
-        candidate_count=1,
-        seed=5,
-        max_output_tokens=100,
-        stop_sequences=['STOP!'],
-        presence_penalty=0.0,
-        frequency_penalty=0.0,
-    ),
-)
-
-print(response.text)
-```
-
 ### List Base Models
 
 To retrieve tuned models, see [list tuned models](#list-tuned-models).
````

````diff
@@ -419,7 +648,7 @@ pager.next_page()
 print(pager[0])
 ```
 
-####
+#### List Base Models (Asynchronous)
 
 ```python
 async for job in await client.aio.models.list():
@@ -437,8 +666,10 @@ print(async_pager[0])
 ### Safety Settings
 
 ```python
+from google.genai import types
+
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
````

````diff
@@ -460,17 +691,19 @@ You can pass a Python function directly and it will be automatically
 called and responded by default.
 
 ```python
+from google.genai import types
+
 def get_current_weather(location: str) -> str:
     """Returns the current weather.
 
     Args:
-
+        location: The city and state, e.g. San Francisco, CA
     """
     return 'sunny'
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )
@@ -483,15 +716,17 @@ automatic function calling, you can disable automatic function calling
 as follows:
 
 ```python
+from google.genai import types
+
 response = client.models.generate_content(
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What is the weather like in Boston?',
+    config=types.GenerateContentConfig(
+        tools=[get_current_weather],
+        automatic_function_calling=types.AutomaticFunctionCallingConfig(
+            disable=True
+        ),
     ),
-    ),
 )
 ```
@@ -511,25 +746,27 @@ The following example shows how to declare a function and pass it as a tool.
 Then you will receive a function call part in the response.
 
 ```python
+from google.genai import types
+
 function = types.FunctionDeclaration(
     name='get_current_weather',
     description='Get the current weather in a given location',
-
-    type
-    properties
-    'location':
-    type
-    description
-
+    parameters_json_schema={
+        'type': 'object',
+        'properties': {
+            'location': {
+                'type': 'string',
+                'description': 'The city and state, e.g. San Francisco, CA',
+            }
         },
-    required
-
+        'required': ['location'],
+    },
 )
 
 tool = types.Tool(function_declarations=[function])
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )
@@ -543,6 +780,8 @@ the model.
 The following example shows how to do it for a simple function invocation.
 
 ```python
+from google.genai import types
+
 user_prompt_content = types.Content(
     role='user',
     parts=[types.Part.from_text(text='What is the weather like in Boston?')],
@@ -571,7 +810,7 @@ function_response_content = types.Content(
 )
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         user_prompt_content,
         function_call_content,
@@ -595,16 +834,18 @@ maximum remote call for automatic function calling (default to 10 times).
 If you'd like to disable automatic function calling in `ANY` mode:
 
 ```python
+from google.genai import types
+
 def get_current_weather(location: str) -> str:
     """Returns the current weather.
 
     Args:
-
+        location: The city and state, e.g. San Francisco, CA
     """
     return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -623,16 +864,18 @@ configure the maximum remote calls to be `x + 1`.
 Assuming you prefer `1` turn for automatic function calling.
 
 ```python
+from google.genai import types
+
 def get_current_weather(location: str) -> str:
     """Returns the current weather.
 
     Args:
-
+        location: The city and state, e.g. San Francisco, CA
     """
     return "sunny"
 
 response = client.models.generate_content(
-    model="gemini-2.
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
````

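The two hunks above stop inside `GenerateContentConfig(`, so the `ANY`-mode wiring itself sits in unchanged context lines the viewer elides. For orientation, a hedged sketch of how such a call is typically spelled with the SDK's public types; the `tool_config` and `maximum_remote_calls` fields are assumptions based on `google.genai.types`, not lines from this diff:

```python
from google.genai import types

# Assumed shape, not taken from the diff: force a function call on every
# turn ('ANY' mode) and allow one automatic function-calling turn, i.e.
# maximum_remote_calls = x + 1 with x = 1 as described above.
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(
        tools=[get_current_weather],
        tool_config=types.ToolConfig(
            function_calling_config=types.FunctionCallingConfig(mode='ANY')
        ),
        automatic_function_calling=types.AutomaticFunctionCallingConfig(
            maximum_remote_calls=2
        ),
    ),
)
```
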
````diff
@@ -645,18 +888,100 @@ response = client.models.generate_content(
     ),
 )
 ```
+
+#### Model Context Protocol (MCP) support (experimental)
+
+Built-in [MCP](https://modelcontextprotocol.io/introduction) support is an
+experimental feature. You can pass a local MCP server as a tool directly.
+
+```python
+import os
+import asyncio
+from datetime import datetime
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+from google import genai
+
+client = genai.Client()
+
+# Create server parameters for stdio connection
+server_params = StdioServerParameters(
+    command="npx",  # Executable
+    args=["-y", "@philschmid/weather-mcp"],  # MCP Server
+    env=None,  # Optional environment variables
+)
+
+async def run():
+    async with stdio_client(server_params) as (read, write):
+        async with ClientSession(read, write) as session:
+            # Prompt to get the weather for the current day in London.
+            prompt = f"What is the weather in London in {datetime.now().strftime('%Y-%m-%d')}?"
+
+            # Initialize the connection between client and server
+            await session.initialize()
+
+            # Send request to the model with MCP function declarations
+            response = await client.aio.models.generate_content(
+                model="gemini-2.5-flash",
+                contents=prompt,
+                config=genai.types.GenerateContentConfig(
+                    temperature=0,
+                    tools=[session],  # uses the session, will automatically call the tool using automatic function calling
+                ),
+            )
+            print(response.text)
+
+# Start the asyncio event loop and run the main function
+asyncio.run(run())
+```
+
 ### JSON Response Schema
 
 However you define your schema, don't duplicate it in your input prompt,
 including by giving examples of expected JSON output. If you do, the generated
 output might be lower in quality.
 
+#### JSON Schema support
+Schemas can be provided as standard JSON schema.
+```python
+user_profile = {
+    'properties': {
+        'age': {
+            'anyOf': [
+                {'maximum': 20, 'minimum': 0, 'type': 'integer'},
+                {'type': 'null'},
+            ],
+            'title': 'Age',
+        },
+        'username': {
+            'description': "User's unique name",
+            'title': 'Username',
+            'type': 'string',
+        },
+    },
+    'required': ['username', 'age'],
+    'title': 'User Schema',
+    'type': 'object',
+}
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash',
+    contents='Give me a random user profile.',
+    config={
+        'response_mime_type': 'application/json',
+        'response_json_schema': user_profile
+    },
+)
+print(response.parsed)
+```
+
 #### Pydantic Model Schema support
 
 Schemas can be provided as Pydantic Models.
 
 ```python
 from pydantic import BaseModel
+from google.genai import types
 
 
 class CountryInfo(BaseModel):
@@ -670,7 +995,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -681,8 +1006,10 @@ print(response.text)
 ```
 
 ```python
+from google.genai import types
+
 response = client.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
````

````diff
@@ -720,56 +1047,62 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
 values as the response.
 
 ```python
+from enum import Enum
+
 class InstrumentEnum(Enum):
-
-
-
-
-
+    PERCUSSION = 'Percussion'
+    STRING = 'String'
+    WOODWIND = 'Woodwind'
+    BRASS = 'Brass'
+    KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-
-
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'text/x.enum',
+        'response_schema': InstrumentEnum,
+    },
+)
 print(response.text)
 ```
 
 #### JSON Response
 
-You can also set response_mime_type to 'application/json', the response will be
+You can also set response_mime_type to 'application/json', the response will be
+identical but in quotes.
 
 ```python
 from enum import Enum
 
 class InstrumentEnum(Enum):
-
-
-
-
-
+    PERCUSSION = 'Percussion'
+    STRING = 'String'
+    WOODWIND = 'Woodwind'
+    BRASS = 'Brass'
+    KEYBOARD = 'Keyboard'
 
 response = client.models.generate_content(
-
-
-
-
-
-
-
+    model='gemini-2.5-flash',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'application/json',
+        'response_schema': InstrumentEnum,
+    },
+)
 print(response.text)
 ```
 
-### Streaming
+### Generate Content (Synchronous Streaming)
+
+Generate content in a streaming format so that the model outputs streams back
+to you, rather than being returned as one chunk.
 
 #### Streaming for text content
 
 ```python
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
````

````diff
@@ -780,8 +1113,10 @@ If your image is stored in [Google Cloud Storage](https://cloud.google.com/stora
 you can use the `from_uri` class method to create a `Part` object.
 
 ```python
+from google.genai import types
+
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_uri(
@@ -797,13 +1132,15 @@ If your image is stored in your local file system, you can read it in as bytes
 data and use the `from_bytes` class method to create a `Part` object.
 
 ```python
+from google.genai import types
+
 YOUR_IMAGE_PATH = 'your_image_path'
 YOUR_IMAGE_MIME_TYPE = 'your_image_mime_type'
 with open(YOUR_IMAGE_PATH, 'rb') as f:
     image_bytes = f.read()
 
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -812,27 +1149,27 @@ for chunk in client.models.generate_content_stream(
     print(chunk.text, end='')
 ```
 
-###
+### Generate Content (Asynchronous Non Streaming)
 
 `client.aio` exposes all the analogous [`async` methods](https://docs.python.org/3/library/asyncio.html)
-that are available on `client
+that are available on `client`. Note that it applies to all the modules.
 
 For example, `client.aio.models.generate_content` is the `async` version
 of `client.models.generate_content`
 
 ```python
 response = await client.aio.models.generate_content(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
 ```
 
-### Streaming
+### Generate Content (Asynchronous Streaming)
 
 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model='gemini-2.
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text, end='')
 ```
````

````diff
@@ -841,7 +1178,7 @@ async for chunk in await client.aio.models.generate_content_stream(
 
 ```python
 response = client.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -853,7 +1190,7 @@ Compute tokens is only supported in Vertex AI.
 
 ```python
 response = client.models.compute_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
````

````diff
@@ -863,26 +1200,42 @@ print(response)
 
 ```python
 response = await client.aio.models.count_tokens(
-    model='gemini-2.
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
 ```
 
+#### Local Count Tokens
+
+```python
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
+result = tokenizer.count_tokens("What is your name?")
+```
+
+#### Local Compute Tokens
+
+```python
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
+result = tokenizer.compute_tokens("What is your name?")
+```
+
 ### Embed Content
 
 ```python
 response = client.models.embed_content(
-    model='
+    model='gemini-embedding-001',
     contents='why is the sky blue?',
 )
 print(response)
 ```
 
 ```python
+from google.genai import types
+
 # multiple contents with config
 response = client.models.embed_content(
-    model='
+    model='gemini-embedding-001',
     contents=['why is the sky blue?', 'What is your age?'],
     config=types.EmbedContentConfig(output_dimensionality=10),
 )
````

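The local tokenizer shown above runs entirely client side; per the METADATA changes earlier in this diff it relies on the optional `local-tokenizer` extra (`sentencepiece` and `protobuf`). A hedged usage sketch; the result fields printed here are assumptions mirroring the server-side `count_tokens`/`compute_tokens` responses, not lines from this diff:

```python
from google import genai

# Requires the optional extra: pip install "google-genai[local-tokenizer]"
tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')

count = tokenizer.count_tokens('What is your name?')
print(count.total_tokens)  # assumed field, mirroring CountTokensResponse

computed = tokenizer.compute_tokens('What is your name?')
print(computed.tokens_info)  # assumed field, mirroring ComputeTokensResponse
```
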
````diff
@@ -897,9 +1250,11 @@ print(response)
 Support for generate images in Gemini Developer API is behind an allowlist
 
 ```python
+from google.genai import types
+
 # Generate Image
 response1 = client.models.generate_images(
-    model='imagen-
+    model='imagen-4.0-generate-001',
     prompt='An umbrella in the foreground, and a rainy night sky in the background',
     config=types.GenerateImagesConfig(
         number_of_images=1,
@@ -915,9 +1270,11 @@ response1.generated_images[0].image.show()
 Upscale image is only supported in Vertex AI.
 
 ```python
+from google.genai import types
+
 # Upscale the generated image from above
 response2 = client.models.upscale_image(
-    model='imagen-
+    model='imagen-4.0-upscale-preview',
     image=response1.generated_images[0].image,
     upscale_factor='x2',
     config=types.UpscaleImageConfig(
@@ -936,6 +1293,7 @@ Edit image is only supported in Vertex AI.
 
 ```python
 # Edit the generated image from above
+from google.genai import types
 from google.genai.types import RawReferenceImage, MaskReferenceImage
 
 raw_ref_image = RawReferenceImage(
@@ -968,18 +1326,19 @@ response3.generated_images[0].image.show()
 
 ### Veo
 
-
+Support for generating videos is considered public preview
 
-
+#### Generate Videos (Text to Video)
 
 ```python
+from google.genai import types
+
 # Create operation
 operation = client.models.generate_videos(
-    model='veo-
+    model='veo-3.1-generate-preview',
     prompt='A neon hologram of a cat driving at top speed',
     config=types.GenerateVideosConfig(
         number_of_videos=1,
-        fps=24,
         duration_seconds=5,
         enhance_prompt=True,
     ),
````

````diff
@@ -990,51 +1349,124 @@ while not operation.done:
     time.sleep(20)
     operation = client.operations.get(operation)
 
-video = operation.
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Image to Video)
+
+```python
+from google.genai import types
+
+# Read local image (uses mimetypes.guess_type to infer mime type)
+image = types.Image.from_file("local/path/file.png")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-3.1-generate-preview',
+    # Prompt is optional if image is provided
+    prompt='Night sky',
+    image=image,
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+        # Can also pass an Image into last_frame for frame interpolation
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Video to Video)
+
+Currently, only Gemini Developer API supports video extension on Veo 3.1 for
+previously generated videos. Vertex supports video extension on Veo 2.0.
+
+```python
+from google.genai import types
+
+# Read local video (uses mimetypes.guess_type to infer mime type)
+video = types.Video.from_file("local/path/video.mp4")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-3.1-generate-preview',
+    # Prompt is optional if Video is provided
+    prompt='Night sky',
+    # Input video must be in GCS for Vertex or a URI for Gemini
+    video=types.Video(
+        uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
+    ),
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
 video.show()
 ```
 
 ## Chats
 
-Create a chat session to start a multi-turn conversations with the model.
+Create a chat session to start a multi-turn conversations with the model. Then,
+use `chat.send_message` function multiple times within the same chat session so
+that it can reflect on its previous responses (i.e., engage in an ongoing
+conversation). See the 'Create a client' section above to initialize a client.
 
-### Send Message
+### Send Message (Synchronous Non-Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 response = chat.send_message('tell me a story')
 print(response.text)
+response = chat.send_message('summarize the story you told me in 1 sentence')
+print(response.text)
 ```
 
-### Streaming
+### Send Message (Synchronous Streaming)
 
 ```python
-chat = client.chats.create(model='gemini-2.
+chat = client.chats.create(model='gemini-2.5-flash')
 for chunk in chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
 
-###
+### Send Message (Asynchronous Non-Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 response = await chat.send_message('tell me a story')
 print(response.text)
 ```
 
-###
+### Send Message (Asynchronous Streaming)
 
 ```python
-chat = client.aio.chats.create(model='gemini-2.
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 async for chunk in await chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
 
 ## Files
 
-Files are only supported in Gemini Developer API.
+Files are only supported in Gemini Developer API. See the 'Create a client'
+section above to initialize a client.
 
-```
+```sh
 !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
 !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
 ```
````

````diff
@@ -1066,11 +1498,14 @@ client.files.delete(name=file3.name)
 
 ## Caches
 
-`client.caches` contains the control plane APIs for cached content
+`client.caches` contains the control plane APIs for cached content. See the
+'Create a client' section above to initialize a client.
 
 ### Create
 
 ```python
+from google.genai import types
+
 if client.vertexai:
     file_uris = [
         'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
@@ -1080,7 +1515,7 @@ else:
     file_uris = [file1.uri, file2.uri]
 
 cached_content = client.caches.create(
-    model='gemini-
+    model='gemini-2.5-flash',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
@@ -1112,8 +1547,10 @@ cached_content = client.caches.get(name=cached_content.name)
 ### Generate Content with Caches
 
 ```python
+from google.genai import types
+
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.5-flash',
     contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
````

````diff
@@ -1125,33 +1562,26 @@ print(response.text)
 ## Tunings
 
 `client.tunings` contains tuning job APIs and supports supervised fine
-tuning through `tune`.
+tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
+section above to initialize a client.
 
 ### Tune
 
-- Vertex AI supports tuning from GCS source
-- Gemini Developer API supports tuning from inline examples
+- Vertex AI supports tuning from GCS source or from a [Vertex AI Multimodal Dataset](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/datasets)
 
 ```python
-
-
-
-
-
-
-
-training_dataset = types.TuningDataset(
-    examples=[
-        types.TuningExample(
-            text_input=f'Input text {i}',
-            output=f'Output text {i}',
-        )
-        for i in range(5)
-    ],
-)
+from google.genai import types
+
+model = 'gemini-2.5-flash'
+training_dataset = types.TuningDataset(
+    # or gcs_uri=my_vertex_multimodal_dataset
+    gcs_uri='gs://your-gcs-bucket/your-tuning-data.jsonl',
+)
 ```
 
 ```python
+from google.genai import types
+
 tuning_job = client.tunings.tune(
     base_model=model,
     training_dataset=training_dataset,
@@ -1172,14 +1602,15 @@ print(tuning_job)
 ```python
 import time
 
-
+completed_states = set(
     [
-        '
-        '
+        'JOB_STATE_SUCCEEDED',
+        'JOB_STATE_FAILED',
+        'JOB_STATE_CANCELLED',
     ]
 )
 
-while tuning_job.state in
+while tuning_job.state not in completed_states:
     print(tuning_job.state)
     tuning_job = client.tunings.get(name=tuning_job.name)
     time.sleep(10)
@@ -1238,6 +1669,8 @@ print(async_pager[0])
 ### Update Tuned Model
 
 ```python
+from google.genai import types
+
 model = pager[0]
 
 model = client.models.update(
````

````diff
@@ -1283,20 +1716,68 @@ print(async_pager[0])
 
 ## Batch Prediction
 
-Only supported in Vertex AI.
+Only supported in Vertex AI. See the 'Create a client' section above to
+initialize a client.
 
 ### Create
 
+Vertex AI:
+
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model='gemini-
-    src='bq://my-project.my-dataset.my-table',
+    model='gemini-2.5-flash',
+    src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
+)
+
+print(job)
+```
+
+Gemini Developer API:
+
+```python
+# Create a batch job with inlined requests
+batch_job = client.batches.create(
+    model="gemini-2.5-flash",
+    src=[{
+        "contents": [{
+            "parts": [{
+                "text": "Hello!",
+            }],
+            "role": "user",
+        }],
+        "config": {"response_modalities": ["text"]},
+    }],
 )
 
 job
 ```
 
+In order to create a batch job with file name. Need to upload a json file.
+For example myrequests.json:
+
+```
+{"key":"request_1", "request": {"contents": [{"parts": [{"text":
+"Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
+{"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
+```
+Then upload the file.
+
+```python
+# Upload the file
+file = client.files.upload(
+    file='myrequests.json',
+    config=types.UploadFileConfig(display_name='test-json')
+)
+
+# Create a batch job with file name
+batch_job = client.batches.create(
+    model="gemini-2.0-flash",
+    src="files/test-json",
+)
+```
+
+
 ```python
 # Get a job by name
 job = client.batches.get(name=job.name)
````

````diff
@@ -1373,11 +1854,32 @@ To handle errors raised by the model service, the SDK provides this [APIError](h
 from google.genai import errors
 
 try:
-
-
-
-
+    client.models.generate_content(
+        model="invalid-model-name",
+        contents="What is your name?",
+    )
 except errors.APIError as e:
-
-
+    print(e.code) # 404
+    print(e.message)
+```
+
+## Extra Request Body
+
+The `extra_body` field in `HttpOptions` accepts a dictionary of additional JSON
+properties to include in the request body. This can be used to access new or
+experimental backend features that are not yet formally supported in the SDK.
+The structure of the dictionary must match the backend API's request structure.
+
+- VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
+- GeminiAPI backend API docs: https://ai.google.dev/api/rest
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.5-pro",
+    contents="What is the weather in Boston? and how about Sunnyvale?",
+    config=types.GenerateContentConfig(
+        tools=[get_current_weather],
+        http_options=types.HttpOptions(extra_body={'tool_config': {'function_calling_config': {'mode': 'COMPOSITIONAL'}}}),
+    ),
+)
 ```
````
