google-genai 1.36.0__tar.gz → 1.51.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {google_genai-1.36.0/google_genai.egg-info → google_genai-1.51.0}/PKG-INFO +294 -174
  2. {google_genai-1.36.0 → google_genai-1.51.0}/README.md +289 -168
  3. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_api_client.py +303 -208
  4. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_common.py +256 -70
  5. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_extra_utils.py +137 -4
  6. google_genai-1.51.0/google/genai/_live_converters.py +1467 -0
  7. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_local_tokenizer_loader.py +0 -9
  8. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_operations_converters.py +186 -99
  9. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_replay_api_client.py +48 -51
  10. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_tokens_converters.py +169 -510
  11. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_transformers.py +142 -79
  12. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/batches.py +765 -1592
  13. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/caches.py +420 -1133
  14. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/client.py +101 -0
  15. google_genai-1.51.0/google/genai/documents.py +552 -0
  16. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/errors.py +58 -4
  17. google_genai-1.51.0/google/genai/file_search_stores.py +1312 -0
  18. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/files.py +52 -304
  19. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/live.py +83 -32
  20. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/live_music.py +24 -27
  21. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/local_tokenizer.py +7 -2
  22. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/models.py +2257 -3442
  23. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/operations.py +129 -21
  24. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/pagers.py +7 -1
  25. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/tokens.py +2 -12
  26. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/tunings.py +679 -429
  27. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/types.py +4033 -1229
  28. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/version.py +1 -1
  29. {google_genai-1.36.0 → google_genai-1.51.0/google_genai.egg-info}/PKG-INFO +294 -174
  30. {google_genai-1.36.0 → google_genai-1.51.0}/google_genai.egg-info/SOURCES.txt +3 -0
  31. {google_genai-1.36.0 → google_genai-1.51.0}/google_genai.egg-info/requires.txt +1 -1
  32. {google_genai-1.36.0 → google_genai-1.51.0}/pyproject.toml +21 -14
  33. google_genai-1.51.0/setup.cfg +10 -0
  34. google_genai-1.36.0/google/genai/_live_converters.py +0 -3580
  35. google_genai-1.36.0/setup.cfg +0 -4
  36. {google_genai-1.36.0 → google_genai-1.51.0}/LICENSE +0 -0
  37. {google_genai-1.36.0 → google_genai-1.51.0}/MANIFEST.in +0 -0
  38. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/__init__.py +0 -0
  39. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_adapters.py +0 -0
  40. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_api_module.py +0 -0
  41. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_automatic_function_calling_util.py +0 -0
  42. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_base_transformers.py +0 -0
  43. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_base_url.py +0 -0
  44. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_mcp_utils.py +0 -0
  45. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/_test_api_client.py +0 -0
  46. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/chats.py +0 -0
  47. {google_genai-1.36.0 → google_genai-1.51.0}/google/genai/py.typed +0 -0
  48. {google_genai-1.36.0 → google_genai-1.51.0}/google_genai.egg-info/dependency_links.txt +0 -0
  49. {google_genai-1.36.0 → google_genai-1.51.0}/google_genai.egg-info/top_level.txt +0 -0
@@ -1,29 +1,28 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.36.0
+ Version: 1.51.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
- License: Apache-2.0
+ License-Expression: Apache-2.0
  Project-URL: Homepage, https://github.com/googleapis/python-genai
  Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
  Classifier: Topic :: Internet
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
- Requires-Python: >=3.9
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: anyio<5.0.0,>=4.8.0
  Requires-Dist: google-auth<3.0.0,>=2.14.1
  Requires-Dist: httpx<1.0.0,>=0.28.1
- Requires-Dist: pydantic<3.0.0,>=2.0.0
+ Requires-Dist: pydantic<3.0.0,>=2.9.0
  Requires-Dist: requests<3.0.0,>=2.28.1
  Requires-Dist: tenacity<9.2.0,>=8.2.3
  Requires-Dist: websockets<15.1.0,>=13.0.0
@@ -58,6 +57,12 @@ APIs.
  pip install google-genai
  ```

+ <small>With `uv`:</small>
+
+ ```sh
+ uv pip install google-genai
+ ```
+
  ## Imports

  ```python
@@ -116,6 +121,83 @@ from google import genai
  client = genai.Client()
  ```

+ ## Close a client
+
+ Explicitly close the sync client to ensure that resources, such as the
+ underlying HTTP connections, are properly cleaned up and closed.
+
+ ```python
+ from google.genai import Client
+
+ client = Client()
+ response_1 = client.models.generate_content(
+   model=MODEL_ID,
+   contents='Hello',
+ )
+ response_2 = client.models.generate_content(
+   model=MODEL_ID,
+   contents='Ask a question',
+ )
+ # Close the sync client to release resources.
+ client.close()
+ ```
+
+ To explicitly close the async client:
+
+ ```python
+ from google.genai import Client
+
+ aclient = Client(
+   vertexai=True, project='my-project-id', location='us-central1'
+ ).aio
+ response_1 = await aclient.models.generate_content(
+   model=MODEL_ID,
+   contents='Hello',
+ )
+ response_2 = await aclient.models.generate_content(
+   model=MODEL_ID,
+   contents='Ask a question',
+ )
+ # Close the async client to release resources.
+ await aclient.aclose()
+ ```
+
+ ## Client context managers
+
+ The sync client context manager closes the underlying sync client when
+ the `with` block exits.
+
+ ```python
+ from google.genai import Client
+
+ with Client() as client:
+   response_1 = client.models.generate_content(
+     model=MODEL_ID,
+     contents='Hello',
+   )
+   response_2 = client.models.generate_content(
+     model=MODEL_ID,
+     contents='Ask a question',
+   )
+ ```
+
+ The async client context manager closes the underlying async client when
+ the `with` block exits.
+
+ ```python
+ from google.genai import Client
+
+ async with Client().aio as aclient:
+   response_1 = await aclient.models.generate_content(
+     model=MODEL_ID,
+     contents='Hello',
+   )
+   response_2 = await aclient.models.generate_content(
+     model=MODEL_ID,
+     contents='Ask a question',
+   )
+ ```
+
  ### API Selection

  By default, the SDK uses the beta API endpoints provided by Google to support
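The paragraph is truncated by the hunk boundary; elsewhere the README selects the API surface through the `api_version` field of `HttpOptions`. A minimal sketch pinning the stable `v1` endpoints (project and location are placeholder values):

```python
from google import genai
from google.genai import types

# A minimal sketch: pin the stable `v1` API surface instead of the default
# beta endpoints. Project and location here are placeholders.
client = genai.Client(
    vertexai=True,
    project='your-project-id',
    location='us-central1',
    http_options=types.HttpOptions(api_version='v1'),
)
```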
@@ -158,7 +240,6 @@ Additional args of `aiohttp.ClientSession.request()` ([see _RequestOptions args]
  through the following way:

  ```python
-
  http_options = types.HttpOptions(
    async_client_args={'cookies': ..., 'ssl': ...},
  )
@@ -172,7 +253,6 @@ Both httpx and aiohttp libraries use `urllib.request.getproxies` from
  environment variables. Before client initialization, you may set proxy (and
  optional SSL_CERT_FILE) by setting the environment variables:

-
  ```bash
  export HTTPS_PROXY='http://username:password@proxy_uri:port'
  export SSL_CERT_FILE='client.pem'
@@ -183,7 +263,6 @@ args to `httpx.Client()`. You may install `httpx[socks]` to use it.
  Then, you can pass it as follows:

  ```python
-
  http_options = types.HttpOptions(
    client_args={'proxy': 'socks5://user:pass@host:port'},
    async_client_args={'proxy': 'socks5://user:pass@host:port'},
@@ -192,6 +271,23 @@ http_options = types.HttpOptions(
  client=Client(..., http_options=http_options)
  ```

+ ### Custom base url
+
+ In some cases you might need a custom base url (for example, an API gateway
+ proxy server) and to bypass some authentication checks for project, location,
+ or API key. You may pass the custom base url like this:
+
+ ```python
+ base_url = 'https://test-api-gateway-proxy.com'
+ client = Client(
+   vertexai=True,  # Currently only vertexai=True is supported
+   http_options={
+     'base_url': base_url,
+     'headers': {'Authorization': 'Bearer test_token'},
+   },
+ )
+ ```
+
  ## Types

  Parameter types can be specified as either dictionaries (`TypedDict`) or
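The sentence is cut off by the hunk boundary; it refers to the SDK's dual input style for config parameters. A brief hedged sketch of the same request expressed both ways (`temperature` is an illustrative value):

```python
from google import genai
from google.genai import types

client = genai.Client()

# Dict style: keys mirror the field names of the corresponding Pydantic type.
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='Why is the sky blue?',
    config={'temperature': 0.2},
)

# Pydantic style: the same config, validated on construction.
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='Why is the sky blue?',
    config=types.GenerateContentConfig(temperature=0.2),
)
```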
@@ -205,15 +301,37 @@ See the 'Create a client' section above to initialize a client.

  ### Generate Content

- #### with text content
+ #### with text content input (text output)

  ```python
  response = client.models.generate_content(
-   model='gemini-2.0-flash-001', contents='Why is the sky blue?'
+   model='gemini-2.5-flash', contents='Why is the sky blue?'
  )
  print(response.text)
  ```

+ #### with text content input (image output)
+
+ ```python
+ from google.genai import types
+
+ response = client.models.generate_content(
+   model='gemini-2.5-flash-image',
+   contents='A cartoon infographic for flying sneakers',
+   config=types.GenerateContentConfig(
+     response_modalities=["IMAGE"],
+     image_config=types.ImageConfig(
+       aspect_ratio="9:16",
+     ),
+   ),
+ )
+
+ for part in response.parts:
+   if part.inline_data:
+     generated_image = part.as_image()
+     generated_image.show()
+ ```
+
  #### with uploaded file (Gemini Developer API only)
  Download the file in the console.

@@ -226,7 +344,7 @@ python code.
  ```python
  file = client.files.upload(file='a11.txt')
  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents=['Could you summarize this file?', file]
  )
  print(response.text)
@@ -246,8 +364,8 @@ This is the canonical way to provide contents, SDK will not do any conversion.
  from google.genai import types

  contents = types.Content(
-     role='user',
-     parts=[types.Part.from_text(text='Why is the sky blue?')]
+   role='user',
+   parts=[types.Part.from_text(text='Why is the sky blue?')]
  )
  ```

@@ -255,10 +373,10 @@ SDK converts this to
  ```python
  [
-     types.Content(
-         role='user',
-         parts=[types.Part.from_text(text='Why is the sky blue?')]
-     )
+   types.Content(
+     role='user',
+     parts=[types.Part.from_text(text='Why is the sky blue?')]
+   )
  ]
  ```

@@ -272,11 +390,11 @@ The SDK will assume this is a text part, and it converts this into the following
  ```python
  [
-     types.UserContent(
-         parts=[
-             types.Part.from_text(text='Why is the sky blue?')
-         ]
-     )
+   types.UserContent(
+     parts=[
+       types.Part.from_text(text='Why is the sky blue?')
+     ]
+   )
  ]
  ```

@@ -294,12 +412,12 @@ like the following:
  ```python
  [
-     types.UserContent(
-         parts=[
-             types.Part.from_text(text='Why is the sky blue?'),
-             types.Part.from_text(text='Why is the cloud white?'),
-         ]
-     )
+   types.UserContent(
+     parts=[
+       types.Part.from_text(text='Why is the sky blue?'),
+       types.Part.from_text(text='Why is the cloud white?'),
+     ]
+   )
  ]
  ```

@@ -312,8 +430,8 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
  from google.genai import types

  contents = types.Part.from_function_call(
-     name='get_weather_by_location',
-     args={'location': 'Boston'}
+   name='get_weather_by_location',
+   args={'location': 'Boston'}
  )
  ```

@@ -321,14 +439,14 @@ The SDK converts a function call part to a content with a `model` role:
  ```python
  [
-     types.ModelContent(
-         parts=[
-             types.Part.from_function_call(
-                 name='get_weather_by_location',
-                 args={'location': 'Boston'}
-             )
-         ]
-     )
+   types.ModelContent(
+     parts=[
+       types.Part.from_function_call(
+         name='get_weather_by_location',
+         args={'location': 'Boston'}
+       )
+     ]
+   )
  ]
  ```

@@ -341,14 +459,14 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  from google.genai import types

  contents = [
-     types.Part.from_function_call(
-         name='get_weather_by_location',
-         args={'location': 'Boston'}
-     ),
-     types.Part.from_function_call(
-         name='get_weather_by_location',
-         args={'location': 'New York'}
-     ),
+   types.Part.from_function_call(
+     name='get_weather_by_location',
+     args={'location': 'Boston'}
+   ),
+   types.Part.from_function_call(
+     name='get_weather_by_location',
+     args={'location': 'New York'}
+   ),
  ]
  ```

@@ -356,18 +474,18 @@ The SDK converts a list of function call parts to the a content with a `model` r
  ```python
  [
-     types.ModelContent(
-         parts=[
-             types.Part.from_function_call(
-                 name='get_weather_by_location',
-                 args={'location': 'Boston'}
-             ),
-             types.Part.from_function_call(
-                 name='get_weather_by_location',
-                 args={'location': 'New York'}
-             )
-         ]
-     )
+   types.ModelContent(
+     parts=[
+       types.Part.from_function_call(
+         name='get_weather_by_location',
+         args={'location': 'Boston'}
+       ),
+       types.Part.from_function_call(
+         name='get_weather_by_location',
+         args={'location': 'New York'}
+       )
+     ]
+   )
  ]
  ```

@@ -380,8 +498,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  from google.genai import types

  contents = types.Part.from_uri(
-     file_uri='gs://generativeai-downloads/images/scones.jpg',
-     mime_type='image/jpeg',
+   file_uri='gs://generativeai-downloads/images/scones.jpg',
+   mime_type='image/jpeg',
  )
  ```

@@ -389,12 +507,12 @@ The SDK converts all non function call parts into a content with a `user` role.
  ```python
  [
-     types.UserContent(parts=[
-         types.Part.from_uri(
-             file_uri='gs://generativeai-downloads/images/scones.jpg',
-             mime_type='image/jpeg',
-         )
-     ])
+   types.UserContent(parts=[
+     types.Part.from_uri(
+       file_uri='gs://generativeai-downloads/images/scones.jpg',
+       mime_type='image/jpeg',
+     )
+   ])
  ]
  ```

@@ -404,11 +522,11 @@ The SDK converts all non function call parts into a content with a `user` role.
  from google.genai import types

  contents = [
-     types.Part.from_text(text='What is this image about?'),
-     types.Part.from_uri(
-         file_uri='gs://generativeai-downloads/images/scones.jpg',
-         mime_type='image/jpeg',
-     )
+   types.Part.from_text(text='What is this image about?'),
+   types.Part.from_uri(
+     file_uri='gs://generativeai-downloads/images/scones.jpg',
+     mime_type='image/jpeg',
+   )
  ]
  ```

@@ -416,15 +534,15 @@ The SDK will convert the list of parts into a content with a `user` role
  ```python
  [
-     types.UserContent(
-         parts=[
-             types.Part.from_text(text='What is this image about?'),
-             types.Part.from_uri(
-                 file_uri='gs://generativeai-downloads/images/scones.jpg',
-                 mime_type='image/jpeg',
-             )
-         ]
-     )
+   types.UserContent(
+     parts=[
+       types.Part.from_text(text='What is this image about?'),
+       types.Part.from_uri(
+         file_uri='gs://generativeai-downloads/images/scones.jpg',
+         mime_type='image/jpeg',
+       )
+     ]
+   )
  ]
  ```

@@ -530,7 +648,7 @@ print(async_pager[0])
  from google.genai import types

  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='Say something bad.',
    config=types.GenerateContentConfig(
      safety_settings=[
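The hunk cuts off before the safety settings themselves; for context, a minimal sketch of one `SafetySetting` entry (category and threshold values are illustrative):

```python
from google.genai import types

# Illustrative values; other harm categories and block thresholds exist.
config = types.GenerateContentConfig(
    safety_settings=[
        types.SafetySetting(
            category='HARM_CATEGORY_HATE_SPEECH',
            threshold='BLOCK_ONLY_HIGH',
        )
    ]
)
```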
@@ -558,13 +676,13 @@ def get_current_weather(location: str) -> str:
    """Returns the current weather.

    Args:
-       location: The city and state, e.g. San Francisco, CA
+     location: The city and state, e.g. San Francisco, CA
    """
    return 'sunny'


  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(tools=[get_current_weather]),
  )
@@ -580,14 +698,14 @@ as follows:
  from google.genai import types

  response = client.models.generate_content(
-     model='gemini-2.0-flash-001',
-     contents='What is the weather like in Boston?',
-     config=types.GenerateContentConfig(
-         tools=[get_current_weather],
-         automatic_function_calling=types.AutomaticFunctionCallingConfig(
-             disable=True
+   model='gemini-2.5-flash',
+   contents='What is the weather like in Boston?',
+   config=types.GenerateContentConfig(
+     tools=[get_current_weather],
+     automatic_function_calling=types.AutomaticFunctionCallingConfig(
+       disable=True
+     ),
    ),
-     ),
  )
  ```

@@ -627,7 +745,7 @@ function = types.FunctionDeclaration(
  tool = types.Tool(function_declarations=[function])

  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(tools=[tool]),
  )
@@ -671,7 +789,7 @@ function_response_content = types.Content(
  )

  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents=[
      user_prompt_content,
      function_call_content,
@@ -701,12 +819,12 @@ def get_current_weather(location: str) -> str:
    """Returns the current weather.

    Args:
-       location: The city and state, e.g. San Francisco, CA
+     location: The city and state, e.g. San Francisco, CA
    """
    return "sunny"

  response = client.models.generate_content(
-   model="gemini-2.0-flash-001",
+   model="gemini-2.5-flash",
    contents="What is the weather like in Boston?",
    config=types.GenerateContentConfig(
      tools=[get_current_weather],
@@ -731,12 +849,12 @@ def get_current_weather(location: str) -> str:
    """Returns the current weather.

    Args:
-       location: The city and state, e.g. San Francisco, CA
+     location: The city and state, e.g. San Francisco, CA
    """
    return "sunny"

  response = client.models.generate_content(
-   model="gemini-2.0-flash-001",
+   model="gemini-2.5-flash",
    contents="What is the weather like in Boston?",
    config=types.GenerateContentConfig(
      tools=[get_current_weather],
@@ -826,7 +944,7 @@ user_profile = {
  }

  response = client.models.generate_content(
-   model='gemini-2.0-flash',
+   model='gemini-2.5-flash',
    contents='Give me a random user profile.',
    config={
      'response_mime_type': 'application/json',
@@ -856,7 +974,7 @@ class CountryInfo(BaseModel):


  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='Give me information for the United States.',
    config=types.GenerateContentConfig(
      response_mime_type='application/json',
@@ -870,7 +988,7 @@ print(response.text)
  from google.genai import types

  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='Give me information for the United States.',
    config=types.GenerateContentConfig(
      response_mime_type='application/json',
@@ -908,21 +1026,23 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
  values as the response.

  ```python
+ from enum import Enum
+
  class InstrumentEnum(Enum):
-     PERCUSSION = 'Percussion'
-     STRING = 'String'
-     WOODWIND = 'Woodwind'
-     BRASS = 'Brass'
-     KEYBOARD = 'Keyboard'
+   PERCUSSION = 'Percussion'
+   STRING = 'String'
+   WOODWIND = 'Woodwind'
+   BRASS = 'Brass'
+   KEYBOARD = 'Keyboard'

  response = client.models.generate_content(
-     model='gemini-2.0-flash-001',
-     contents='What instrument plays multiple notes at once?',
-     config={
-         'response_mime_type': 'text/x.enum',
-         'response_schema': InstrumentEnum,
-     },
- )
+   model='gemini-2.5-flash',
+   contents='What instrument plays multiple notes at once?',
+   config={
+     'response_mime_type': 'text/x.enum',
+     'response_schema': InstrumentEnum,
+   },
+ )

  print(response.text)
  ```

@@ -935,20 +1055,20 @@ identical but in quotes.
  from enum import Enum

  class InstrumentEnum(Enum):
-     PERCUSSION = 'Percussion'
-     STRING = 'String'
-     WOODWIND = 'Woodwind'
-     BRASS = 'Brass'
-     KEYBOARD = 'Keyboard'
+   PERCUSSION = 'Percussion'
+   STRING = 'String'
+   WOODWIND = 'Woodwind'
+   BRASS = 'Brass'
+   KEYBOARD = 'Keyboard'

  response = client.models.generate_content(
-     model='gemini-2.0-flash-001',
-     contents='What instrument plays multiple notes at once?',
-     config={
-         'response_mime_type': 'application/json',
-         'response_schema': InstrumentEnum,
-     },
- )
+   model='gemini-2.5-flash',
+   contents='What instrument plays multiple notes at once?',
+   config={
+     'response_mime_type': 'application/json',
+     'response_schema': InstrumentEnum,
+   },
+ )

  print(response.text)
  ```

@@ -961,7 +1081,7 @@ to you, rather than being returned as one chunk.

  ```python
  for chunk in client.models.generate_content_stream(
-   model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+   model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
  ):
    print(chunk.text, end='')
  ```
@@ -975,7 +1095,7 @@ you can use the `from_uri` class method to create a `Part` object.
  from google.genai import types

  for chunk in client.models.generate_content_stream(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents=[
      'What is this image about?',
      types.Part.from_uri(
@@ -999,7 +1119,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
    image_bytes = f.read()

  for chunk in client.models.generate_content_stream(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents=[
      'What is this image about?',
      types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1018,7 +1138,7 @@ of `client.models.generate_content`

  ```python
  response = await client.aio.models.generate_content(
-   model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+   model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
  )

  print(response.text)
@@ -1026,10 +1146,9 @@ print(response.text)

  ### Generate Content (Asynchronous Streaming)

-
  ```python
  async for chunk in await client.aio.models.generate_content_stream(
-   model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+   model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
  ):
    print(chunk.text, end='')
  ```
@@ -1038,7 +1157,7 @@ async for chunk in await client.aio.models.generate_content_stream(

  ```python
  response = client.models.count_tokens(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='why is the sky blue?',
  )
  print(response)
@@ -1050,7 +1169,7 @@ Compute tokens is only supported in Vertex AI.

  ```python
  response = client.models.compute_tokens(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='why is the sky blue?',
  )
  print(response)
@@ -1060,7 +1179,7 @@ print(response)

  ```python
  response = await client.aio.models.count_tokens(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='why is the sky blue?',
  )
  print(response)
@@ -1069,14 +1188,14 @@ print(response)
  #### Local Count Tokens

  ```python
- tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+ tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
  result = tokenizer.count_tokens("What is your name?")
  ```

  #### Local Compute Tokens

  ```python
- tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+ tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
  result = tokenizer.compute_tokens("What is your name?")
  ```

@@ -1084,7 +1203,7 @@ result = tokenizer.compute_tokens("What is your name?")

  ```python
  response = client.models.embed_content(
-   model='text-embedding-004',
+   model='gemini-embedding-001',
    contents='why is the sky blue?',
  )
  print(response)
@@ -1095,7 +1214,7 @@ from google.genai import types
  from google.genai import types

  # multiple contents with config
  response = client.models.embed_content(
-   model='text-embedding-004',
+   model='gemini-embedding-001',
    contents=['why is the sky blue?', 'What is your age?'],
    config=types.EmbedContentConfig(output_dimensionality=10),
  )
@@ -1114,7 +1233,7 @@ from google.genai import types
  from google.genai import types

  # Generate Image
  response1 = client.models.generate_images(
-   model='imagen-3.0-generate-002',
+   model='imagen-4.0-generate-001',
    prompt='An umbrella in the foreground, and a rainy night sky in the background',
    config=types.GenerateImagesConfig(
      number_of_images=1,
@@ -1134,7 +1253,7 @@ from google.genai import types
  from google.genai import types

  # Upscale the generated image from above
  response2 = client.models.upscale_image(
-   model='imagen-3.0-generate-001',
+   model='imagen-4.0-upscale-preview',
    image=response1.generated_images[0].image,
    upscale_factor='x2',
    config=types.UpscaleImageConfig(
@@ -1195,7 +1314,7 @@ from google.genai import types

  # Create operation
  operation = client.models.generate_videos(
-   model='veo-2.0-generate-001',
+   model='veo-3.1-generate-preview',
    prompt='A neon hologram of a cat driving at top speed',
    config=types.GenerateVideosConfig(
      number_of_videos=1,
@@ -1223,7 +1342,7 @@ image = types.Image.from_file("local/path/file.png")

  # Create operation
  operation = client.models.generate_videos(
-   model='veo-2.0-generate-001',
+   model='veo-3.1-generate-preview',
    # Prompt is optional if image is provided
    prompt='Night sky',
    image=image,
@@ -1246,7 +1365,8 @@ video.show()

  #### Generate Videos (Video to Video)

- Currently, only Vertex supports Video to Video generation (Video extension).
+ Currently, only the Gemini Developer API supports video extension on Veo 3.1
+ for previously generated videos. Vertex supports video extension on Veo 2.0.

  ```python
  from google.genai import types
@@ -1256,10 +1376,10 @@ video = types.Video.from_file("local/path/video.mp4")

  # Create operation
  operation = client.models.generate_videos(
-   model='veo-2.0-generate-001',
+   model='veo-3.1-generate-preview',
    # Prompt is optional if Video is provided
    prompt='Night sky',
-   # Input video must be in GCS
+   # Input video must be in GCS for Vertex or a URI for Gemini
    video=types.Video(
      uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
    ),
@@ -1289,7 +1409,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
  ### Send Message (Synchronous Non-Streaming)

  ```python
- chat = client.chats.create(model='gemini-2.0-flash-001')
+ chat = client.chats.create(model='gemini-2.5-flash')
  response = chat.send_message('tell me a story')
  print(response.text)
  response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1299,7 +1419,7 @@ print(response.text)
  ### Send Message (Synchronous Streaming)

  ```python
- chat = client.chats.create(model='gemini-2.0-flash-001')
+ chat = client.chats.create(model='gemini-2.5-flash')
  for chunk in chat.send_message_stream('tell me a story'):
    print(chunk.text)
  ```
@@ -1307,7 +1427,7 @@ for chunk in chat.send_message_stream('tell me a story'):
  ### Send Message (Asynchronous Non-Streaming)

  ```python
- chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+ chat = client.aio.chats.create(model='gemini-2.5-flash')
  response = await chat.send_message('tell me a story')
  print(response.text)
  ```
@@ -1315,7 +1435,7 @@ print(response.text)
  ### Send Message (Asynchronous Streaming)

  ```python
- chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+ chat = client.aio.chats.create(model='gemini-2.5-flash')
  async for chunk in await chat.send_message_stream('tell me a story'):
    print(chunk.text)
  ```
@@ -1325,7 +1445,7 @@ async for chunk in await chat.send_message_stream('tell me a story'):
  Files are only supported in the Gemini Developer API. See the 'Create a client'
  section above to initialize a client.

- ```cmd
+ ```sh
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
  ```
@@ -1374,7 +1494,7 @@ else:
  file_uris = [file1.uri, file2.uri]

  cached_content = client.caches.create(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    config=types.CreateCachedContentConfig(
      contents=[
        types.Content(
@@ -1409,7 +1529,7 @@ cached_content = client.caches.get(name=cached_content.name)
  from google.genai import types

  response = client.models.generate_content(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    contents='Summarize the pdfs',
    config=types.GenerateContentConfig(
      cached_content=cached_content.name,
@@ -1426,15 +1546,15 @@ section above to initialize a client.

  ### Tune

- - Vertex AI supports tuning from a GCS source or from a Vertex Multimodal Dataset
+ - Vertex AI supports tuning from a GCS source or from a [Vertex AI Multimodal Dataset](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/datasets)

  ```python
  from google.genai import types

- model = 'gemini-2.0-flash-001'
+ model = 'gemini-2.5-flash'
  training_dataset = types.TuningDataset(
-     # or gcs_uri=my_vertex_multimodal_dataset
-     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
+   # or gcs_uri=my_vertex_multimodal_dataset
+   gcs_uri='gs://your-gcs-bucket/your-tuning-data.jsonl',
  )
  ```

@@ -1585,11 +1705,11 @@ Vertex AI:
  ```python
  # Specify model and source file only; destination and job display name will be auto-populated
  job = client.batches.create(
-   model='gemini-2.0-flash-001',
+   model='gemini-2.5-flash',
    src='bq://my-project.my-dataset.my-table',  # or "gs://path/to/input/data"
  )

- job
+ print(job)
  ```

  Gemini Developer API:
@@ -1597,22 +1717,22 @@ Gemini Developer API:
  ```python
  # Create a batch job with inlined requests
  batch_job = client.batches.create(
-   model="gemini-2.0-flash",
+   model="gemini-2.5-flash",
    src=[{
-       "contents": [{
-           "parts": [{
-               "text": "Hello!",
+     "contents": [{
+       "parts": [{
+         "text": "Hello!",
+       }],
+       "role": "user",
      }],
-           "role": "user",
-       }],
-       "config": {"response_modalities": ["text"]},
+     "config": {"response_modalities": ["text"]},
    }],
  )

  job
  ```

- To create a batch job from a file, you first need to upload a JSONL file.
+ To create a batch job from a file, you first need to upload a JSON file.
  For example, myrequests.json:

  ```
@@ -1625,14 +1745,14 @@ Then upload the file.
  ```python
  # Upload the file
  file = client.files.upload(
-   file='myrequest.json',
-   config=types.UploadFileConfig(display_name='test_json')
+   file='myrequests.json',
+   config=types.UploadFileConfig(display_name='test-json')
  )

  # Create a batch job with the file name
  batch_job = client.batches.create(
    model="gemini-2.0-flash",
-   src="files/file_name",
+   src="files/test-json",
  )
  ```

@@ -1713,13 +1833,13 @@ To handle errors raised by the model service, the SDK provides this [APIError](h
  from google.genai import errors

  try:
-     client.models.generate_content(
-         model="invalid-model-name",
-         contents="What is your name?",
-     )
+   client.models.generate_content(
+     model="invalid-model-name",
+     contents="What is your name?",
+   )
  except errors.APIError as e:
-     print(e.code)  # 404
-     print(e.message)
+   print(e.code)  # 404
+   print(e.message)
  ```

  ## Extra Request Body