google-genai 1.14.0__tar.gz → 1.16.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {google_genai-1.14.0/google_genai.egg-info → google_genai-1.16.0}/PKG-INFO +111 -22
  2. {google_genai-1.14.0 → google_genai-1.16.0}/README.md +110 -21
  3. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/__init__.py +5 -3
  4. google_genai-1.16.0/google/genai/_adapters.py +55 -0
  5. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_api_client.py +24 -20
  6. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_api_module.py +1 -1
  7. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_automatic_function_calling_util.py +2 -2
  8. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_common.py +1 -1
  9. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_extra_utils.py +117 -9
  10. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_live_converters.py +2127 -758
  11. google_genai-1.16.0/google/genai/_mcp_utils.py +117 -0
  12. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_replay_api_client.py +1 -1
  13. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_test_api_client.py +1 -1
  14. google_genai-1.16.0/google/genai/_tokens_converters.py +1701 -0
  15. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_transformers.py +66 -33
  16. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/caches.py +277 -26
  17. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/chats.py +1 -1
  18. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/client.py +12 -1
  19. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/errors.py +1 -1
  20. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/live.py +218 -35
  21. google_genai-1.16.0/google/genai/live_music.py +201 -0
  22. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/models.py +670 -56
  23. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/pagers.py +1 -1
  24. google_genai-1.16.0/google/genai/tokens.py +357 -0
  25. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/tunings.py +53 -0
  26. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/types.py +4892 -3606
  27. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/version.py +2 -2
  28. {google_genai-1.14.0 → google_genai-1.16.0/google_genai.egg-info}/PKG-INFO +111 -22
  29. {google_genai-1.14.0 → google_genai-1.16.0}/google_genai.egg-info/SOURCES.txt +5 -0
  30. {google_genai-1.14.0 → google_genai-1.16.0}/pyproject.toml +1 -1
  31. {google_genai-1.14.0 → google_genai-1.16.0}/LICENSE +0 -0
  32. {google_genai-1.14.0 → google_genai-1.16.0}/MANIFEST.in +0 -0
  33. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/_base_url.py +0 -0
  34. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/batches.py +0 -0
  35. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/files.py +0 -0
  36. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/operations.py +0 -0
  37. {google_genai-1.14.0 → google_genai-1.16.0}/google/genai/py.typed +0 -0
  38. {google_genai-1.14.0 → google_genai-1.16.0}/google_genai.egg-info/dependency_links.txt +0 -0
  39. {google_genai-1.14.0 → google_genai-1.16.0}/google_genai.egg-info/requires.txt +0 -0
  40. {google_genai-1.14.0 → google_genai-1.16.0}/google_genai.egg-info/top_level.txt +0 -0
  41. {google_genai-1.14.0 → google_genai-1.16.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.14.0
+ Version: 1.16.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
@@ -40,7 +40,11 @@ Dynamic: license-file

  -----

- Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs.
+ Google Gen AI Python SDK provides an interface for developers to integrate
+ Google's generative models into their Python applications. It supports the
+ [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and
+ [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)
+ APIs.

  ## Installation

@@ -61,11 +65,15 @@ Please run one of the following code blocks to create a client for
  different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)).

  ```python
+ from google import genai
+
  # Only run this block for Gemini Developer API
  client = genai.Client(api_key='GEMINI_API_KEY')
  ```

  ```python
+ from google import genai
+
  # Only run this block for Vertex AI API
  client = genai.Client(
      vertexai=True, project='your-project-id', location='us-central1'
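
For reference, a minimal end-to-end version of the two client setups shown in this hunk (use only one of the two constructors in a given program):

```python
from google import genai

# Gemini Developer API: authenticate with an API key.
client = genai.Client(api_key='GEMINI_API_KEY')

# Vertex AI: authenticate with a Google Cloud project and location instead.
# client = genai.Client(
#     vertexai=True, project='your-project-id', location='us-central1'
# )
```
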
@@ -84,8 +92,8 @@ Developer API or the Gemini API in Vertex AI.
  export GOOGLE_API_KEY='your-api-key'
  ```

- **Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`, `GOOGLE_CLOUD_PROJECT`
- and `GOOGLE_CLOUD_LOCATION`, as shown below:
+ **Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`,
+ `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION`, as shown below:

  ```bash
  export GOOGLE_GENAI_USE_VERTEXAI=true
@@ -94,6 +102,8 @@ export GOOGLE_CLOUD_LOCATION='us-central1'
  ```

  ```python
+ from google import genai
+
  client = genai.Client()
  ```

@@ -107,6 +117,9 @@ To set the API version use `http_options`. For example, to set the API version
  to `v1` for Vertex AI:

  ```python
+ from google import genai
+ from google.genai import types
+
  client = genai.Client(
      vertexai=True,
      project='your-project-id',
@@ -118,6 +131,9 @@ client = genai.Client(
  To set the API version to `v1alpha` for the Gemini Developer API:

  ```python
+ from google import genai
+ from google.genai import types
+
  client = genai.Client(
      api_key='GEMINI_API_KEY',
      http_options=types.HttpOptions(api_version='v1alpha')
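
A combined sketch of the two `http_options` variants above (the Vertex AI constructor reuses the same project and location placeholders as earlier in the README):

```python
from google import genai
from google.genai import types

# Pin the Vertex AI client to the v1 API surface.
vertex_client = genai.Client(
    vertexai=True,
    project='your-project-id',
    location='us-central1',
    http_options=types.HttpOptions(api_version='v1'),
)

# Pin the Gemini Developer API client to v1alpha.
dev_client = genai.Client(
    api_key='GEMINI_API_KEY',
    http_options=types.HttpOptions(api_version='v1alpha'),
)
```
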
@@ -133,6 +149,7 @@ Pydantic model types are available in the `types` module.
  ## Models

  The `client.models` modules exposes model inferencing and model getters.
+ See the 'Create a client' section above to initialize a client.

  ### Generate Content

@@ -174,6 +191,8 @@ This is the canonical way to provide contents, SDK will not do any conversion.
  ##### Provide a `types.Content` instance

  ```python
+ from google.genai import types
+
  contents = types.Content(
      role='user',
      parts=[types.Part.from_text(text='Why is the sky blue?')]
@@ -238,6 +257,8 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
  ##### Provide a function call part

  ```python
+ from google.genai import types
+
  contents = types.Part.from_function_call(
      name='get_weather_by_location',
      args={'location': 'Boston'}
@@ -265,6 +286,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  ##### Provide a list of function call parts

  ```python
+ from google.genai import types
+
  contents = [
      types.Part.from_function_call(
          name='get_weather_by_location',
@@ -302,6 +325,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  ##### Provide a non function call part

  ```python
+ from google.genai import types
+
  contents = types.Part.from_uri(
      file_uri: 'gs://generativeai-downloads/images/scones.jpg',
      mime_type: 'image/jpeg',
@@ -324,6 +349,8 @@ The SDK converts all non function call parts into a content with a `user` role.
  ##### Provide a list of non function call parts

  ```python
+ from google.genai import types
+
  contents = [
      types.Part.from_text('What is this image about?'),
      types.Part.from_uri(
@@ -363,11 +390,17 @@ If you put a list within a list, the inner list can only contain
  ### System Instructions and Other Configs

  The output of the model can be influenced by several optional settings
- available in generate_content's config parameter. For example, the
- variability and length of the output can be influenced by the temperature
- and max_output_tokens respectively.
+ available in generate_content's config parameter. For example, increasing
+ `max_output_tokens` is essential for longer model responses. To make a model more
+ deterministic, lowering the `temperature` parameter reduces randomness, with
+ values near 0 minimizing variability. Capabilities and parameter defaults for
+ each model is shown in the
+ [Vertex AI docs](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash)
+ and [Gemini API docs](https://ai.google.dev/gemini-api/docs/models) respectively.

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='high',
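
The snippet above is truncated before its `config` argument; a runnable sketch of the idea, assuming a `types.GenerateContentConfig` with `system_instruction`, `max_output_tokens` and `temperature` fields (illustrative values, not the package's own example):

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')

response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Why is the sky blue?',
    config=types.GenerateContentConfig(
        # Assumed config fields: steer the model and bound its output.
        system_instruction='Answer in one short sentence.',
        max_output_tokens=100,
        temperature=0.1,
    ),
)
print(response.text)
```
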
@@ -386,6 +419,8 @@ All API methods support Pydantic types for parameters as well as
  dictionaries. You can get the type from `google.genai.types`.

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents=types.Part.from_text(text='Why is the sky blue?'),
@@ -422,7 +457,7 @@ pager.next_page()
  print(pager[0])
  ```

- #### Async
+ #### List Base Models (Asynchronous)

  ```python
  async for job in await client.aio.models.list():
@@ -440,6 +475,8 @@ print(async_pager[0])
  ### Safety Settings

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='Say something bad.',
@@ -463,6 +500,8 @@ You can pass a Python function directly and it will be automatically
  called and responded by default.

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
    """Returns the current weather.

@@ -486,6 +525,8 @@ automatic function calling, you can disable automatic function calling
  as follows:

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='What is the weather like in Boston?',
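
Putting the two truncated snippets above together, a hedged sketch of automatic function calling and of turning it off (`types.GenerateContentConfig`, `types.AutomaticFunctionCallingConfig` and the final `candidates[...]` access path are assumptions here; the hunks only show the function and the call sites):

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')


def get_current_weather(location: str) -> str:
  """Returns the current weather in the given location."""
  return 'sunny'


# Automatic function calling: the SDK invokes the function and returns the
# model's final answer.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(tools=[get_current_weather]),
)
print(response.text)

# Disable automatic function calling to receive the function call part instead.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(
        tools=[get_current_weather],
        automatic_function_calling=types.AutomaticFunctionCallingConfig(
            disable=True
        ),
    ),
)
print(response.candidates[0].content.parts[0].function_call)
```
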
@@ -514,6 +555,8 @@ The following example shows how to declare a function and pass it as a tool.
  Then you will receive a function call part in the response.

  ```python
+ from google.genai import types
+
  function = types.FunctionDeclaration(
      name='get_current_weather',
      description='Get the current weather in a given location',
@@ -546,6 +589,8 @@ the model.
  The following example shows how to do it for a simple function invocation.

  ```python
+ from google.genai import types
+
  user_prompt_content = types.Content(
      role='user',
      parts=[types.Part.from_text(text='What is the weather like in Boston?')],
@@ -598,6 +643,8 @@ maximum remote call for automatic function calling (default to 10 times).
  If you'd like to disable automatic function calling in `ANY` mode:

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
    """Returns the current weather.

@@ -626,6 +673,8 @@ configure the maximum remote calls to be `x + 1`.
  Assuming you prefer `1` turn for automatic function calling.

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
    """Returns the current weather.

@@ -660,6 +709,7 @@ Schemas can be provided as Pydantic Models.

  ```python
  from pydantic import BaseModel
+ from google.genai import types


  class CountryInfo(BaseModel):
@@ -684,6 +734,8 @@ print(response.text)
  ```

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='Give me information for the United States.',
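
A complete version of the Pydantic-schema pattern that the two hunks above sketch, assuming `types.GenerateContentConfig` with `response_mime_type` and `response_schema` (the `CountryInfo` fields here are illustrative, not the package's own):

```python
from pydantic import BaseModel

from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')


class CountryInfo(BaseModel):
  name: str
  capital: str
  population: int


response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Give me information for the United States.',
    config=types.GenerateContentConfig(
        # Ask for JSON output shaped by the Pydantic model.
        response_mime_type='application/json',
        response_schema=CountryInfo,
    ),
)
print(response.text)
```
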
@@ -743,7 +795,8 @@ print(response.text)

  #### JSON Response

- You can also set response_mime_type to 'application/json', the response will be identical but in quotes.
+ You can also set response_mime_type to 'application/json', the response will be
+ identical but in quotes.

  ```python
  from enum import Enum
@@ -766,7 +819,10 @@ response = client.models.generate_content(
  print(response.text)
  ```

- ### Streaming
+ ### Generate Content (Synchronous Streaming)
+
+ Generate content in a streaming format so that the model outputs streams back
+ to you, rather than being returned as one chunk.

  #### Streaming for text content

@@ -783,6 +839,8 @@ If your image is stored in [Google Cloud Storage](https://cloud.google.com/stora
  you can use the `from_uri` class method to create a `Part` object.

  ```python
+ from google.genai import types
+
  for chunk in client.models.generate_content_stream(
      model='gemini-2.0-flash-001',
      contents=[
@@ -800,6 +858,8 @@ If your image is stored in your local file system, you can read it in as bytes
  data and use the `from_bytes` class method to create a `Part` object.

  ```python
+ from google.genai import types
+
  YOUR_IMAGE_PATH = 'your_image_path'
  YOUR_IMAGE_MIME_TYPE = 'your_image_mime_type'
  with open(YOUR_IMAGE_PATH, 'rb') as f:
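
For reference, a runnable streaming sketch covering both the plain-text case and the local-bytes case that the hunks above truncate (the image path and prompts are placeholders):

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')

# Stream plain text: chunks are printed as they are generated.
for chunk in client.models.generate_content_stream(
    model='gemini-2.0-flash-001',
    contents='Tell me a story in 300 words.',
):
  print(chunk.text, end='')

# Stream a response about a local image, passed as raw bytes.
with open('your_image.jpg', 'rb') as f:
  image_bytes = f.read()

for chunk in client.models.generate_content_stream(
    model='gemini-2.0-flash-001',
    contents=[
        'What is this image about?',
        types.Part.from_bytes(data=image_bytes, mime_type='image/jpeg'),
    ],
):
  print(chunk.text, end='')
```
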
@@ -815,10 +875,10 @@ for chunk in client.models.generate_content_stream(
      print(chunk.text, end='')
  ```

- ### Async
+ ### Generate Content (Asynchronous Non Streaming)

  `client.aio` exposes all the analogous [`async` methods](https://docs.python.org/3/library/asyncio.html)
- that are available on `client`
+ that are available on `client`. Note that it applies to all the modules.

  For example, `client.aio.models.generate_content` is the `async` version
  of `client.models.generate_content`
@@ -831,7 +891,8 @@ response = await client.aio.models.generate_content(
  print(response.text)
  ```

- ### Streaming
+ ### Generate Content (Asynchronous Streaming)
+

  ```python
  async for chunk in await client.aio.models.generate_content_stream(
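
A self-contained sketch of the two async variants above, wrapped in `asyncio.run` so it can be executed as a script:

```python
import asyncio

from google import genai

client = genai.Client(api_key='GEMINI_API_KEY')


async def main() -> None:
  # Non-streaming: await the full response.
  response = await client.aio.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='Tell me a story in 300 words.',
  )
  print(response.text)

  # Streaming: iterate over chunks as they arrive.
  async for chunk in await client.aio.models.generate_content_stream(
      model='gemini-2.0-flash-001',
      contents='Tell me a story in 300 words.',
  ):
    print(chunk.text, end='')


asyncio.run(main())
```
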
@@ -883,6 +944,8 @@ print(response)
  ```

  ```python
+ from google.genai import types
+
  # multiple contents with config
  response = client.models.embed_content(
      model='text-embedding-004',
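
A complete sketch of the embedding call that this hunk truncates; `types.EmbedContentConfig` and its `output_dimensionality` field are assumptions rather than something shown in the hunk:

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')

# Embed multiple contents in one call; truncate each vector to 10 dimensions.
response = client.models.embed_content(
    model='text-embedding-004',
    contents=['why is the sky blue?', 'What is your age?'],
    config=types.EmbedContentConfig(output_dimensionality=10),  # assumed type
)
print(response)
```
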
@@ -900,6 +963,8 @@ print(response)
  Support for generate images in Gemini Developer API is behind an allowlist

  ```python
+ from google.genai import types
+
  # Generate Image
  response1 = client.models.generate_images(
      model='imagen-3.0-generate-002',
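
A runnable sketch of the image-generation call, assuming `types.GenerateImagesConfig(number_of_images=1)` for the config portion the hunk cuts off:

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='GEMINI_API_KEY')

# Generate a single image from a text prompt.
response = client.models.generate_images(
    model='imagen-3.0-generate-002',
    prompt='An umbrella in the foreground, and a rainy night sky in the background',
    config=types.GenerateImagesConfig(number_of_images=1),  # assumed config type
)
response.generated_images[0].image.show()
```
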
@@ -918,6 +983,8 @@ response1.generated_images[0].image.show()
  Upscale image is only supported in Vertex AI.

  ```python
+ from google.genai import types
+
  # Upscale the generated image from above
  response2 = client.models.upscale_image(
      model='imagen-3.0-generate-001',
@@ -939,6 +1006,7 @@ Edit image is only supported in Vertex AI.

  ```python
  # Edit the generated image from above
+ from google.genai import types
  from google.genai.types import RawReferenceImage, MaskReferenceImage

  raw_ref_image = RawReferenceImage(
@@ -976,6 +1044,8 @@ response3.generated_images[0].image.show()
  Support for generate videos in Vertex and Gemini Developer API is behind an allowlist

  ```python
+ from google.genai import types
+
  # Create operation
  operation = client.models.generate_videos(
      model='veo-2.0-generate-001',
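
`generate_videos` returns a long-running operation (note `video.show()` in the next hunk's context line); a polling sketch under the assumption that `client.operations.get(operation)` refreshes the operation:

```python
import time

from google import genai

client = genai.Client(
    vertexai=True, project='your-project-id', location='us-central1'
)

# Kick off video generation; this returns a long-running operation.
operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    prompt='A neon hologram of a cat driving at top speed',
)

# Poll until the operation completes (assumed refresh call).
while not operation.done:
  time.sleep(20)
  operation = client.operations.get(operation)

# The completed operation carries the generated videos (assumed response shape).
print(operation.response)
```
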
@@ -999,17 +1069,22 @@ video.show()

  ## Chats

- Create a chat session to start a multi-turn conversations with the model.
+ Create a chat session to start a multi-turn conversations with the model. Then,
+ use `chat.send_message` function multiple times within the same chat session so
+ that it can reflect on its previous responses (i.e., engage in an ongoing
+ conversation). See the 'Create a client' section above to initialize a client.

- ### Send Message
+ ### Send Message (Synchronous Non-Streaming)

  ```python
  chat = client.chats.create(model='gemini-2.0-flash-001')
  response = chat.send_message('tell me a story')
  print(response.text)
+ response = chat.send_message('summarize the story you told me in 1 sentence')
+ print(response.text)
  ```

- ### Streaming
+ ### Send Message (Synchronous Streaming)

  ```python
  chat = client.chats.create(model='gemini-2.0-flash-001')
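
A complete multi-turn chat sketch combining the synchronous calls shown above:

```python
from google import genai

client = genai.Client(api_key='GEMINI_API_KEY')

# A chat session keeps conversation history between calls.
chat = client.chats.create(model='gemini-2.0-flash-001')

response = chat.send_message('tell me a story')
print(response.text)

# The follow-up turn can refer back to the previous response.
response = chat.send_message('summarize the story you told me in 1 sentence')
print(response.text)

# Streaming variant of the same call.
for chunk in chat.send_message_stream('tell me another story'):
  print(chunk.text, end='')
```
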
@@ -1017,7 +1092,7 @@ for chunk in chat.send_message_stream('tell me a story'):
      print(chunk.text)
  ```

- ### Async
+ ### Send Message (Asynchronous Non-Streaming)

  ```python
  chat = client.aio.chats.create(model='gemini-2.0-flash-001')
@@ -1025,7 +1100,7 @@ response = await chat.send_message('tell me a story')
  print(response.text)
  ```

- ### Async Streaming
+ ### Send Message (Asynchronous Streaming)

  ```python
  chat = client.aio.chats.create(model='gemini-2.0-flash-001')
@@ -1035,7 +1110,8 @@ async for chunk in await chat.send_message_stream('tell me a story'):

  ## Files

- Files are only supported in Gemini Developer API.
+ Files are only supported in Gemini Developer API. See the 'Create a client'
+ section above to initialize a client.

  ```cmd
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
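
A sketch of an upload/get/delete round trip with the Files API; the `file=` keyword on `client.files.upload` is an assumption (only `client.files.delete(name=...)` appears in this hunk's context):

```python
from google import genai

client = genai.Client(api_key='GEMINI_API_KEY')

# Upload the PDF fetched by the gsutil command above (assumed keyword: file=).
uploaded = client.files.upload(file='2312.11805v3.pdf')

# Look the file up by its resource name, then clean up.
fetched = client.files.get(name=uploaded.name)
print(fetched)
client.files.delete(name=uploaded.name)
```
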
@@ -1069,11 +1145,14 @@ client.files.delete(name=file3.name)

  ## Caches

- `client.caches` contains the control plane APIs for cached content
+ `client.caches` contains the control plane APIs for cached content. See the
+ 'Create a client' section above to initialize a client.

  ### Create

  ```python
+ from google.genai import types
+
  if client.vertexai:
    file_uris = [
        'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
@@ -1115,6 +1194,8 @@ cached_content = client.caches.get(name=cached_content.name)
  ### Generate Content with Caches

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
      model='gemini-2.0-flash-001',
      contents='Summarize the pdfs',
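
The call above is truncated before its config; a sketch assuming `types.GenerateContentConfig(cached_content=...)` and a cache fetched by resource name, as in this hunk's context line:

```python
from google import genai
from google.genai import types

client = genai.Client(
    vertexai=True, project='your-project-id', location='us-central1'
)

# Look up a cache created earlier with client.caches.create(...).
cached_content = client.caches.get(name='your-cached-content-resource-name')

response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Summarize the pdfs',
    config=types.GenerateContentConfig(
        cached_content=cached_content.name,  # assumed config field
    ),
)
print(response.text)
```
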
@@ -1128,7 +1209,8 @@ print(response.text)
  ## Tunings

  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune`.
+ tuning through `tune`. See the 'Create a client' section above to initialize a
+ client.

  ### Tune

@@ -1136,6 +1218,8 @@ tuning through `tune`.
  - Gemini Developer API supports tuning from inline examples

  ```python
+ from google.genai import types
+
  if client.vertexai:
    model = 'gemini-2.0-flash-001'
    training_dataset = types.TuningDataset(
@@ -1155,6 +1239,8 @@ else:
  ```

  ```python
+ from google.genai import types
+
  tuning_job = client.tunings.tune(
      base_model=model,
      training_dataset=training_dataset,
@@ -1241,6 +1327,8 @@ print(async_pager[0])
  ### Update Tuned Model

  ```python
+ from google.genai import types
+
  model = pager[0]

  model = client.models.update(
@@ -1286,7 +1374,8 @@ print(async_pager[0])

  ## Batch Prediction

- Only supported in Vertex AI.
+ Only supported in Vertex AI. See the 'Create a client' section above to
+ initialize a client.

  ### Create