google-genai 1.14.0__tar.gz → 1.15.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {google_genai-1.14.0/google_genai.egg-info → google_genai-1.15.0}/PKG-INFO +104 -19
  2. {google_genai-1.14.0 → google_genai-1.15.0}/README.md +103 -18
  3. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_api_client.py +21 -17
  4. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_automatic_function_calling_util.py +1 -1
  5. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_live_converters.py +106 -12
  6. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/caches.py +54 -6
  7. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/models.py +165 -12
  8. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/tunings.py +53 -0
  9. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/types.py +282 -118
  10. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/version.py +1 -1
  11. {google_genai-1.14.0 → google_genai-1.15.0/google_genai.egg-info}/PKG-INFO +104 -19
  12. {google_genai-1.14.0 → google_genai-1.15.0}/pyproject.toml +1 -1
  13. {google_genai-1.14.0 → google_genai-1.15.0}/LICENSE +0 -0
  14. {google_genai-1.14.0 → google_genai-1.15.0}/MANIFEST.in +0 -0
  15. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/__init__.py +0 -0
  16. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_api_module.py +0 -0
  17. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_base_url.py +0 -0
  18. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_common.py +0 -0
  19. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_extra_utils.py +0 -0
  20. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_replay_api_client.py +0 -0
  21. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_test_api_client.py +0 -0
  22. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/_transformers.py +0 -0
  23. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/batches.py +0 -0
  24. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/chats.py +0 -0
  25. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/client.py +0 -0
  26. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/errors.py +0 -0
  27. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/files.py +0 -0
  28. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/live.py +0 -0
  29. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/operations.py +0 -0
  30. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/pagers.py +0 -0
  31. {google_genai-1.14.0 → google_genai-1.15.0}/google/genai/py.typed +0 -0
  32. {google_genai-1.14.0 → google_genai-1.15.0}/google_genai.egg-info/SOURCES.txt +0 -0
  33. {google_genai-1.14.0 → google_genai-1.15.0}/google_genai.egg-info/dependency_links.txt +0 -0
  34. {google_genai-1.14.0 → google_genai-1.15.0}/google_genai.egg-info/requires.txt +0 -0
  35. {google_genai-1.14.0 → google_genai-1.15.0}/google_genai.egg-info/top_level.txt +0 -0
  36. {google_genai-1.14.0 → google_genai-1.15.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.14.0
+ Version: 1.15.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
@@ -61,11 +61,15 @@ Please run one of the following code blocks to create a client for
  different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)).

  ```python
+ from google import genai
+
  # Only run this block for Gemini Developer API
  client = genai.Client(api_key='GEMINI_API_KEY')
  ```

  ```python
+ from google import genai
+
  # Only run this block for Vertex AI API
  client = genai.Client(
  vertexai=True, project='your-project-id', location='us-central1'
@@ -94,6 +98,8 @@ export GOOGLE_CLOUD_LOCATION='us-central1'
  ```

  ```python
+ from google import genai
+
  client = genai.Client()
  ```

@@ -107,6 +113,9 @@ To set the API version use `http_options`. For example, to set the API version
  to `v1` for Vertex AI:

  ```python
+ from google import genai
+ from google.genai import types
+
  client = genai.Client(
  vertexai=True,
  project='your-project-id',
@@ -118,6 +127,9 @@ client = genai.Client(
  To set the API version to `v1alpha` for the Gemini Developer API:

  ```python
+ from google import genai
+ from google.genai import types
+
  client = genai.Client(
  api_key='GEMINI_API_KEY',
  http_options=types.HttpOptions(api_version='v1alpha')
@@ -133,6 +145,7 @@ Pydantic model types are available in the `types` module.
  ## Models

  The `client.models` modules exposes model inferencing and model getters.
+ See the 'Create a client' section above to initialize a client.

  ### Generate Content

@@ -174,6 +187,8 @@ This is the canonical way to provide contents, SDK will not do any conversion.
  ##### Provide a `types.Content` instance

  ```python
+ from google.genai import types
+
  contents = types.Content(
  role='user',
  parts=[types.Part.from_text(text='Why is the sky blue?')]
@@ -238,6 +253,8 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
  ##### Provide a function call part

  ```python
+ from google.genai import types
+
  contents = types.Part.from_function_call(
  name='get_weather_by_location',
  args={'location': 'Boston'}
@@ -265,6 +282,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  ##### Provide a list of function call parts

  ```python
+ from google.genai import types
+
  contents = [
  types.Part.from_function_call(
  name='get_weather_by_location',
@@ -302,6 +321,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  ##### Provide a non function call part

  ```python
+ from google.genai import types
+
  contents = types.Part.from_uri(
  file_uri: 'gs://generativeai-downloads/images/scones.jpg',
  mime_type: 'image/jpeg',
@@ -324,6 +345,8 @@ The SDK converts all non function call parts into a content with a `user` role.
  ##### Provide a list of non function call parts

  ```python
+ from google.genai import types
+
  contents = [
  types.Part.from_text('What is this image about?'),
  types.Part.from_uri(
@@ -363,11 +386,17 @@ If you put a list within a list, the inner list can only contain
  ### System Instructions and Other Configs

  The output of the model can be influenced by several optional settings
- available in generate_content's config parameter. For example, the
- variability and length of the output can be influenced by the temperature
- and max_output_tokens respectively.
+ available in generate_content's config parameter. For example, increasing
+ `max_output_tokens` is essential for longer model responses. To make a model more
+ deterministic, lowering the `temperature` parameter reduces randomness, with
+ values near 0 minimizing variability. Capabilities and parameter defaults for
+ each model is shown in the
+ [Vertex AI docs](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash)
+ and [Gemini API docs](https://ai.google.dev/gemini-api/docs/models) respectively.

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents='high',
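
The hunk above cuts off before the `config` argument it is describing. A minimal sketch of a complete call, assuming a client from the 'Create a client' section; the system instruction, model name, and parameter values are illustrative, not part of the diff:

```python
from google.genai import types

# Sketch of generate_content with the optional settings discussed above.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='high',
    config=types.GenerateContentConfig(
        system_instruction='I say high, you say low',  # illustrative instruction
        temperature=0.3,        # lower values reduce randomness
        max_output_tokens=100,  # raise this for longer responses
    ),
)
print(response.text)
```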
@@ -386,6 +415,8 @@ All API methods support Pydantic types for parameters as well as
  dictionaries. You can get the type from `google.genai.types`.

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents=types.Part.from_text(text='Why is the sky blue?'),
@@ -422,7 +453,7 @@ pager.next_page()
  print(pager[0])
  ```

- #### Async
+ #### List Base Models (Asynchronous)

  ```python
  async for job in await client.aio.models.list():
@@ -440,6 +471,8 @@ print(async_pager[0])
  ### Safety Settings

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents='Say something bad.',
@@ -463,6 +496,8 @@ You can pass a Python function directly and it will be automatically
  called and responded by default.

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
  """Returns the current weather.

@@ -486,6 +521,8 @@ automatic function calling, you can disable automatic function calling
  as follows:

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents='What is the weather like in Boston?',
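
The two hunks above truncate the function-calling examples they modify. A minimal sketch of the flow, assuming an existing client; the weather stub and its return value are illustrative:

```python
from google.genai import types

def get_current_weather(location: str) -> str:
    """Returns the current weather.

    Args:
      location: The city and state, e.g. San Francisco, CA
    """
    return 'sunny'  # illustrative stub

# With automatic function calling disabled, the SDK does not execute the
# function; the response contains a function call part instead.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(
        tools=[get_current_weather],
        automatic_function_calling=types.AutomaticFunctionCallingConfig(
            disable=True
        ),
    ),
)
print(response.candidates[0].content.parts[0].function_call)
```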
@@ -514,6 +551,8 @@ The following example shows how to declare a function and pass it as a tool.
  Then you will receive a function call part in the response.

  ```python
+ from google.genai import types
+
  function = types.FunctionDeclaration(
  name='get_current_weather',
  description='Get the current weather in a given location',
@@ -546,6 +585,8 @@ the model.
  The following example shows how to do it for a simple function invocation.

  ```python
+ from google.genai import types
+
  user_prompt_content = types.Content(
  role='user',
  parts=[types.Part.from_text(text='What is the weather like in Boston?')],
@@ -598,6 +639,8 @@ maximum remote call for automatic function calling (default to 10 times).
  If you'd like to disable automatic function calling in `ANY` mode:

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
  """Returns the current weather.

@@ -626,6 +669,8 @@ configure the maximum remote calls to be `x + 1`.
  Assuming you prefer `1` turn for automatic function calling.

  ```python
+ from google.genai import types
+
  def get_current_weather(location: str) -> str:
  """Returns the current weather.

@@ -660,6 +705,8 @@ Schemas can be provided as Pydantic Models.

  ```python
  from pydantic import BaseModel
+ from google.genai import types


  class CountryInfo(BaseModel):
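
The hunk above cuts off the Pydantic schema example it is adding imports to. A minimal sketch of the full usage, assuming an existing client; the `CountryInfo` fields and the prompt are illustrative:

```python
from pydantic import BaseModel

from google.genai import types

class CountryInfo(BaseModel):
    # Illustrative fields; the model fills these in its JSON response.
    name: str
    capital: str
    population: int

response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Give me information for the United States.',
    config=types.GenerateContentConfig(
        response_mime_type='application/json',
        response_schema=CountryInfo,
    ),
)
print(response.text)
```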
@@ -684,6 +730,8 @@ print(response.text)
  ```

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents='Give me information for the United States.',
@@ -743,7 +791,8 @@ print(response.text)

  #### JSON Response

- You can also set response_mime_type to 'application/json', the response will be identical but in quotes.
+ You can also set response_mime_type to 'application/json', the response will be
+ identical but in quotes.

  ```python
  from enum import Enum
@@ -766,7 +815,10 @@ response = client.models.generate_content(
  print(response.text)
  ```

- ### Streaming
+ ### Generate Content (Synchronous Streaming)
+
+ Generate content in a streaming format so that the model outputs streams back
+ to you, rather than being returned as one chunk.

  #### Streaming for text content
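
A minimal sketch of the text streaming call introduced under this heading, assuming an existing client; the model name and prompt are illustrative:

```python
# Each chunk arrives as the model generates it, instead of one final response.
for chunk in client.models.generate_content_stream(
    model='gemini-2.0-flash-001',
    contents='Tell me a story in 300 words.',
):
    print(chunk.text, end='')
```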

@@ -783,6 +835,8 @@ If your image is stored in [Google Cloud Storage](https://cloud.google.com/stora
  you can use the `from_uri` class method to create a `Part` object.

  ```python
+ from google.genai import types
+
  for chunk in client.models.generate_content_stream(
  model='gemini-2.0-flash-001',
  contents=[
@@ -800,6 +854,8 @@ If your image is stored in your local file system, you can read it in as bytes
  data and use the `from_bytes` class method to create a `Part` object.

  ```python
+ from google.genai import types
+
  YOUR_IMAGE_PATH = 'your_image_path'
  YOUR_IMAGE_MIME_TYPE = 'your_image_mime_type'
  with open(YOUR_IMAGE_PATH, 'rb') as f:
@@ -815,10 +871,10 @@ for chunk in client.models.generate_content_stream(
  print(chunk.text, end='')
  ```

- ### Async
+ ### Generate Content (Asynchronous Non Streaming)

  `client.aio` exposes all the analogous [`async` methods](https://docs.python.org/3/library/asyncio.html)
- that are available on `client`
+ that are available on `client`. Note that it applies to all the modules.

  For example, `client.aio.models.generate_content` is the `async` version
  of `client.models.generate_content`
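
A minimal sketch of the async call described in the hunk above, wrapped in `asyncio.run` for a plain script context (in a notebook the `await` can be used directly); the prompt is illustrative and `client` comes from the 'Create a client' section:

```python
import asyncio

async def main() -> None:
    # client.aio mirrors the sync surface; awaiting returns the full response.
    response = await client.aio.models.generate_content(
        model='gemini-2.0-flash-001',
        contents='Tell me a story in 300 words.',
    )
    print(response.text)

asyncio.run(main())
```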
@@ -831,7 +887,8 @@ response = await client.aio.models.generate_content(
  print(response.text)
  ```

- ### Streaming
+ ### Generate Content (Asynchronous Streaming)
+

  ```python
  async for chunk in await client.aio.models.generate_content_stream(
@@ -883,6 +940,8 @@ print(response)
  ```

  ```python
+ from google.genai import types
+
  # multiple contents with config
  response = client.models.embed_content(
  model='text-embedding-004',
@@ -900,6 +959,8 @@ print(response)
  Support for generate images in Gemini Developer API is behind an allowlist

  ```python
+ from google.genai import types
+
  # Generate Image
  response1 = client.models.generate_images(
  model='imagen-3.0-generate-002',
@@ -918,6 +979,8 @@ response1.generated_images[0].image.show()
  Upscale image is only supported in Vertex AI.

  ```python
+ from google.genai import types
+
  # Upscale the generated image from above
  response2 = client.models.upscale_image(
  model='imagen-3.0-generate-001',
@@ -939,6 +1002,7 @@ Edit image is only supported in Vertex AI.

  ```python
  # Edit the generated image from above
+ from google.genai import types
  from google.genai.types import RawReferenceImage, MaskReferenceImage

  raw_ref_image = RawReferenceImage(
@@ -976,6 +1040,8 @@ response3.generated_images[0].image.show()
  Support for generate videos in Vertex and Gemini Developer API is behind an allowlist

  ```python
+ from google.genai import types
+
  # Create operation
  operation = client.models.generate_videos(
  model='veo-2.0-generate-001',
@@ -999,17 +1065,22 @@ video.show()

  ## Chats

- Create a chat session to start a multi-turn conversations with the model.
+ Create a chat session to start a multi-turn conversations with the model. Then,
+ use `chat.send_message` function multiple times within the same chat session so
+ that it can reflect on its previous responses (i.e., engage in an ongoing
+ conversation). See the 'Create a client' section above to initialize a client.

- ### Send Message
+ ### Send Message (Synchronous Non-Streaming)

  ```python
  chat = client.chats.create(model='gemini-2.0-flash-001')
  response = chat.send_message('tell me a story')
  print(response.text)
+ response = chat.send_message('summarize the story you told me in 1 sentence')
+ print(response.text)
  ```

- ### Streaming
+ ### Send Message (Synchronous Streaming)

  ```python
  chat = client.chats.create(model='gemini-2.0-flash-001')
@@ -1017,7 +1088,7 @@ for chunk in chat.send_message_stream('tell me a story'):
  print(chunk.text)
  ```

- ### Async
+ ### Send Message (Asynchronous Non-Streaming)

  ```python
  chat = client.aio.chats.create(model='gemini-2.0-flash-001')
@@ -1025,7 +1096,7 @@ response = await chat.send_message('tell me a story')
  print(response.text)
  ```

- ### Async Streaming
+ ### Send Message (Asynchronous Streaming)

  ```python
  chat = client.aio.chats.create(model='gemini-2.0-flash-001')
@@ -1035,7 +1106,8 @@ async for chunk in await chat.send_message_stream('tell me a story'):

  ## Files

- Files are only supported in Gemini Developer API.
+ Files are only supported in Gemini Developer API. See the 'Create a client'
+ section above to initialize a client.

  ```cmd
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
@@ -1069,11 +1141,14 @@ client.files.delete(name=file3.name)

  ## Caches

- `client.caches` contains the control plane APIs for cached content
+ `client.caches` contains the control plane APIs for cached content. See the
+ 'Create a client' section above to initialize a client.

  ### Create

  ```python
+ from google.genai import types
+
  if client.vertexai:
  file_uris = [
  'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
@@ -1115,6 +1190,8 @@ cached_content = client.caches.get(name=cached_content.name)
  ### Generate Content with Caches

  ```python
+ from google.genai import types
+
  response = client.models.generate_content(
  model='gemini-2.0-flash-001',
  contents='Summarize the pdfs',
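
The hunk above truncates the cached-content call it is adding an import to. A minimal sketch of the full call, assuming `cached_content` was created by the earlier `client.caches.create` example; the model name is illustrative:

```python
from google.genai import types

# Reuse the cached documents via the cache resource name instead of re-sending them.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Summarize the pdfs',
    config=types.GenerateContentConfig(
        cached_content=cached_content.name,
    ),
)
print(response.text)
```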
@@ -1128,7 +1205,8 @@ print(response.text)
  ## Tunings

  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune`.
+ tuning through `tune`. See the 'Create a client' section above to initialize a
+ client.

  ### Tune

@@ -1136,6 +1214,8 @@ tuning through `tune`.
  - Gemini Developer API supports tuning from inline examples

  ```python
+ from google.genai import types
+
  if client.vertexai:
  model = 'gemini-2.0-flash-001'
  training_dataset = types.TuningDataset(
@@ -1155,6 +1235,8 @@ else:
  ```

  ```python
+ from google.genai import types
+
  tuning_job = client.tunings.tune(
  base_model=model,
  training_dataset=training_dataset,
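
The hunk above truncates the `tune` call. A minimal sketch of the rest of it, assuming `model` and `training_dataset` from the preceding block and assuming `types.CreateTuningJobConfig` with `epoch_count` and `tuned_model_display_name` fields; the values are illustrative:

```python
from google.genai import types

# Kick off a supervised fine-tuning job on the chosen base model.
tuning_job = client.tunings.tune(
    base_model=model,
    training_dataset=training_dataset,
    config=types.CreateTuningJobConfig(
        epoch_count=1,                                # illustrative value
        tuned_model_display_name='example tuned model',  # illustrative name
    ),
)
print(tuning_job.name)
```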
@@ -1241,6 +1323,8 @@ print(async_pager[0])
  ### Update Tuned Model

  ```python
+ from google.genai import types
+
  model = pager[0]

  model = client.models.update(
@@ -1286,7 +1370,8 @@ print(async_pager[0])

  ## Batch Prediction

- Only supported in Vertex AI.
+ Only supported in Vertex AI. See the 'Create a client' section above to
+ initialize a client.

  ### Create