google-genai 1.56.0__py3-none-any.whl → 1.58.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (246) hide show
  1. google/genai/_api_client.py +49 -26
  2. google/genai/_interactions/__init__.py +3 -0
  3. google/genai/_interactions/_base_client.py +1 -1
  4. google/genai/_interactions/_client.py +57 -3
  5. google/genai/_interactions/_client_adapter.py +48 -0
  6. google/genai/_interactions/types/__init__.py +6 -0
  7. google/genai/_interactions/types/audio_content.py +2 -0
  8. google/genai/_interactions/types/audio_content_param.py +2 -0
  9. google/genai/_interactions/types/content.py +65 -0
  10. google/genai/_interactions/types/content_delta.py +10 -2
  11. google/genai/_interactions/types/content_param.py +63 -0
  12. google/genai/_interactions/types/content_start.py +5 -46
  13. google/genai/_interactions/types/content_stop.py +1 -2
  14. google/genai/_interactions/types/document_content.py +2 -0
  15. google/genai/_interactions/types/document_content_param.py +2 -0
  16. google/genai/_interactions/types/error_event.py +1 -2
  17. google/genai/_interactions/types/file_search_call_content.py +32 -0
  18. google/genai/_interactions/types/file_search_call_content_param.py +31 -0
  19. google/genai/_interactions/types/generation_config.py +4 -0
  20. google/genai/_interactions/types/generation_config_param.py +4 -0
  21. google/genai/_interactions/types/image_config.py +31 -0
  22. google/genai/_interactions/types/image_config_param.py +30 -0
  23. google/genai/_interactions/types/image_content.py +2 -0
  24. google/genai/_interactions/types/image_content_param.py +2 -0
  25. google/genai/_interactions/types/interaction.py +6 -52
  26. google/genai/_interactions/types/interaction_create_params.py +4 -22
  27. google/genai/_interactions/types/interaction_event.py +1 -2
  28. google/genai/_interactions/types/interaction_sse_event.py +5 -3
  29. google/genai/_interactions/types/interaction_status_update.py +1 -2
  30. google/genai/_interactions/types/model.py +1 -0
  31. google/genai/_interactions/types/model_param.py +1 -0
  32. google/genai/_interactions/types/turn.py +3 -44
  33. google/genai/_interactions/types/turn_param.py +4 -40
  34. google/genai/_interactions/types/usage.py +1 -1
  35. google/genai/_interactions/types/usage_param.py +1 -1
  36. google/genai/_interactions/types/video_content.py +2 -0
  37. google/genai/_interactions/types/video_content_param.py +2 -0
  38. google/genai/_live_converters.py +118 -34
  39. google/genai/_local_tokenizer_loader.py +1 -0
  40. google/genai/_tokens_converters.py +14 -14
  41. google/genai/_transformers.py +15 -21
  42. google/genai/batches.py +27 -22
  43. google/genai/caches.py +42 -42
  44. google/genai/chats.py +0 -2
  45. google/genai/client.py +61 -55
  46. google/genai/files.py +224 -0
  47. google/genai/live.py +1 -1
  48. google/genai/models.py +56 -44
  49. google/genai/tests/__init__.py +21 -0
  50. google/genai/tests/afc/__init__.py +21 -0
  51. google/genai/tests/afc/test_convert_if_exist_pydantic_model.py +309 -0
  52. google/genai/tests/afc/test_convert_number_values_for_function_call_args.py +63 -0
  53. google/genai/tests/afc/test_find_afc_incompatible_tool_indexes.py +240 -0
  54. google/genai/tests/afc/test_generate_content_stream_afc.py +530 -0
  55. google/genai/tests/afc/test_generate_content_stream_afc_thoughts.py +77 -0
  56. google/genai/tests/afc/test_get_function_map.py +176 -0
  57. google/genai/tests/afc/test_get_function_response_parts.py +277 -0
  58. google/genai/tests/afc/test_get_max_remote_calls_for_afc.py +130 -0
  59. google/genai/tests/afc/test_invoke_function_from_dict_args.py +241 -0
  60. google/genai/tests/afc/test_raise_error_for_afc_incompatible_config.py +159 -0
  61. google/genai/tests/afc/test_should_append_afc_history.py +53 -0
  62. google/genai/tests/afc/test_should_disable_afc.py +214 -0
  63. google/genai/tests/batches/__init__.py +17 -0
  64. google/genai/tests/batches/test_cancel.py +77 -0
  65. google/genai/tests/batches/test_create.py +78 -0
  66. google/genai/tests/batches/test_create_with_bigquery.py +113 -0
  67. google/genai/tests/batches/test_create_with_file.py +82 -0
  68. google/genai/tests/batches/test_create_with_gcs.py +125 -0
  69. google/genai/tests/batches/test_create_with_inlined_requests.py +255 -0
  70. google/genai/tests/batches/test_delete.py +86 -0
  71. google/genai/tests/batches/test_embedding.py +157 -0
  72. google/genai/tests/batches/test_get.py +78 -0
  73. google/genai/tests/batches/test_list.py +79 -0
  74. google/genai/tests/caches/__init__.py +17 -0
  75. google/genai/tests/caches/constants.py +29 -0
  76. google/genai/tests/caches/test_create.py +210 -0
  77. google/genai/tests/caches/test_create_custom_url.py +105 -0
  78. google/genai/tests/caches/test_delete.py +54 -0
  79. google/genai/tests/caches/test_delete_custom_url.py +52 -0
  80. google/genai/tests/caches/test_get.py +94 -0
  81. google/genai/tests/caches/test_get_custom_url.py +52 -0
  82. google/genai/tests/caches/test_list.py +68 -0
  83. google/genai/tests/caches/test_update.py +70 -0
  84. google/genai/tests/caches/test_update_custom_url.py +58 -0
  85. google/genai/tests/chats/__init__.py +1 -0
  86. google/genai/tests/chats/test_get_history.py +598 -0
  87. google/genai/tests/chats/test_send_message.py +844 -0
  88. google/genai/tests/chats/test_validate_response.py +90 -0
  89. google/genai/tests/client/__init__.py +17 -0
  90. google/genai/tests/client/test_async_stream.py +427 -0
  91. google/genai/tests/client/test_client_close.py +197 -0
  92. google/genai/tests/client/test_client_initialization.py +1687 -0
  93. google/genai/tests/client/test_client_requests.py +221 -0
  94. google/genai/tests/client/test_custom_client.py +104 -0
  95. google/genai/tests/client/test_http_options.py +178 -0
  96. google/genai/tests/client/test_replay_client_equality.py +168 -0
  97. google/genai/tests/client/test_retries.py +846 -0
  98. google/genai/tests/client/test_upload_errors.py +136 -0
  99. google/genai/tests/common/__init__.py +17 -0
  100. google/genai/tests/common/test_common.py +954 -0
  101. google/genai/tests/conftest.py +162 -0
  102. google/genai/tests/documents/__init__.py +17 -0
  103. google/genai/tests/documents/test_delete.py +51 -0
  104. google/genai/tests/documents/test_get.py +85 -0
  105. google/genai/tests/documents/test_list.py +72 -0
  106. google/genai/tests/errors/__init__.py +1 -0
  107. google/genai/tests/errors/test_api_error.py +417 -0
  108. google/genai/tests/file_search_stores/__init__.py +17 -0
  109. google/genai/tests/file_search_stores/test_create.py +66 -0
  110. google/genai/tests/file_search_stores/test_delete.py +64 -0
  111. google/genai/tests/file_search_stores/test_get.py +94 -0
  112. google/genai/tests/file_search_stores/test_import_file.py +112 -0
  113. google/genai/tests/file_search_stores/test_list.py +57 -0
  114. google/genai/tests/file_search_stores/test_upload_to_file_search_store.py +141 -0
  115. google/genai/tests/files/__init__.py +17 -0
  116. google/genai/tests/files/test_delete.py +46 -0
  117. google/genai/tests/files/test_download.py +85 -0
  118. google/genai/tests/files/test_get.py +46 -0
  119. google/genai/tests/files/test_list.py +72 -0
  120. google/genai/tests/files/test_register.py +272 -0
  121. google/genai/tests/files/test_register_table.py +70 -0
  122. google/genai/tests/files/test_upload.py +255 -0
  123. google/genai/tests/imports/test_no_optional_imports.py +28 -0
  124. google/genai/tests/interactions/test_auth.py +476 -0
  125. google/genai/tests/interactions/test_integration.py +84 -0
  126. google/genai/tests/interactions/test_paths.py +105 -0
  127. google/genai/tests/live/__init__.py +16 -0
  128. google/genai/tests/live/test_live.py +2143 -0
  129. google/genai/tests/live/test_live_music.py +362 -0
  130. google/genai/tests/live/test_live_response.py +163 -0
  131. google/genai/tests/live/test_send_client_content.py +147 -0
  132. google/genai/tests/live/test_send_realtime_input.py +268 -0
  133. google/genai/tests/live/test_send_tool_response.py +222 -0
  134. google/genai/tests/local_tokenizer/__init__.py +17 -0
  135. google/genai/tests/local_tokenizer/test_local_tokenizer.py +343 -0
  136. google/genai/tests/local_tokenizer/test_local_tokenizer_loader.py +235 -0
  137. google/genai/tests/mcp/__init__.py +17 -0
  138. google/genai/tests/mcp/test_has_mcp_tool_usage.py +89 -0
  139. google/genai/tests/mcp/test_mcp_to_gemini_tools.py +191 -0
  140. google/genai/tests/mcp/test_parse_config_for_mcp_sessions.py +201 -0
  141. google/genai/tests/mcp/test_parse_config_for_mcp_usage.py +130 -0
  142. google/genai/tests/mcp/test_set_mcp_usage_header.py +72 -0
  143. google/genai/tests/models/__init__.py +17 -0
  144. google/genai/tests/models/constants.py +8 -0
  145. google/genai/tests/models/test_compute_tokens.py +120 -0
  146. google/genai/tests/models/test_count_tokens.py +159 -0
  147. google/genai/tests/models/test_delete.py +107 -0
  148. google/genai/tests/models/test_edit_image.py +264 -0
  149. google/genai/tests/models/test_embed_content.py +94 -0
  150. google/genai/tests/models/test_function_call_streaming.py +442 -0
  151. google/genai/tests/models/test_generate_content.py +2501 -0
  152. google/genai/tests/models/test_generate_content_cached_content.py +132 -0
  153. google/genai/tests/models/test_generate_content_config_zero_value.py +103 -0
  154. google/genai/tests/models/test_generate_content_from_apikey.py +44 -0
  155. google/genai/tests/models/test_generate_content_http_options.py +40 -0
  156. google/genai/tests/models/test_generate_content_image_generation.py +143 -0
  157. google/genai/tests/models/test_generate_content_mcp.py +343 -0
  158. google/genai/tests/models/test_generate_content_media_resolution.py +97 -0
  159. google/genai/tests/models/test_generate_content_model.py +139 -0
  160. google/genai/tests/models/test_generate_content_part.py +821 -0
  161. google/genai/tests/models/test_generate_content_thought.py +76 -0
  162. google/genai/tests/models/test_generate_content_tools.py +1761 -0
  163. google/genai/tests/models/test_generate_images.py +191 -0
  164. google/genai/tests/models/test_generate_videos.py +759 -0
  165. google/genai/tests/models/test_get.py +104 -0
  166. google/genai/tests/models/test_list.py +233 -0
  167. google/genai/tests/models/test_recontext_image.py +189 -0
  168. google/genai/tests/models/test_segment_image.py +148 -0
  169. google/genai/tests/models/test_update.py +95 -0
  170. google/genai/tests/models/test_upscale_image.py +157 -0
  171. google/genai/tests/operations/__init__.py +17 -0
  172. google/genai/tests/operations/test_get.py +38 -0
  173. google/genai/tests/public_samples/__init__.py +17 -0
  174. google/genai/tests/public_samples/test_gemini_text_only.py +34 -0
  175. google/genai/tests/pytest_helper.py +246 -0
  176. google/genai/tests/shared/__init__.py +16 -0
  177. google/genai/tests/shared/batches/__init__.py +14 -0
  178. google/genai/tests/shared/batches/test_create_delete.py +57 -0
  179. google/genai/tests/shared/batches/test_create_get_cancel.py +56 -0
  180. google/genai/tests/shared/batches/test_list.py +40 -0
  181. google/genai/tests/shared/caches/__init__.py +14 -0
  182. google/genai/tests/shared/caches/test_create_get_delete.py +67 -0
  183. google/genai/tests/shared/caches/test_create_update_get.py +71 -0
  184. google/genai/tests/shared/caches/test_list.py +40 -0
  185. google/genai/tests/shared/chats/__init__.py +14 -0
  186. google/genai/tests/shared/chats/test_send_message.py +48 -0
  187. google/genai/tests/shared/chats/test_send_message_stream.py +50 -0
  188. google/genai/tests/shared/files/__init__.py +14 -0
  189. google/genai/tests/shared/files/test_list.py +41 -0
  190. google/genai/tests/shared/files/test_upload_get_delete.py +54 -0
  191. google/genai/tests/shared/models/__init__.py +14 -0
  192. google/genai/tests/shared/models/test_compute_tokens.py +41 -0
  193. google/genai/tests/shared/models/test_count_tokens.py +40 -0
  194. google/genai/tests/shared/models/test_edit_image.py +67 -0
  195. google/genai/tests/shared/models/test_embed.py +40 -0
  196. google/genai/tests/shared/models/test_generate_content.py +39 -0
  197. google/genai/tests/shared/models/test_generate_content_stream.py +54 -0
  198. google/genai/tests/shared/models/test_generate_images.py +40 -0
  199. google/genai/tests/shared/models/test_generate_videos.py +38 -0
  200. google/genai/tests/shared/models/test_list.py +37 -0
  201. google/genai/tests/shared/models/test_recontext_image.py +55 -0
  202. google/genai/tests/shared/models/test_segment_image.py +52 -0
  203. google/genai/tests/shared/models/test_upscale_image.py +52 -0
  204. google/genai/tests/shared/tunings/__init__.py +16 -0
  205. google/genai/tests/shared/tunings/test_create.py +46 -0
  206. google/genai/tests/shared/tunings/test_create_get_cancel.py +56 -0
  207. google/genai/tests/shared/tunings/test_list.py +39 -0
  208. google/genai/tests/tokens/__init__.py +16 -0
  209. google/genai/tests/tokens/test_create.py +154 -0
  210. google/genai/tests/transformers/__init__.py +17 -0
  211. google/genai/tests/transformers/test_blobs.py +84 -0
  212. google/genai/tests/transformers/test_bytes.py +15 -0
  213. google/genai/tests/transformers/test_duck_type.py +96 -0
  214. google/genai/tests/transformers/test_function_responses.py +72 -0
  215. google/genai/tests/transformers/test_schema.py +653 -0
  216. google/genai/tests/transformers/test_t_batch.py +286 -0
  217. google/genai/tests/transformers/test_t_content.py +160 -0
  218. google/genai/tests/transformers/test_t_contents.py +398 -0
  219. google/genai/tests/transformers/test_t_part.py +85 -0
  220. google/genai/tests/transformers/test_t_parts.py +87 -0
  221. google/genai/tests/transformers/test_t_tool.py +157 -0
  222. google/genai/tests/transformers/test_t_tools.py +195 -0
  223. google/genai/tests/tunings/__init__.py +16 -0
  224. google/genai/tests/tunings/test_cancel.py +39 -0
  225. google/genai/tests/tunings/test_end_to_end.py +106 -0
  226. google/genai/tests/tunings/test_get.py +67 -0
  227. google/genai/tests/tunings/test_list.py +75 -0
  228. google/genai/tests/tunings/test_tune.py +268 -0
  229. google/genai/tests/types/__init__.py +16 -0
  230. google/genai/tests/types/test_bytes_internal.py +271 -0
  231. google/genai/tests/types/test_bytes_type.py +152 -0
  232. google/genai/tests/types/test_future.py +101 -0
  233. google/genai/tests/types/test_optional_types.py +36 -0
  234. google/genai/tests/types/test_part_type.py +616 -0
  235. google/genai/tests/types/test_schema_from_json_schema.py +417 -0
  236. google/genai/tests/types/test_schema_json_schema.py +468 -0
  237. google/genai/tests/types/test_types.py +2903 -0
  238. google/genai/types.py +631 -488
  239. google/genai/version.py +1 -1
  240. {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/METADATA +6 -11
  241. google_genai-1.58.0.dist-info/RECORD +358 -0
  242. google_genai-1.56.0.dist-info/RECORD +0 -162
  243. /google/genai/{_interactions/py.typed → tests/interactions/__init__.py} +0 -0
  244. {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/WHEEL +0 -0
  245. {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/licenses/LICENSE +0 -0
  246. {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,2501 @@
1
+ # Copyright 2025 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+
16
+ import os
17
+ import pathlib
18
+
19
+ from pydantic import BaseModel, ValidationError, Field, ConfigDict
20
+ from typing import Literal, List, Optional, Union, Set
21
+ from datetime import datetime
22
+ import pytest
23
+ import json
24
+ import logging
25
+ import sys
26
+ from ... import _transformers as t
27
+ from ... import errors
28
+ from ... import types
29
+ from .. import pytest_helper
30
+ from enum import Enum
31
+
32
+ GEMINI_FLASH_LATEST = 'gemini-2.5-flash'
33
+ GEMINI_FLASH_2_0 = 'gemini-2.0-flash-001'
34
+ GEMINI_FLASH_IMAGE_LATEST = 'gemini-2.5-flash-image'
35
+
36
+ IMAGE_PNG_FILE_PATH = pathlib.Path(__file__).parent / '../data/google.png'
37
+ image_bytes = IMAGE_PNG_FILE_PATH.read_bytes()
38
+
39
+ AUDIO_WAV_FILE_PATH = pathlib.Path(__file__).parent / '../data/voice_sample.wav'
40
+ audio_bytes = AUDIO_WAV_FILE_PATH.read_bytes()
41
+
42
+
43
+ safety_settings_with_method = [
44
+ {
45
+ 'category': 'HARM_CATEGORY_HATE_SPEECH',
46
+ 'threshold': 'BLOCK_ONLY_HIGH',
47
+ 'method': 'SEVERITY',
48
+ },
49
+ {
50
+ 'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',
51
+ 'threshold': 'BLOCK_LOW_AND_ABOVE',
52
+ 'method': 'PROBABILITY',
53
+ },
54
+ ]
55
+
56
+ test_http_options = {'api_version': 'v1', 'headers': {'test': 'headers'}}
57
+
58
+
59
+ class InstrumentEnum(Enum):
60
+ PERCUSSION = 'Percussion'
61
+ STRING = 'String'
62
+ WOODWIND = 'Woodwind'
63
+ BRASS = 'Brass'
64
+ KEYBOARD = 'Keyboard'
65
+
66
+
67
+ test_table: list[pytest_helper.TestTableItem] = [
68
+ pytest_helper.TestTableItem(
69
+ name='test_http_options_in_method',
70
+ parameters=types._GenerateContentParameters(
71
+ model=GEMINI_FLASH_LATEST,
72
+ contents=t.t_contents('What is your name?'),
73
+ config={
74
+ 'http_options': test_http_options,
75
+ },
76
+ ),
77
+ ),
78
+ pytest_helper.TestTableItem(
79
+ name='test_union_contents_is_string',
80
+ override_replay_id='test_sync',
81
+ parameters=types._GenerateContentParameters(
82
+ model=GEMINI_FLASH_LATEST, contents='Tell me a story in 300 words.'
83
+ ),
84
+ has_union=True,
85
+ ),
86
+ pytest_helper.TestTableItem(
87
+ name='test_union_contents_is_content',
88
+ override_replay_id='test_sync',
89
+ parameters=types._GenerateContentParameters(
90
+ model=GEMINI_FLASH_LATEST,
91
+ contents=types.Content(
92
+ role='user',
93
+ parts=[types.Part(text='Tell me a story in 300 words.')],
94
+ ),
95
+ ),
96
+ has_union=True,
97
+ ),
98
+ pytest_helper.TestTableItem(
99
+ name='test_union_contents_is_parts',
100
+ override_replay_id='test_sync',
101
+ parameters=types._GenerateContentParameters(
102
+ model=GEMINI_FLASH_LATEST,
103
+ contents=[types.Part(text='Tell me a story in 300 words.')],
104
+ ),
105
+ has_union=True,
106
+ ),
107
+ pytest_helper.TestTableItem(
108
+ name='test_union_contents_is_part',
109
+ override_replay_id='test_sync',
110
+ parameters=types._GenerateContentParameters(
111
+ model=GEMINI_FLASH_LATEST,
112
+ contents=types.Part(text='Tell me a story in 300 words.'),
113
+ ),
114
+ has_union=True,
115
+ ),
116
+ pytest_helper.TestTableItem(
117
+ name='test_sync_content_list',
118
+ override_replay_id='test_sync',
119
+ parameters=types._GenerateContentParameters(
120
+ model=GEMINI_FLASH_LATEST,
121
+ contents=[
122
+ types.Content(
123
+ role='user',
124
+ parts=[types.Part(text='Tell me a story in 300 words.')],
125
+ )
126
+ ],
127
+ ),
128
+ ),
129
+ # You need to enable llama API in Vertex AI Model Garden.
130
+ pytest_helper.TestTableItem(
131
+ name='test_llama',
132
+ parameters=types._GenerateContentParameters(
133
+ model='meta/llama-3.2-90b-vision-instruct-maas',
134
+ contents=t.t_contents('What is your name?'),
135
+ ),
136
+ exception_if_mldev='404',
137
+ skip_in_api_mode='it will encounter 403 for api mode',
138
+ ),
139
+ pytest_helper.TestTableItem(
140
+ name='test_system_instructions',
141
+ parameters=types._GenerateContentParameters(
142
+ model=GEMINI_FLASH_LATEST,
143
+ contents=t.t_contents('high'),
144
+ config={
145
+ 'system_instruction': t.t_content(
146
+ 'I say high, you say low'
147
+ )
148
+ },
149
+ ),
150
+ ),
151
+ pytest_helper.TestTableItem(
152
+ name='test_labels',
153
+ exception_if_mldev='not supported',
154
+ parameters=types._GenerateContentParameters(
155
+ model=GEMINI_FLASH_LATEST,
156
+ contents=t.t_contents('What is your name?'),
157
+ config={
158
+ 'labels': {'label1': 'value1', 'label2': 'value2'},
159
+ },
160
+ ),
161
+ ),
162
+ pytest_helper.TestTableItem(
163
+ name='test_simple_shared_generation_config',
164
+ parameters=types._GenerateContentParameters(
165
+ model=GEMINI_FLASH_LATEST,
166
+ contents=t.t_contents('What is your name?'),
167
+ config={
168
+ 'max_output_tokens': 100,
169
+ 'top_k': 2,
170
+ 'temperature': 0.5,
171
+ 'top_p': 0.5,
172
+ 'response_mime_type': 'application/json',
173
+ 'stop_sequences': ['\n'],
174
+ 'candidate_count': 2,
175
+ 'seed': 42,
176
+ },
177
+ ),
178
+ ),
179
+ pytest_helper.TestTableItem(
180
+ name='test_2_candidates_gemini_2_5_flash',
181
+ parameters=types._GenerateContentParameters(
182
+ model=GEMINI_FLASH_LATEST,
183
+ contents=t.t_contents('Tell me a story in 30 words.'),
184
+ config={
185
+ 'candidate_count': 2,
186
+ },
187
+ ),
188
+ ),
189
+ pytest_helper.TestTableItem(
190
+ name='test_safety_settings_on_difference',
191
+ parameters=types._GenerateContentParameters(
192
+ model=GEMINI_FLASH_LATEST,
193
+ contents=t.t_contents('What is your name?'),
194
+ config={
195
+ 'safety_settings': safety_settings_with_method,
196
+ },
197
+ ),
198
+ exception_if_mldev='method',
199
+ ),
200
+ pytest_helper.TestTableItem(
201
+ name='test_penalty',
202
+ parameters=types._GenerateContentParameters(
203
+ model=GEMINI_FLASH_2_0,
204
+ contents=t.t_contents('Tell me a story in 30 words.'),
205
+ config={
206
+ 'presence_penalty': 0.5,
207
+ 'frequency_penalty': 0.5,
208
+ },
209
+ ),
210
+ ),
211
+ pytest_helper.TestTableItem(
212
+ name='test_penalty_gemini_2_0_flash',
213
+ parameters=types._GenerateContentParameters(
214
+ model=GEMINI_FLASH_2_0,
215
+ contents=t.t_contents('Tell me a story in 30 words.'),
216
+ config={
217
+ 'presence_penalty': 0.5,
218
+ 'frequency_penalty': 0.5,
219
+ },
220
+ ),
221
+ ),
222
+ pytest_helper.TestTableItem(
223
+ name='test_google_search_tool',
224
+ parameters=types._GenerateContentParameters(
225
+ model=GEMINI_FLASH_LATEST,
226
+ contents=t.t_contents('Why is the sky blue?'),
227
+ config=types.GenerateContentConfig(
228
+ tools=[types.Tool(google_search=types.GoogleSearch())]
229
+ ),
230
+ ),
231
+ ),
232
+ pytest_helper.TestTableItem(
233
+ name='test_google_maps_tool',
234
+ parameters=types._GenerateContentParameters(
235
+ model=GEMINI_FLASH_LATEST,
236
+ contents=t.t_contents('Find restaurants near me.'),
237
+ config=types.GenerateContentConfig(
238
+ tools=[{'google_maps': {}}],
239
+ tool_config={
240
+ "retrieval_config": {
241
+ "lat_lng": {
242
+ "latitude": 37.421993,
243
+ "longitude": -122.079725,
244
+ }
245
+ }
246
+ }
247
+ ),
248
+ ),
249
+ ),
250
+ pytest_helper.TestTableItem(
251
+ name='test_google_search_tool_with_time_range_filter',
252
+ parameters=types._GenerateContentParameters(
253
+ model=GEMINI_FLASH_LATEST,
254
+ contents=t.t_contents('What is the QQQ stock price?'),
255
+ config=types.GenerateContentConfig(
256
+ tools=[
257
+ types.Tool(
258
+ google_search=types.GoogleSearch(
259
+ time_range_filter=types.Interval(
260
+ start_time=datetime.fromisoformat(
261
+ '2025-05-01T00:00:00Z'
262
+ ),
263
+ end_time=datetime.fromisoformat(
264
+ '2025-05-03T00:00:00Z'
265
+ ),
266
+ )
267
+ )
268
+ )
269
+ ]
270
+ ),
271
+ ),
272
+ ),
273
+ pytest_helper.TestTableItem(
274
+ name='test_google_search_tool_with_exclude_domains',
275
+ parameters=types._GenerateContentParameters(
276
+ model=GEMINI_FLASH_LATEST,
277
+ contents=t.t_contents('Why is the sky blue?'),
278
+ config=types.GenerateContentConfig(
279
+ tools=[
280
+ types.Tool(
281
+ google_search=types.GoogleSearch(
282
+ exclude_domains=['amazon.com', 'facebook.com']
283
+ )
284
+ )
285
+ ]
286
+ ),
287
+ ),
288
+ exception_if_mldev='not supported in',
289
+ ),
290
+ pytest_helper.TestTableItem(
291
+ name='test_google_search_tool_with_blocking_confidence',
292
+ parameters=types._GenerateContentParameters(
293
+ model=GEMINI_FLASH_LATEST,
294
+ contents=t.t_contents('Why is the sky blue?'),
295
+ config=types.GenerateContentConfig(
296
+ tools=[
297
+ types.Tool(
298
+ google_search=types.GoogleSearch(
299
+ blocking_confidence=types.PhishBlockThreshold.BLOCK_LOW_AND_ABOVE,
300
+ )
301
+ )
302
+ ]
303
+ ),
304
+ ),
305
+ exception_if_mldev='not supported in',
306
+ ),
307
+ pytest_helper.TestTableItem(
308
+ name='test_enterprise_web_search_tool',
309
+ parameters=types._GenerateContentParameters(
310
+ model=GEMINI_FLASH_LATEST,
311
+ contents=t.t_contents('Why is the sky blue?'),
312
+ config=types.GenerateContentConfig(
313
+ tools=[
314
+ types.Tool(
315
+ enterprise_web_search=types.EnterpriseWebSearch()
316
+ )
317
+ ]
318
+ ),
319
+ ),
320
+ exception_if_mldev='not supported in',
321
+ ),
322
+ pytest_helper.TestTableItem(
323
+ name='test_enterprise_web_search_tool_with_exclude_domains',
324
+ parameters=types._GenerateContentParameters(
325
+ model=GEMINI_FLASH_LATEST,
326
+ contents=t.t_contents('Why is the sky blue?'),
327
+ config=types.GenerateContentConfig(
328
+ tools=[
329
+ types.Tool(
330
+ enterprise_web_search=types.EnterpriseWebSearch(
331
+ exclude_domains=['amazon.com', 'facebook.com']
332
+ )
333
+ )
334
+ ]
335
+ ),
336
+ ),
337
+ exception_if_mldev='not supported in',
338
+ ),
339
+ pytest_helper.TestTableItem(
340
+ name='test_enterprise_web_search_tool_with_blocking_confidence',
341
+ parameters=types._GenerateContentParameters(
342
+ model=GEMINI_FLASH_LATEST,
343
+ contents=t.t_contents('Why is the sky blue?'),
344
+ config=types.GenerateContentConfig(
345
+ tools=[
346
+ types.Tool(
347
+ enterprise_web_search=types.EnterpriseWebSearch(
348
+ blocking_confidence=types.PhishBlockThreshold.BLOCK_LOW_AND_ABOVE,
349
+ )
350
+ )
351
+ ]
352
+ ),
353
+ ),
354
+ exception_if_mldev='not supported in',
355
+ ),
356
+ pytest_helper.TestTableItem(
357
+ name='test_speech_with_config',
358
+ parameters=types._GenerateContentParameters(
359
+ model='gemini-2.5-flash-preview-tts',
360
+ contents=t.t_contents(
361
+ 'Produce a speech response saying "Cheese"'
362
+ ),
363
+ config=types.GenerateContentConfig(
364
+ response_modalities=['audio'],
365
+ speech_config=types.SpeechConfig(
366
+ voice_config=types.VoiceConfig(
367
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
368
+ voice_name='charon'
369
+ )
370
+ )
371
+ ),
372
+ ),
373
+ ),
374
+ ),
375
+ pytest_helper.TestTableItem(
376
+ name='test_speech_with_multi_speaker_voice_config',
377
+ parameters=types._GenerateContentParameters(
378
+ model='gemini-2.5-flash-preview-tts',
379
+ contents=t.t_contents(
380
+ 'Alice says "Hi", Bob replies with "what\'s up"?'
381
+ ),
382
+ config=types.GenerateContentConfig(
383
+ response_modalities=['audio'],
384
+ speech_config=types.SpeechConfig(
385
+ multi_speaker_voice_config=types.MultiSpeakerVoiceConfig(
386
+ speaker_voice_configs=[
387
+ types.SpeakerVoiceConfig(
388
+ speaker='Alice',
389
+ voice_config=types.VoiceConfig(
390
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
391
+ voice_name='leda'
392
+ )
393
+ ),
394
+ ),
395
+ types.SpeakerVoiceConfig(
396
+ speaker='Bob',
397
+ voice_config=types.VoiceConfig(
398
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
399
+ voice_name='kore'
400
+ )
401
+ ),
402
+ ),
403
+ ],
404
+ )
405
+ ),
406
+ ),
407
+ ),
408
+ ),
409
+ pytest_helper.TestTableItem(
410
+ name='test_speech_error_with_speech_config_and_multi_speech_config',
411
+ exception_if_vertex='mutually exclusive',
412
+ exception_if_mldev='mutually exclusive',
413
+ parameters=types._GenerateContentParameters(
414
+ model='gemini-2.5-flash-preview-tts',
415
+ contents=t.t_contents(
416
+ 'Alice says "Hi", Bob replies with "what\'s up"?'
417
+ ),
418
+ config=types.GenerateContentConfig(
419
+ response_modalities=['audio'],
420
+ speech_config=types.SpeechConfig(
421
+ voice_config=types.VoiceConfig(
422
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
423
+ voice_name='puck'
424
+ )
425
+ ),
426
+ multi_speaker_voice_config=types.MultiSpeakerVoiceConfig(
427
+ speaker_voice_configs=[
428
+ types.SpeakerVoiceConfig(
429
+ speaker='Alice',
430
+ voice_config=types.VoiceConfig(
431
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
432
+ voice_name='leda'
433
+ )
434
+ ),
435
+ ),
436
+ types.SpeakerVoiceConfig(
437
+ speaker='Bob',
438
+ voice_config=types.VoiceConfig(
439
+ prebuilt_voice_config=types.PrebuiltVoiceConfig(
440
+ voice_name='kore'
441
+ )
442
+ ),
443
+ ),
444
+ ],
445
+ ),
446
+ ),
447
+ ),
448
+ ),
449
+ ),
450
+ pytest_helper.TestTableItem(
451
+ name='test_union_speech_string_config',
452
+ parameters=types._GenerateContentParameters(
453
+ model='gemini-2.5-flash-preview-tts',
454
+ contents='Say hello!',
455
+ config=types.GenerateContentConfig(
456
+ response_modalities=['audio'], speech_config='charon'
457
+ ),
458
+ ),
459
+ has_union=True,
460
+ ),
461
+ pytest_helper.TestTableItem(
462
+ name='test_audio_timestamp',
463
+ parameters=types._GenerateContentParameters(
464
+ model=GEMINI_FLASH_LATEST,
465
+ contents=[
466
+ types.Content(
467
+ role='user',
468
+ parts=[
469
+ types.Part(
470
+ file_data=types.FileData(
471
+ file_uri='gs://cloud-samples-data/generative-ai/audio/pixel.mp3',
472
+ mime_type='audio/mpeg',
473
+ )
474
+ ),
475
+ types.Part(
476
+ text="""Can you transcribe this interview, in the
477
+ format of timecode, speaker, caption. Use speaker A,
478
+ speaker B, etc. to identify speakers."""
479
+ ),
480
+ ],
481
+ )
482
+ ],
483
+ config=types.GenerateContentConfig(audio_timestamp=True),
484
+ ),
485
+ exception_if_mldev='not supported',
486
+ ),
487
+ pytest_helper.TestTableItem(
488
+ name='test_response_schema_with_default',
489
+ parameters=types._GenerateContentParameters(
490
+ model=GEMINI_FLASH_LATEST,
491
+ contents=t.t_contents('What is your name?'),
492
+ config={
493
+ 'response_mime_type': 'application/json',
494
+ 'response_schema': {
495
+ 'type': 'OBJECT',
496
+ 'properties': {
497
+ 'name': {
498
+ 'type': 'STRING',
499
+ 'default': 'default_name',
500
+ },
501
+ },
502
+ },
503
+ },
504
+ ),
505
+ ignore_keys=['parsed'],
506
+ ),
507
+ pytest_helper.TestTableItem(
508
+ name='test_invalid_model_parameter_path',
509
+ parameters=types._GenerateContentParameters(
510
+ model='gemini-2.5-flash/../../upload/v1beta/files',
511
+ contents=t.t_contents('What is your name?'),
512
+ ),
513
+ exception_if_vertex='invalid model parameter',
514
+ exception_if_mldev='invalid model parameter',
515
+ ),
516
+ pytest_helper.TestTableItem(
517
+ name='test_invalid_model_parameter_question_mark',
518
+ parameters=types._GenerateContentParameters(
519
+ model='gemini-2.5-flash?',
520
+ contents=t.t_contents('What is your name?'),
521
+ ),
522
+ exception_if_vertex='invalid model parameter',
523
+ exception_if_mldev='invalid model parameter',
524
+ ),
525
+ pytest_helper.TestTableItem(
526
+ name='test_invalid_model_parameter_ampersand',
527
+ parameters=types._GenerateContentParameters(
528
+ model='gemini-2.5-flash&',
529
+ contents=t.t_contents('What is your name?'),
530
+ ),
531
+ exception_if_vertex='invalid model parameter',
532
+ exception_if_mldev='invalid model parameter',
533
+ ),
534
+ pytest_helper.TestTableItem(
535
+ name='test_enhanced_civic_answers',
536
+ parameters=types._GenerateContentParameters(
537
+ model=GEMINI_FLASH_LATEST,
538
+ contents=t.t_contents(
539
+ 'Summarize the evidence that confirms widespread voter fraud was'
540
+ ' the reason the last national election results were'
541
+ ' inaccurate.'
542
+ ),
543
+ config={
544
+ 'enable_enhanced_civic_answers': True,
545
+ },
546
+ ),
547
+ exception_if_vertex='not supported',
548
+ ),
549
+ ]
550
+
551
+ pytestmark = pytest_helper.setup(
552
+ file=__file__,
553
+ globals_for_file=globals(),
554
+ test_method='models.generate_content',
555
+ test_table=test_table,
556
+ )
557
+ pytest_plugins = ('pytest_asyncio',)
558
+
559
+
560
+ def test_sync_with_headers(client):
561
+ response = client.models.generate_content(
562
+ model=GEMINI_FLASH_LATEST,
563
+ contents='Tell me a story in 300 words.',
564
+ )
565
+ assert response.sdk_http_response.headers is not None
566
+ assert response.sdk_http_response.body is None
567
+
568
+
569
+ def test_sync_with_full_response(client):
570
+ response = client.models.generate_content(
571
+ model=GEMINI_FLASH_LATEST,
572
+ contents='Tell me a story in 300 words.',
573
+ config={
574
+ 'should_return_http_response': True,
575
+ },
576
+ )
577
+ print(response.sdk_http_response.body)
578
+ assert response.sdk_http_response.headers is not None
579
+ assert response.sdk_http_response.body is not None
580
+ assert 'candidates' in response.sdk_http_response.body
581
+ assert 'content' in response.sdk_http_response.body
582
+ assert 'parts' in response.sdk_http_response.body
583
+ assert 'usageMetadata' in response.sdk_http_response.body
584
+
585
+ @pytest.mark.asyncio
586
+ async def test_async(client):
587
+ response = await client.aio.models.generate_content(
588
+ model=GEMINI_FLASH_LATEST,
589
+ contents='Tell me a story in 300 words.',
590
+ config={
591
+ 'http_options': test_http_options,
592
+ },
593
+ )
594
+ assert response.text
595
+
596
+
597
+ @pytest.mark.asyncio
598
+ async def test_async_with_headers(client):
599
+ response = await client.aio.models.generate_content(
600
+ model=GEMINI_FLASH_LATEST,
601
+ contents='Tell me a story in 300 words.',
602
+ )
603
+ assert response.sdk_http_response.headers is not None
604
+ assert response.sdk_http_response.body is None
605
+
606
+
607
+ @pytest.mark.asyncio
608
+ async def test_async_with_full_response(client):
609
+ response = await client.aio.models.generate_content(
610
+ model=GEMINI_FLASH_LATEST,
611
+ contents='Tell me a story in 300 words.',
612
+ config={
613
+ 'should_return_http_response': True,
614
+ },
615
+ )
616
+ assert response.sdk_http_response.headers is not None
617
+ assert response.sdk_http_response.body is not None
618
+ assert 'candidates' in response.sdk_http_response.body
619
+ assert 'content' in response.sdk_http_response.body
620
+ assert 'parts' in response.sdk_http_response.body
621
+ assert 'usageMetadata' in response.sdk_http_response.body
622
+
623
+
624
+ def test_sync_stream(client):
625
+ response = client.models.generate_content_stream(
626
+ model=GEMINI_FLASH_LATEST,
627
+ contents='Tell me a story in 300 words.',
628
+ config={
629
+ 'http_options': test_http_options,
630
+ },
631
+ )
632
+ chunks = 0
633
+ for part in response:
634
+ chunks += 1
635
+ assert part.text is not None or part.candidates[0].finish_reason
636
+
637
+ assert chunks >= 1
638
+
639
+
640
+ def test_sync_stream_with_should_return_http_headers(client):
641
+ response = client.models.generate_content_stream(
642
+ model=GEMINI_FLASH_LATEST,
643
+ contents='Tell me a story in 300 words.',
644
+ config={
645
+ 'http_options': test_http_options,
646
+ },
647
+ )
648
+ chunks = 0
649
+ for part in response:
650
+ chunks += 1
651
+ assert part.text is not None or part.candidates[0].finish_reason
652
+ assert part.sdk_http_response.headers is not None
653
+ assert chunks >= 1
654
+
655
+
656
+ def test_sync_stream_with_non_text_modality(client):
657
+ response = client.models.generate_content_stream(
658
+ model='gemini-2.0-flash-preview-image-generation',
659
+ contents=(
660
+ 'Generate an image of the Eiffel tower with fireworks in the'
661
+ ' background.'
662
+ ),
663
+ config={
664
+ 'response_modalities': ['IMAGE', 'TEXT'],
665
+ },
666
+ )
667
+ chunks = 0
668
+ for chunk in response:
669
+ chunks += 1
670
+ if chunk.candidates[0].finish_reason is not None:
671
+ continue
672
+ for part in chunk.parts:
673
+ assert part.text is not None or part.inline_data is not None
674
+
675
+ assert chunks >= 1
676
+
677
+
678
+ @pytest.mark.asyncio
679
+ async def test_async_stream(client):
680
+ chunks = 0
681
+ async for part in await client.aio.models.generate_content_stream(
682
+ model=GEMINI_FLASH_LATEST, contents='Tell me a story in 300 words.',
683
+ config={
684
+ 'http_options': test_http_options,
685
+ },
686
+ ):
687
+ chunks += 1
688
+ assert part.text is not None or part.candidates[0].finish_reason
689
+
690
+ assert chunks >= 1
691
+
692
+
693
+ @pytest.mark.asyncio
694
+ async def test_async_stream_with_headers(client):
695
+ chunks = 0
696
+ async for part in await client.aio.models.generate_content_stream(
697
+ model=GEMINI_FLASH_LATEST, contents='Tell me a story in 300 words.',
698
+ config={
699
+ 'http_options': test_http_options,
700
+ },
701
+ ):
702
+ chunks += 1
703
+ assert part.text is not None or part.candidates[0].finish_reason
704
+ assert part.sdk_http_response.headers is not None
705
+
706
+ assert chunks >= 1
707
+
708
+
709
+ @pytest.mark.asyncio
710
+ async def test_async_stream_with_non_text_modality(client):
711
+ chunks = 0
712
+ async for chunk in await client.aio.models.generate_content_stream(
713
+ model=GEMINI_FLASH_IMAGE_LATEST,
714
+ contents=(
715
+ 'Generate an image of the Eiffel tower with fireworks in the'
716
+ ' background.'
717
+ ),
718
+ config={
719
+ 'response_modalities': ['IMAGE', 'TEXT'],
720
+ },
721
+ ):
722
+ chunks += 1
723
+ if chunk.candidates[0].finish_reason is not None:
724
+ continue
725
+ for part in chunk.parts:
726
+ assert part.text is not None or part.inline_data is not None
727
+
728
+ assert chunks >= 1
729
+
730
+
731
+ def test_simple_shared_generation_config_stream(client):
732
+ chunks = 0
733
+ for chunk in client.models.generate_content_stream(
734
+ model=GEMINI_FLASH_LATEST,
735
+ contents='tell me a story in 300 words',
736
+ config={
737
+ 'max_output_tokens': 1000,
738
+ 'top_k': 2,
739
+ 'temperature': 0.5,
740
+ 'top_p': 0.5,
741
+ 'response_mime_type': 'application/json',
742
+ 'stop_sequences': ['\n'],
743
+ 'seed': 42,
744
+ },
745
+ ):
746
+ chunks += 1
747
+ assert (
748
+ chunk.text is not None or chunk.candidates[0].finish_reason
749
+ ), f'vertexai: {client._api_client.vertexai}, {chunk.candidate[0]}'
750
+ assert chunks >= 1
751
+
752
+
753
+ @pytest.mark.asyncio
754
+ async def test_simple_shared_generation_config_async(client):
755
+ response = await client.aio.models.generate_content(
756
+ model=GEMINI_FLASH_LATEST,
757
+ contents='tell me a story in 300 words',
758
+ config={
759
+ 'max_output_tokens': 4000,
760
+ 'top_k': 2,
761
+ 'temperature': 0.5,
762
+ 'top_p': 0.5,
763
+ 'response_mime_type': 'application/json',
764
+ 'stop_sequences': ['\n'],
765
+ 'seed': 42,
766
+ },
767
+ )
768
+
769
+
770
+ @pytest.mark.asyncio
771
+ async def test_simple_shared_generation_config_stream_async(client):
772
+ chunks = 0
773
+ async for part in await client.aio.models.generate_content_stream(
774
+ model=GEMINI_FLASH_2_0,
775
+ contents='tell me a story in 300 words',
776
+ config={
777
+ 'max_output_tokens': 400,
778
+ 'top_k': 2,
779
+ 'temperature': 0.5,
780
+ 'top_p': 0.5,
781
+ 'response_mime_type': 'application/json',
782
+ 'stop_sequences': ['\n'],
783
+ 'seed': 42,
784
+ },
785
+ ):
786
+ chunks += 1
787
+ assert part.text is not None or part.candidates[0].finish_reason
788
+ assert chunks >= 1
789
+
790
+
791
+ def test_log_probs(client):
792
+ client.models.generate_content(
793
+ model=GEMINI_FLASH_2_0,
794
+ contents='What is your name?',
795
+ config={
796
+ 'logprobs': 2,
797
+ 'presence_penalty': 0.5,
798
+ 'frequency_penalty': 0.5,
799
+ 'response_logprobs': True,
800
+ },
801
+ )
802
+
803
+
804
+ def test_simple_config(client):
805
+ response = client.models.generate_content(
806
+ model=GEMINI_FLASH_LATEST,
807
+ contents='What is your name?',
808
+ config={
809
+ 'max_output_tokens': 300,
810
+ 'top_k': 2,
811
+ },
812
+ )
813
+ assert response.text
814
+
815
+
816
+ def test_model_selection_config_dict(client):
817
+ if not client.vertexai:
818
+ return
819
+ response = client.models.generate_content(
820
+ model=GEMINI_FLASH_LATEST,
821
+ contents='Give me a Taylor Swift lyric and explain its meaning.',
822
+ config={
823
+ 'model_selection_config': {
824
+ 'feature_selection_preference': 'PRIORITIZE_COST'
825
+ }
826
+ },
827
+ )
828
+ assert response.text
829
+
830
+
831
+ def test_model_selection_config_pydantic(client):
832
+ if not client.vertexai:
833
+ return
834
+ response = client.models.generate_content(
835
+ model=GEMINI_FLASH_LATEST,
836
+ contents='Give me a Taylor Swift lyric and explain its meaning.',
837
+ config=types.GenerateContentConfig(
838
+ model_selection_config=types.ModelSelectionConfig(
839
+ feature_selection_preference=types.FeatureSelectionPreference.PRIORITIZE_QUALITY
840
+ )
841
+ ),
842
+ )
843
+ assert response.text
844
+
845
+
846
+ def test_sdk_logger_logs_warnings_once(client, caplog):
847
+ from ... import types as types_module
848
+
849
+ types_module._response_text_warning_logged = False
850
+
851
+ caplog.set_level(logging.WARNING, logger='google_genai.types')
852
+
853
+ response = client.models.generate_content(
854
+ model=GEMINI_FLASH_LATEST,
855
+ contents='Tell me a 50 word story about cheese.',
856
+ config={
857
+ 'candidate_count': 2,
858
+ }
859
+ )
860
+ assert response.text
861
+ assert 'WARNING' in caplog.text
862
+ assert 'there are 2 candidates' in caplog.text
863
+ caplog_after_first_call = caplog.text
864
+ assert len(caplog.records) == 1
865
+ client.models.generate_content(
866
+ model=GEMINI_FLASH_LATEST,
867
+ contents='Tell me a 50 word story about cheese.',
868
+ config={
869
+ 'candidate_count': 2,
870
+ }
871
+ )
872
+ assert caplog.text == caplog_after_first_call
873
+ assert len(caplog.records) == 1
874
+
875
+
876
+ def test_response_create_time_and_response_id(client):
877
+ if client.vertexai:
878
+ response = client.models.generate_content(
879
+ model=GEMINI_FLASH_LATEST,
880
+ contents='What is your name?',
881
+ config={
882
+ 'max_output_tokens': 300,
883
+ 'top_k': 2,
884
+ },
885
+ )
886
+ # create_time and response_id are not supported in mldev
887
+ assert response.create_time
888
+ assert response.response_id
889
+ assert isinstance(response.create_time, datetime)
890
+
891
+
892
+ def test_safety_settings(client):
893
+ response = client.models.generate_content(
894
+ model=GEMINI_FLASH_LATEST,
895
+ contents='What is your name?',
896
+ config={
897
+ 'safety_settings': [{
898
+ 'category': 'HARM_CATEGORY_HATE_SPEECH',
899
+ 'threshold': 'BLOCK_ONLY_HIGH',
900
+ }]
901
+ },
902
+ )
903
+ assert response.text
904
+
905
+
906
+ def test_safety_settings_on_difference_stream(client):
907
+ safety_settings = [
908
+ {
909
+ 'category': 'HARM_CATEGORY_HATE_SPEECH',
910
+ 'threshold': 'BLOCK_ONLY_HIGH',
911
+ 'method': 'SEVERITY',
912
+ },
913
+ {
914
+ 'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',
915
+ 'threshold': 'BLOCK_LOW_AND_ABOVE',
916
+ 'method': 'PROBABILITY',
917
+ },
918
+ ]
919
+ if client._api_client.vertexai:
920
+ for part in client.models.generate_content_stream(
921
+ model=GEMINI_FLASH_LATEST,
922
+ contents='What is your name?',
923
+ config={
924
+ 'safety_settings': safety_settings,
925
+ },
926
+ ):
927
+ pass
928
+ else:
929
+ with pytest.raises(ValueError) as e:
930
+ for part in client.models.generate_content_stream(
931
+ model=GEMINI_FLASH_LATEST,
932
+ contents='What is your name?',
933
+ config={
934
+ 'safety_settings': safety_settings,
935
+ },
936
+ ):
937
+ pass
938
+ assert 'method' in str(e)
939
+
940
+
941
+ def test_safety_settings_on_difference_stream_with_lower_enum(client):
942
+ safety_settings = [
943
+ {
944
+ 'category': 'harm_category_hate_speech',
945
+ 'threshold': 'block_only_high',
946
+ 'method': 'severity',
947
+ },
948
+ {
949
+ 'category': 'harm_category_dangerous_content',
950
+ 'threshold': 'block_low_and_above',
951
+ 'method': 'probability',
952
+ },
953
+ ]
954
+ if client._api_client.vertexai:
955
+ for part in client.models.generate_content_stream(
956
+ model=GEMINI_FLASH_LATEST,
957
+ contents='What is your name?',
958
+ config={
959
+ 'safety_settings': safety_settings,
960
+ },
961
+ ):
962
+ pass
963
+ else:
964
+ with pytest.raises(ValueError) as e:
965
+ for part in client.models.generate_content_stream(
966
+ model=GEMINI_FLASH_LATEST,
967
+ contents='What is your name?',
968
+ config={
969
+ 'safety_settings': safety_settings,
970
+ },
971
+ ):
972
+ pass
973
+ assert 'method' in str(e)
974
+
975
+
976
+ def test_pydantic_schema(client):
977
+ class CountryInfo(BaseModel):
978
+ # We need at least one test with `title` in properties in case the schema
979
+ # edits go wrong.
980
+ title: str
981
+ population: int
982
+ capital: str
983
+ continent: str
984
+ gdp: int
985
+ official_language: str
986
+ total_area_sq_mi: int
987
+
988
+ response = client.models.generate_content(
989
+ model=GEMINI_FLASH_LATEST,
990
+ contents='Give me information of the United States.',
991
+ config={
992
+ 'response_mime_type': 'application/json',
993
+ 'response_schema': CountryInfo,
994
+ },
995
+ )
996
+ assert isinstance(response.parsed, CountryInfo)
997
+
998
+ def test_json_schema_fields(client):
999
+ class UserRole(str, Enum):
1000
+ ADMIN = "admin"
1001
+ VIEWER = "viewer"
1002
+ class Address(BaseModel):
1003
+ street: str
1004
+ city: str
1005
+ class UserProfile(BaseModel):
1006
+ username: str = Field(description="User's unique name")
1007
+ age: Optional[int] = Field(ge=0, le=20)
1008
+ roles: Set[UserRole] = Field(min_items=1)
1009
+ contact: Union[Address, str]
1010
+
1011
+ model_config = ConfigDict(
1012
+ title="User Schema", description="A user profile"
1013
+ ) # This is the title of the schema
1014
+
1015
+ response = client.models.generate_content(
1016
+ model=GEMINI_FLASH_LATEST,
1017
+ contents='Give me information of the United States.',
1018
+ config={
1019
+ 'response_mime_type': 'application/json',
1020
+ 'response_json_schema': UserProfile.model_json_schema(),
1021
+ },
1022
+ )
1023
+ print(response.parsed)
1024
+ assert response.parsed != None
1025
+
1026
+
1027
+ def test_pydantic_schema_orders_properties(client):
1028
+ class Restaurant(BaseModel):
1029
+ name: str
1030
+ rating: int
1031
+ fun_fact: str
1032
+
1033
+ response = client.models.generate_content(
1034
+ model=GEMINI_FLASH_LATEST,
1035
+ contents='Give me information about a restaurant in Boston.',
1036
+ config={
1037
+ 'response_mime_type': 'application/json',
1038
+ 'response_schema': Restaurant,
1039
+ },
1040
+ )
1041
+ response_text_json = json.loads(response.text)
1042
+ response_keys = list(response_text_json.keys())
1043
+ assert response_keys[0] == 'name'
1044
+ assert response_keys == list(Restaurant.model_fields.keys())
1045
+
1046
+
1047
+ def test_pydantic_schema_with_default_value(client):
1048
+ class Restaurant(BaseModel):
1049
+ name: str
1050
+ rating: int = 0
1051
+ city: Optional[str] = 'New York'
1052
+
1053
+ response = client.models.generate_content(
1054
+ model=GEMINI_FLASH_LATEST,
1055
+ contents='Can you recommend a restaurant for me?',
1056
+ config={
1057
+ 'response_mime_type': 'application/json',
1058
+ 'response_schema': Restaurant,
1059
+ },
1060
+ )
1061
+ assert isinstance(response.parsed, Restaurant)
1062
+
1063
+
1064
+ def test_repeated_pydantic_schema(client):
1065
+ # This tests the defs handling on the pydantic side.
1066
+ class Person(BaseModel):
1067
+ name: str
1068
+
1069
+ class Relationship(BaseModel):
1070
+ relationship: str
1071
+ person1: Person
1072
+ person2: Person
1073
+
1074
+ response = client.models.generate_content(
1075
+ model=GEMINI_FLASH_LATEST,
1076
+ contents='Create a couple.',
1077
+ config={
1078
+ 'response_mime_type': 'application/json',
1079
+ 'response_schema': Relationship,
1080
+ },
1081
+ )
1082
+ assert isinstance(response.parsed, Relationship)
1083
+
1084
+
1085
+ def test_int_schema(client):
1086
+ response = client.models.generate_content(
1087
+ model=GEMINI_FLASH_LATEST,
1088
+ contents="what's your favorite number?",
1089
+ config={
1090
+ 'response_mime_type': 'application/json',
1091
+ 'response_schema': int,
1092
+ },
1093
+ )
1094
+ assert isinstance(response.parsed, int)
1095
+
1096
+
1097
+ def test_nested_list_of_int_schema(client):
1098
+ response = client.models.generate_content(
1099
+ model=GEMINI_FLASH_LATEST,
1100
+ contents="Can you return two matrices, a 2x3 and a 3x4?",
1101
+ config={
1102
+ 'response_mime_type': 'application/json',
1103
+ 'response_schema': list[list[list[int]]],
1104
+ },
1105
+ )
1106
+ assert isinstance(response.parsed[0][0][0], int)
1107
+
1108
+
1109
+ def test_literal_schema(client):
1110
+ response = client.models.generate_content(
1111
+ model=GEMINI_FLASH_LATEST,
1112
+ contents='Which ice cream flavor should I order?',
1113
+ config={
1114
+ 'response_mime_type': 'application/json',
1115
+ 'response_schema': Literal['chocolate', 'vanilla', 'cookie dough'],
1116
+ },
1117
+ )
1118
+
1119
+ allowed_values = ['chocolate', 'vanilla', 'cookie dough']
1120
+ assert isinstance(response.parsed, str)
1121
+ assert response.parsed in allowed_values
1122
+
1123
+
1124
+ def test_literal_schema_with_non_string_types_raises(client):
1125
+ with pytest.raises(ValueError) as e:
1126
+ client.models.generate_content(
1127
+ model=GEMINI_FLASH_LATEST,
1128
+ contents='Which ice cream flavor should I order?',
1129
+ config={
1130
+ 'response_mime_type': 'application/json',
1131
+ 'response_schema': Literal['chocolate', 'vanilla', 1],
1132
+ },
1133
+ )
1134
+ assert 'validation error' in str(e)
1135
+
1136
+
1137
+ @pytest.mark.skipif(
1138
+ sys.version_info < (3, 10),
1139
+ reason='| is not supported in Python 3.9',
1140
+ )
1141
+ def test_pydantic_schema_with_literal(client):
1142
+ class Movie(BaseModel):
1143
+ name: str
1144
+ genre: Literal['action', 'comedy', 'drama']
1145
+
1146
+ response = client.models.generate_content(
1147
+ model=GEMINI_FLASH_LATEST,
1148
+ contents='Give me information about the movie "Mean Girls"',
1149
+ config={
1150
+ 'response_mime_type': 'application/json',
1151
+ 'response_schema': Movie,
1152
+ },
1153
+ )
1154
+ assert isinstance(response.parsed, Movie)
1155
+ assert isinstance(response.parsed.genre, str)
1156
+ assert response.parsed.genre in ['action', 'comedy', 'drama']
1157
+
1158
+
1159
+ @pytest.mark.skipif(
1160
+ sys.version_info < (3, 10),
1161
+ reason='| is not supported in Python 3.9',
1162
+ )
1163
+ def test_pydantic_schema_with_single_value_literal(client):
1164
+ class Movie(BaseModel):
1165
+ name: str
1166
+ genre: Literal['action']
1167
+
1168
+ response = client.models.generate_content(
1169
+ model=GEMINI_FLASH_LATEST,
1170
+ contents='Give me information about the movie "The Matrix"',
1171
+ config={
1172
+ 'response_mime_type': 'application/json',
1173
+ 'response_schema': Movie,
1174
+ },
1175
+ )
1176
+ assert isinstance(response.parsed, Movie)
1177
+ assert response.parsed.genre == 'action'
1178
+
1179
+
1180
+ @pytest.mark.skipif(
1181
+ sys.version_info < (3, 10),
1182
+ reason='| is not supported in Python 3.9',
1183
+ )
1184
+ def test_pydantic_schema_with_none(client):
1185
+ class CountryInfo(BaseModel):
1186
+ name: str
1187
+ total_area_sq_mi: int | None = None
1188
+
1189
+ response = client.models.generate_content(
1190
+ model=GEMINI_FLASH_LATEST,
1191
+ contents='Give me information of the United States.',
1192
+ config={
1193
+ 'response_mime_type': 'application/json',
1194
+ 'response_schema': CountryInfo,
1195
+ },
1196
+ )
1197
+ assert isinstance(response.parsed, CountryInfo)
1198
+ assert type(response.parsed.total_area_sq_mi) in [int, None]
1199
+
1200
+
1201
+ def test_pydantic_schema_with_optional_none(client):
1202
+ class CountryInfo(BaseModel):
1203
+ name: str
1204
+ total_area_sq_mi: Optional[int] = None
1205
+
1206
+ response = client.models.generate_content(
1207
+ model=GEMINI_FLASH_LATEST,
1208
+ contents='Give me information of the United States but don\'t include the total area.',
1209
+ config={
1210
+ 'response_mime_type': 'application/json',
1211
+ 'response_schema': CountryInfo,
1212
+ },
1213
+ )
1214
+ assert isinstance(response.parsed, CountryInfo)
1215
+ assert response.parsed.total_area_sq_mi is None
1216
+
1217
+
1218
+ def test_pydantic_schema_from_json(client):
1219
+ class CountryInfo(BaseModel):
1220
+ name: str
1221
+ pupulation: int
1222
+ capital: str
1223
+ continent: str
1224
+ gdp: int
1225
+ official_language: str
1226
+ total_area_sq_mi: int
1227
+
1228
+ schema = types.Schema.model_validate(CountryInfo.model_json_schema())
1229
+
1230
+ response = client.models.generate_content(
1231
+ model=GEMINI_FLASH_LATEST,
1232
+ contents='Give me information of the United States.',
1233
+ config=types.GenerateContentConfig(
1234
+ response_mime_type='application/json',
1235
+ response_schema=schema,
1236
+ ),
1237
+ )
1238
+
1239
+ assert response.text
1240
+
1241
+
1242
+ @pytest.mark.skipif(
1243
+ sys.version_info < (3, 10),
1244
+ reason='| is not supported in Python 3.9',
1245
+ )
1246
+ def test_schema_with_union_type(client):
1247
+ response = client.models.generate_content(
1248
+ model=GEMINI_FLASH_LATEST,
1249
+ contents='Give me a random number, either as an integers or written out as words.',
1250
+ config=types.GenerateContentConfig.model_validate(dict(
1251
+ response_mime_type='application/json',
1252
+ response_schema=int | str,
1253
+ ))
1254
+ )
1255
+ assert type(response.parsed) in (int, str)
1256
+
1257
+
1258
+ def test_schema_with_union_type_all_py_versions(client):
1259
+ response = client.models.generate_content(
1260
+ model=GEMINI_FLASH_LATEST,
1261
+ contents="Give me a random number, either an integer or a float.",
1262
+ config={
1263
+ 'response_mime_type': 'application/json',
1264
+ 'response_schema': Union[int, float],
1265
+ },
1266
+ )
1267
+ assert type(response.parsed) in (int, float)
1268
+
1269
+
1270
+ @pytest.mark.skipif(
1271
+ sys.version_info < (3, 10),
1272
+ reason='| is not supported in Python 3.9',
1273
+ )
1274
+ def test_list_schema_with_union_type(client):
1275
+ response = client.models.generate_content(
1276
+ model=GEMINI_FLASH_LATEST,
1277
+ contents='Give me a list of 5 random numbers, including some integers and some written out as words.',
1278
+ config=types.GenerateContentConfig(
1279
+ response_mime_type='application/json',
1280
+ response_schema=list[int | str],
1281
+ )
1282
+ )
1283
+ for item in response.parsed:
1284
+ assert isinstance(item, int) or isinstance(item, str)
1285
+
1286
+
1287
+ def test_list_schema_with_union_type_all_py_versions(client):
1288
+ response = client.models.generate_content(
1289
+ model=GEMINI_FLASH_LATEST,
1290
+ contents='Give me a list of 5 random numbers, including some integers and some written out as words.',
1291
+ config=types.GenerateContentConfig(
1292
+ response_mime_type='application/json',
1293
+ response_schema=list[Union[int, str]],
1294
+ )
1295
+ )
1296
+ for item in response.parsed:
1297
+ assert isinstance(item, int) or isinstance(item, str)
1298
+
1299
+
1300
+ def test_pydantic_schema_with_optional_generic_alias(client):
1301
+ class CountryInfo(BaseModel):
1302
+ name: str
1303
+ population: int
1304
+ capital: str
1305
+ continent: str
1306
+ gdp: int
1307
+ official_languages: Optional[List[str]]
1308
+ total_area_sq_mi: int
1309
+
1310
+ response = client.models.generate_content(
1311
+ model=GEMINI_FLASH_LATEST,
1312
+ contents='Give me information of the United States.',
1313
+ config={
1314
+ 'response_mime_type': 'application/json',
1315
+ 'response_schema': CountryInfo,
1316
+ },
1317
+ )
1318
+ assert isinstance(response.parsed, CountryInfo)
1319
+ assert isinstance(response.parsed.official_languages, list) or response.parsed.official_languages is None
1320
+
1321
+
1322
+ def test_pydantic_schema_with_optional_pydantic(client):
1323
+ class TestPerson(BaseModel):
1324
+ first_name: Optional[str] = Field(
1325
+ description='First name of the person', default=None
1326
+ )
1327
+ last_name: Optional[str] = Field(
1328
+ description='Last name of the person', default=None
1329
+ )
1330
+
1331
+ class TestDocument(BaseModel):
1332
+ case_number: Optional[str] = Field(
1333
+ description='Case number assigned to the claim', default=None
1334
+ )
1335
+ filed_by: Optional[TestPerson] = Field(
1336
+ description='Name of the party that filed or submitted the statement',
1337
+ default=None,
1338
+ )
1339
+
1340
+ test_prompt = """
1341
+ Carefully examine the following document and extract the metadata.
1342
+ Be sure to include the party that filed the document.
1343
+
1344
+ Document Text:
1345
+ --------------
1346
+ Case Number: 20-12345
1347
+ Filed by: John Doe
1348
+ """
1349
+
1350
+ response = client.models.generate_content(
1351
+ model=GEMINI_FLASH_LATEST,
1352
+ contents=test_prompt,
1353
+ config={
1354
+ 'response_mime_type': 'application/json',
1355
+ 'response_schema': TestDocument,
1356
+ },
1357
+ )
1358
+ assert isinstance(response.parsed, TestDocument)
1359
+ assert isinstance(response.parsed.filed_by, TestPerson)
1360
+
1361
+
1362
+ def test_list_of_pydantic_schema(client):
1363
+ class CountryInfo(BaseModel):
1364
+ name: str
1365
+ population: int
1366
+ capital: str
1367
+ continent: str
1368
+ gdp: int
1369
+ official_language: str
1370
+ total_area_sq_mi: int
1371
+
1372
+ response = client.models.generate_content(
1373
+ model=GEMINI_FLASH_LATEST,
1374
+ contents='Give me information for the United States, Canada, and Mexico.',
1375
+ config=types.GenerateContentConfig(
1376
+ response_mime_type='application/json',
1377
+ response_schema=list[CountryInfo],
1378
+ )
1379
+ )
1380
+ assert isinstance(response.parsed, list)
1381
+ assert len(response.parsed) == 3
1382
+ assert isinstance(response.parsed[0], CountryInfo)
1383
+
1384
+
1385
+ def test_nested_list_of_pydantic_schema(client):
1386
+ class Recipe(BaseModel):
1387
+ name: str
1388
+ cook_time: str
1389
+
1390
+ response = client.models.generate_content(
1391
+ model=GEMINI_FLASH_LATEST,
1392
+ contents="I\'m writing three recipe books, one each for United States, Canada, and Mexico. "
1393
+ "Can you give some recipe ideas, at least 2 per book?",
1394
+ config=types.GenerateContentConfig(
1395
+ response_mime_type='application/json',
1396
+ response_schema=list[list[Recipe]],
1397
+ )
1398
+ )
1399
+ assert isinstance(response.parsed, list)
1400
+ assert len(response.parsed) == 3
1401
+ assert isinstance(response.parsed[0][0], Recipe)
1402
+
1403
+
1404
+ def test_list_of_pydantic_schema_with_dict_config(client):
1405
+ class CountryInfo(BaseModel):
1406
+ name: str
1407
+ population: int
1408
+ capital: str
1409
+ continent: str
1410
+ gdp: int
1411
+ official_language: str
1412
+ total_area_sq_mi: int
1413
+
1414
+ response = client.models.generate_content(
1415
+ model=GEMINI_FLASH_LATEST,
1416
+ contents='Give me information for the United States, Canada, and Mexico.',
1417
+ config={
1418
+ 'response_mime_type': 'application/json',
1419
+ 'response_schema': list[CountryInfo],
1420
+ }
1421
+ )
1422
+ assert isinstance(response.parsed, list)
1423
+ assert len(response.parsed) == 3
1424
+ assert isinstance(response.parsed[0], CountryInfo)
1425
+
1426
+
1427
+ def test_pydantic_schema_with_nested_class(client):
1428
+ class CurrencyInfo(BaseModel):
1429
+ name: str
1430
+
1431
+ class CountryInfo(BaseModel):
1432
+ name: str
1433
+ currency: CurrencyInfo
1434
+
1435
+ response = client.models.generate_content(
1436
+ model=GEMINI_FLASH_LATEST,
1437
+ contents='Give me information for the United States',
1438
+ config=types.GenerateContentConfig(
1439
+ response_mime_type='application/json',
1440
+ response_schema=CountryInfo,
1441
+ )
1442
+ )
1443
+ assert isinstance(response.parsed, CountryInfo)
1444
+ assert isinstance(response.parsed.currency, CurrencyInfo)
1445
+
1446
+
1447
+ @pytest.mark.skipif(
1448
+ sys.version_info < (3, 10),
1449
+ reason='| is not supported in Python 3.9',
1450
+ )
1451
+ def test_pydantic_schema_with_union_type(client):
1452
+
1453
+ class CountryInfo(BaseModel):
1454
+ name: str
1455
+ restaurants_per_capita: int | float
1456
+
1457
+ response = client.models.generate_content(
1458
+ model=GEMINI_FLASH_LATEST,
1459
+ contents='Give me information for the United States',
1460
+ config=types.GenerateContentConfig(
1461
+ response_mime_type='application/json',
1462
+ response_schema=CountryInfo,
1463
+ )
1464
+ )
1465
+ assert isinstance(response.parsed, CountryInfo)
1466
+ assert type(response.parsed.restaurants_per_capita) in (int, float)
1467
+
1468
+
1469
+ def test_pydantic_schema_with_union_type_all_py_versions(client):
1470
+
1471
+ class CountryInfo(BaseModel):
1472
+ name: str
1473
+ restaurants_per_capita: Union[int, float]
1474
+
1475
+ response = client.models.generate_content(
1476
+ model=GEMINI_FLASH_LATEST,
1477
+ contents='Give me information for the United States',
1478
+ config=types.GenerateContentConfig(
1479
+ response_mime_type='application/json',
1480
+ response_schema=CountryInfo,
1481
+ )
1482
+ )
1483
+ assert isinstance(response.parsed, CountryInfo)
1484
+ assert type(response.parsed.restaurants_per_capita) in (int, float)
1485
+
1486
+
1487
+ @pytest.mark.skipif(
1488
+ sys.version_info < (3, 10),
1489
+ reason='| is not supported in Python 3.9',
1490
+ )
1491
+ def test_union_of_pydantic_schema(client):
1492
+
1493
+ class SongLyric(BaseModel):
1494
+ song_name: str
1495
+ lyric: str
1496
+ artist: str
1497
+
1498
+ class FunFact(BaseModel):
1499
+ fun_fact: str
1500
+
1501
+ response = client.models.generate_content(
1502
+ model=GEMINI_FLASH_LATEST,
1503
+ contents='Can you give me a Taylor Swift song lyric or a fun fact?',
1504
+ config=types.GenerateContentConfig(
1505
+ response_mime_type='application/json',
1506
+ response_schema=SongLyric | FunFact,
1507
+ )
1508
+ )
1509
+ assert type(response.parsed) in (SongLyric, FunFact)
1510
+
1511
+
1512
+ def test_union_of_pydantic_schema_all_py_versions(client):
1513
+
1514
+ class SongLyric(BaseModel):
1515
+ song_name: str
1516
+ lyric: str
1517
+ artist: str
1518
+
1519
+ class FunFact(BaseModel):
1520
+ fun_fact: str
1521
+
1522
+ response = client.models.generate_content(
1523
+ model=GEMINI_FLASH_LATEST,
1524
+ contents='Can you give me a Taylor Swift song lyric or a fun fact?',
1525
+ config=types.GenerateContentConfig(
1526
+ response_mime_type='application/json',
1527
+ response_schema=Union[SongLyric, FunFact],
1528
+ )
1529
+ )
1530
+ assert type(response.parsed) in (SongLyric, FunFact)
1531
+
1532
+
1533
+ def test_pydantic_schema_with_nested_enum(client):
1534
+ class Continent(Enum):
1535
+ ASIA = 'Asia'
1536
+ AFRICA = 'Africa'
1537
+ ANTARCTICA = 'Antarctica'
1538
+ EUROPE = 'Europe'
1539
+ NORTH_AMERICA = 'North America'
1540
+ SOUTH_AMERICA = 'South America'
1541
+ AUSTRALIA = 'Australia'
1542
+
1543
+ class CountryInfo(BaseModel):
1544
+ name: str
1545
+ continent: Continent
1546
+
1547
+ response = client.models.generate_content(
1548
+ model=GEMINI_FLASH_LATEST,
1549
+ contents='Give me information for the United States',
1550
+ config=types.GenerateContentConfig(
1551
+ response_mime_type='application/json',
1552
+ response_schema=CountryInfo,
1553
+ )
1554
+ )
1555
+ assert isinstance(response.parsed, CountryInfo)
1556
+ assert isinstance(response.parsed.continent, Continent)
1557
+
1558
+
1559
+ def test_pydantic_schema_with_nested_list_class(client):
1560
+ class CurrencyInfo(BaseModel):
1561
+ name: str
1562
+
1563
+ class CountryInfo(BaseModel):
1564
+ name: str
1565
+ currency: list[CurrencyInfo]
1566
+
1567
+ response = client.models.generate_content(
1568
+ model=GEMINI_FLASH_LATEST,
1569
+ contents='Give me information for the United States.',
1570
+ config=types.GenerateContentConfig(
1571
+ response_mime_type='application/json',
1572
+ response_schema=CountryInfo,
1573
+ )
1574
+ )
1575
+ assert isinstance(response.parsed, CountryInfo)
1576
+ assert isinstance(response.parsed.currency[0], CurrencyInfo)
1577
+
1578
+
1579
+ def test_list_of_pydantic_schema_with_nested_class(client):
1580
+ class CurrencyInfo(BaseModel):
1581
+ name: str
1582
+ code: str
1583
+ symbol: str
1584
+
1585
+ class CountryInfo(BaseModel):
1586
+ name: str
1587
+ population: int
1588
+ capital: str
1589
+ continent: str
1590
+ gdp: int
1591
+ official_language: str
1592
+ total_area_sq_mi: int
1593
+ currency: CurrencyInfo
1594
+
1595
+ response = client.models.generate_content(
1596
+ model=GEMINI_FLASH_LATEST,
1597
+ contents='Give me information for the United States, Canada, and Mexico.',
1598
+ config=types.GenerateContentConfig(
1599
+ response_mime_type='application/json',
1600
+ response_schema=list[CountryInfo],
1601
+ )
1602
+ )
1603
+ assert isinstance(response.parsed, list)
1604
+ assert isinstance(response.parsed[0], CountryInfo)
1605
+ assert isinstance(response.parsed[0].currency, CurrencyInfo)
1606
+
1607
+
1608
+ def test_list_of_pydantic_schema_with_nested_list_class(client):
1609
+ class CurrencyInfo(BaseModel):
1610
+ name: str
1611
+ code: str
1612
+ symbol: str
1613
+
1614
+ class CountryInfo(BaseModel):
1615
+ name: str
1616
+ population: int
1617
+ capital: str
1618
+ continent: str
1619
+ gdp: int
1620
+ official_language: str
1621
+ total_area_sq_mi: int
1622
+ currency: list[CurrencyInfo]
1623
+
1624
+ response = client.models.generate_content(
1625
+ model=GEMINI_FLASH_LATEST,
1626
+ contents='Give me information for the United States, Canada, and Mexico.',
1627
+ config=types.GenerateContentConfig(
1628
+ response_mime_type='application/json',
1629
+ response_schema=list[CountryInfo],
1630
+ )
1631
+ )
1632
+ assert isinstance(response.parsed, list)
1633
+ assert isinstance(response.parsed[0], CountryInfo)
1634
+ assert isinstance(response.parsed[0].currency, list)
1635
+ assert isinstance(response.parsed[0].currency[0], CurrencyInfo)
1636
+
1637
+
1638
+ def test_response_schema_with_dict_of_pydantic_schema(client):
1639
+ class CountryInfo(BaseModel):
1640
+ population: int
1641
+ capital: str
1642
+ continent: str
1643
+ gdp: int
1644
+ official_language: str
1645
+ total_area_sq_mi: int
1646
+
1647
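+ # dict[str, ...] response schemas are rejected on the Gemini API path but accepted on Vertex AI.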
+ if not client.vertexai:
1648
+ with pytest.raises(ValueError) as e:
1649
+ client.models.generate_content(
1650
+ model=GEMINI_FLASH_LATEST,
1651
+ contents='Give me information for the United States, Canada, and Mexico.',
1652
+ config=types.GenerateContentConfig(
1653
+ response_mime_type='application/json',
1654
+ response_schema=dict[str, CountryInfo],
1655
+ )
1656
+ )
1657
+ else:
1658
+ response = client.models.generate_content(
1659
+ model=GEMINI_FLASH_LATEST,
1660
+ contents='Give me information for the United States, Canada, and Mexico.',
1661
+ config=types.GenerateContentConfig(
1662
+ response_mime_type='application/json',
1663
+ response_schema=dict[str, CountryInfo],
1664
+ )
1665
+ )
1666
+ assert response.text
1667
+
1668
+
1669
+ def test_schema_with_unsupported_type_raises(client):
1670
+ with pytest.raises(ValueError) as e:
1671
+ client.models.generate_content(
1672
+ model=GEMINI_FLASH_LATEST,
1673
+ contents='Give me information for the United States, Canada, and Mexico.',
1674
+ config=types.GenerateContentConfig(
1675
+ response_mime_type='application/json',
1676
+ response_schema=types.Schema(),
1677
+ )
1678
+ )
1679
+ assert 'Unsupported schema type' in str(e)
1680
+
1681
+
1682
+ def test_enum_schema_with_enum_mime_type(client):
1683
+ response = client.models.generate_content(
1684
+ model=GEMINI_FLASH_2_0,
1685
+ contents='What instrument plays multiple notes at once?',
1686
+ config={
1687
+ 'response_mime_type': 'text/x.enum',
1688
+ 'response_schema': InstrumentEnum,
1689
+ },
1690
+ )
1691
+
1692
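+ # Unlike 'application/json', 'text/x.enum' returns the bare enum value without quotes.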
+ instrument_values = {member.value for member in InstrumentEnum}
1693
+
1694
+ assert response.text in instrument_values
1695
+ assert isinstance(response.parsed, InstrumentEnum)
1696
+
1697
+
1698
+ def test_list_of_enum_schema_with_enum_mime_type(client):
1699
+ with pytest.raises(errors.ClientError) as e:
1700
+ client.models.generate_content(
1701
+ model=GEMINI_FLASH_2_0,
1702
+ contents='What instrument plays a single note at a time?',
1703
+ config={
1704
+ 'response_mime_type': 'text/x.enum',
1705
+ 'response_schema': list[InstrumentEnum],
1706
+ },
1707
+ )
1708
+ assert '400' in str(e)
1709
+
1710
+
1711
+ def test_list_of_enum_schema_with_json_mime_type(client):
1712
+ response = client.models.generate_content(
1713
+ model=GEMINI_FLASH_LATEST,
1714
+ contents='What instrument plays a single note at a time?',
1715
+ config={
1716
+ 'response_mime_type': 'application/json',
1717
+ 'response_schema': list[InstrumentEnum],
1718
+ },
1719
+ )
1720
+
1721
+ assert isinstance(response.parsed, list)
1722
+ assert response.parsed
1723
+ for item in response.parsed:
1724
+ assert isinstance(item, InstrumentEnum)
1725
+
1726
+
1727
+ def test_optional_enum_in_pydantic_schema_with_json_mime_type(client):
1728
+ class InstrumentInfo(BaseModel):
1729
+ instrument: Optional[InstrumentEnum]
1730
+ fun_fact: str
1731
+
1732
+ response = client.models.generate_content(
1733
+ model=GEMINI_FLASH_LATEST,
1734
+ contents='What instrument plays a single note at a time? Include the name of the instrument in your response.',
1735
+ config={
1736
+ 'response_mime_type': 'application/json',
1737
+ 'response_schema': InstrumentInfo,
1738
+ },
1739
+ )
1740
+
1741
+ assert isinstance(response.parsed, InstrumentInfo)
1742
+ assert isinstance(response.parsed.instrument, InstrumentEnum)
1743
+
1744
+
1745
+ def test_enum_schema_with_json_mime_type(client):
1746
+ response = client.models.generate_content(
1747
+ model=GEMINI_FLASH_LATEST,
1748
+ contents='What instrument plays multiple notes at once?',
1749
+ config={
1750
+ 'response_mime_type': 'application/json',
1751
+ 'response_schema': InstrumentEnum,
1752
+ },
1753
+ )
1754
+ # "application/json" returns response in double quotes.
1755
+ removed_quotes = response.text.replace('"', '')
1756
+ instrument_values = {member.value for member in InstrumentEnum}
1757
+
1758
+ assert removed_quotes in instrument_values
1759
+ assert isinstance(response.parsed, InstrumentEnum)
1760
+
1761
+
1762
+ def test_non_string_enum_schema_with_enum_mime_type(client):
1763
+ class IntegerEnum(Enum):
1764
+ PERCUSSION = 1
1765
+ STRING = 2
1766
+ WOODWIND = 3
1767
+ BRASS = 4
1768
+ KEYBOARD = 5
1769
+
1770
+ response = client.models.generate_content(
1771
+ model=GEMINI_FLASH_LATEST,
1772
+ contents='What instrument plays multiple notes at once?',
1773
+ config={
1774
+ 'response_mime_type': 'text/x.enum',
1775
+ 'response_schema': IntegerEnum,
1776
+ },
1777
+ )
1778
+
1779
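+ # The enum value comes back as text, so compare against str(member.value).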
+ instrument_values = {str(member.value) for member in IntegerEnum}
1780
+
1781
+ assert response.text in instrument_values
1782
+
1783
+
1784
+ def test_json_schema(client):
1785
+ response = client.models.generate_content(
1786
+ model=GEMINI_FLASH_LATEST,
1787
+ contents='Give me information about the United States.',
1788
+ config={
1789
+ 'response_mime_type': 'application/json',
1790
+ 'response_schema': {
1791
+ 'required': [
1792
+ 'name',
1793
+ 'population',
1794
+ 'capital',
1795
+ 'continent',
1796
+ 'gdp',
1797
+ 'official_language',
1798
+ 'total_area_sq_mi',
1799
+ ],
1800
+ 'properties': {
1801
+ 'name': {'type': 'STRING'},
1802
+ 'population': {'type': 'INTEGER'},
1803
+ 'capital': {'type': 'STRING'},
1804
+ 'continent': {'type': 'STRING'},
1805
+ 'gdp': {'type': 'INTEGER'},
1806
+ 'official_language': {'type': 'STRING'},
1807
+ 'total_area_sq_mi': {'type': 'INTEGER'},
1808
+ },
1809
+ 'type': 'OBJECT',
1810
+ },
1811
+ },
1812
+ )
1813
+ assert isinstance(response.parsed, dict)
1814
+
1815
+
1816
+ def test_json_schema_with_lower_enum(client):
1817
+ response = client.models.generate_content(
1818
+ model=GEMINI_FLASH_LATEST,
1819
+ contents='Give me information about the United States.',
1820
+ config={
1821
+ 'response_mime_type': 'application/json',
1822
+ 'response_schema': {
1823
+ 'required': [
1824
+ 'name',
1825
+ 'population',
1826
+ 'capital',
1827
+ 'continent',
1828
+ 'gdp',
1829
+ 'official_language',
1830
+ 'total_area_sq_mi',
1831
+ ],
1832
+ 'properties': {
1833
+ 'name': {'type': 'string'},
1834
+ 'population': {'type': 'integer'},
1835
+ 'capital': {'type': 'string'},
1836
+ 'continent': {'type': 'string'},
1837
+ 'gdp': {'type': 'integer'},
1838
+ 'official_language': {'type': 'string'},
1839
+ 'total_area_sq_mi': {'type': 'integer'},
1840
+ },
1841
+ 'type': 'OBJECT',
1842
+ },
1843
+ },
1844
+ )
1845
+ assert isinstance(response.parsed, dict)
1846
+
1847
+
1848
+ def test_json_schema_with_any_of(client):
1849
+ response = client.models.generate_content(
1850
+ model=GEMINI_FLASH_LATEST,
1851
+ contents='Give me a fruit basket.',
1852
+ config={
1853
+ 'response_mime_type': 'application/json',
1854
+ 'response_schema': {
1855
+ 'type': 'OBJECT',
1856
+ 'title': 'Fruit Basket',
1857
+ 'description': 'A structured representation of a fruit basket',
1858
+ 'required': ['fruit'],
1859
+ 'properties': {
1860
+ 'fruit': {
1861
+ 'type': 'ARRAY',
1862
+ 'description': (
1863
+ 'An ordered list of the fruit in the basket'
1864
+ ),
1865
+ 'items': {
1866
+ 'description': 'A piece of fruit',
1867
+ 'any_of': [
1868
+ {
1869
+ 'title': 'Apple',
1870
+ 'description': 'Describes an apple',
1871
+ 'type': 'OBJECT',
1872
+ 'properties': {
1873
+ 'type': {
1874
+ 'type': 'STRING',
1875
+ 'description': "Always 'apple'",
1876
+ },
1877
+ 'color': {
1878
+ 'type': 'STRING',
1879
+ 'description': (
1880
+ 'The color of the apple (e.g.,'
1881
+ " 'red')"
1882
+ ),
1883
+ },
1884
+ },
1885
+ 'property_ordering': ['type', 'color'],
1886
+ 'required': ['type', 'color'],
1887
+ },
1888
+ {
1889
+ 'title': 'Orange',
1890
+ 'description': 'Describes an orange',
1891
+ 'type': 'OBJECT',
1892
+ 'properties': {
1893
+ 'type': {
1894
+ 'type': 'STRING',
1895
+ 'description': "Always 'orange'",
1896
+ },
1897
+ 'size': {
1898
+ 'type': 'STRING',
1899
+ 'description': (
1900
+ 'The size of the orange (e.g.,'
1901
+ " 'medium')"
1902
+ ),
1903
+ },
1904
+ },
1905
+ 'property_ordering': ['type', 'size'],
1906
+ 'required': ['type', 'size'],
1907
+ },
1908
+ ],
1909
+ },
1910
+ }
1911
+ },
1912
+ },
1913
+ },
1914
+ )
1915
+ assert isinstance(response.parsed, dict)
1916
+ assert 'fruit' in response.parsed
1917
+ assert isinstance(response.parsed['fruit'], list)
1918
+ assert 'type' in response.parsed['fruit'][0]
1919
+
1920
+
1921
+ def test_schema_with_any_of(client):
1922
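+ # any_of lets each array item match either the Apple or the Orange sub-schema.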
+ response_schema=types.Schema(
1923
+ type=types.Type.OBJECT,
1924
+ title='Fruit Basket',
1925
+ description='A structured representation of a fruit basket',
1926
+ properties={
1927
+ 'fruit': types.Schema(
1928
+ type=types.Type.ARRAY,
1929
+ description='An ordered list of the fruit in the basket',
1930
+ items=types.Schema(
1931
+ any_of=[
1932
+ types.Schema(
1933
+ title='Apple',
1934
+ description='Describes an apple',
1935
+ type=types.Type.OBJECT,
1936
+ properties={
1937
+ 'type': types.Schema(type=types.Type.STRING, description='Always "apple"'),
1938
+ 'variety': types.Schema(
1939
+ type=types.Type.STRING,
1940
+ description='The variety of apple (e.g., "Granny Smith")',
1941
+ ),
1942
+ },
1943
+ property_ordering=['type', 'variety'],
1944
+ required=['type', 'variety'],
1945
+ ),
1946
+ types.Schema(
1947
+ title='Orange',
1948
+ description='Describes an orange',
1949
+ type=types.Type.OBJECT,
1950
+ properties={
1951
+ 'type': types.Schema(type=types.Type.STRING, description='Always "orange"'),
1952
+ 'variety': types.Schema(
1953
+ type=types.Type.STRING,
1954
+ description='The variety of orange (e.g.,"Navel orange")',
1955
+ ),
1956
+ },
1957
+ property_ordering=['type', 'variety'],
1958
+ required=['type', 'variety'],
1959
+ ),
1960
+ ],
1961
+ ),
1962
+ ),
1963
+ },
1964
+ required=['fruit'],
1965
+ )
1966
+ response = client.models.generate_content(
1967
+ model=GEMINI_FLASH_LATEST,
1968
+ contents='Give me a fruit basket.',
1969
+ config=types.GenerateContentConfig(
1970
+ response_mime_type='application/json',
1971
+ response_schema=response_schema,
1972
+ ),
1973
+ )
1974
+ assert isinstance(response.parsed, dict)
1975
+ assert 'fruit' in response.parsed
1976
+ assert isinstance(response.parsed['fruit'], list)
1977
+ assert 'type' in response.parsed['fruit'][0]
1978
+
1979
+
1980
+ def test_replicated_voice_config(client):
1981
+ with pytest_helper.exception_if_vertex(client, errors.ClientError):
1982
+ client.models.generate_content(
1983
+ model='gemini-2.5-flash-preview-tts-voice-replication-rev22-2025-10-28',
1984
+ contents=t.t_contents(
1985
+ 'Produce a speech response saying "Cheese"'
1986
+ ),
1987
+ config=types.GenerateContentConfig(
1988
+ response_modalities=['audio'],
1989
+ speech_config=types.SpeechConfig(
1990
+ voice_config=types.VoiceConfig(
1991
+ replicated_voice_config=types.ReplicatedVoiceConfig(
1992
+ voice_sample_audio=audio_bytes,
1993
+ mime_type='audio/wav',
1994
+ )
1995
+ )
1996
+ ),
1997
+ ),
1998
+ )
1999
+
2000
+
2001
+ def test_json_schema_with_streaming(client):
2002
+
2003
+ response = client.models.generate_content_stream(
2004
+ model=GEMINI_FLASH_LATEST,
2005
+ contents='Give me information about the United States.',
2006
+ config={
2007
+ 'response_mime_type': 'application/json',
2008
+ 'response_schema': {
2009
+ 'properties': {
2010
+ 'name': {'type': 'STRING'},
2011
+ 'population': {'type': 'INTEGER'},
2012
+ 'capital': {'type': 'STRING'},
2013
+ 'continent': {'type': 'STRING'},
2014
+ 'gdp': {'type': 'INTEGER'},
2015
+ 'official_language': {'type': 'STRING'},
2016
+ 'total_area_sq_mi': {'type': 'INTEGER'},
2017
+ },
2018
+ 'type': 'OBJECT',
2019
+ },
2020
+ },
2021
+ )
2022
+
2023
+ for r in response:
2024
+ parts = r.parts
2025
+ for p in parts:
2026
+ assert p.text
2027
+
2028
+
2029
+ def test_pydantic_schema_with_streaming(client):
2030
+
2031
+ class CountryInfo(BaseModel):
2032
+ name: str
2033
+ population: int
2034
+ capital: str
2035
+ continent: str
2036
+ gdp: int
2037
+ official_language: str
2038
+ total_area_sq_mi: int
2039
+
2040
+ response = client.models.generate_content_stream(
2041
+ model=GEMINI_FLASH_LATEST,
2042
+ contents='Give me information about the United States.',
2043
+ config={
2044
+ 'response_mime_type': 'application/json',
2045
+ 'response_schema': CountryInfo
2046
+ },
2047
+ )
2048
+
2049
+ for r in response:
2050
+ parts = r.parts
2051
+ for p in parts:
2052
+ assert p.text
2053
+
2054
+
2055
+ def test_schema_from_json(client):
2056
+
2057
+ class Foo(BaseModel):
2058
+ bar: str
2059
+ baz: int
2060
+ qux: list[str]
2061
+
2062
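+ # Build a types.Schema directly from the Pydantic model's JSON schema dict.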
+ schema = types.Schema.model_validate(Foo.model_json_schema())
2063
+
2064
+ response = client.models.generate_content(
2065
+ model=GEMINI_FLASH_LATEST,
2066
+ contents='Fill in the Foo.',
2067
+ config=types.GenerateContentConfig(
2068
+ response_mime_type='application/json',
2069
+ response_schema=schema
2070
+ ),
2071
+ )
2072
+
2073
+ assert response.text
2074
+
2075
+
2076
+ def test_schema_from_model_schema(client):
2077
+
2078
+ class Foo(BaseModel):
2079
+ bar: str
2080
+ baz: int
2081
+ qux: list[str]
2082
+
2083
+ response = client.models.generate_content(
2084
+ model=GEMINI_FLASH_LATEST,
2085
+ contents='Fill in the Foo.',
2086
+ config=types.GenerateContentConfig(
2087
+ response_mime_type='application/json',
2088
+ response_schema=Foo.model_json_schema(),
2089
+ ),
2090
+ )
2091
+
2092
+ assert response.text
2093
+
2094
+
2095
+ def test_schema_with_additional_properties(client):
2096
+
2097
+ class Foo(BaseModel):
2098
+ bar: str
2099
+ baz: int
2100
+ qux: dict[str, str]
2101
+
2102
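+ # dict[str, str] maps to additionalProperties, which only Vertex AI supports.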
+ if client.vertexai:
2103
+ response = client.models.generate_content(
2104
+ model=GEMINI_FLASH_LATEST,
2105
+ contents='What is your name?',
2106
+ config=types.GenerateContentConfig(
2107
+ response_mime_type='application/json',
2108
+ response_schema=Foo,
2109
+ ),
2110
+ )
2111
+ assert response.text
2112
+ else:
2113
+ with pytest.raises(ValueError) as e:
2114
+ client.models.generate_content(
2115
+ model=GEMINI_FLASH_LATEST,
2116
+ contents='What is your name?',
2117
+ config=types.GenerateContentConfig(
2118
+ response_mime_type='application/json',
2119
+ response_schema=Foo,
2120
+ ),
2121
+ )
2122
+ assert 'additionalProperties is not supported in the Gemini API.' in str(e)
2123
+
2124
+
2125
+ def test_function(client):
2126
+ def get_weather(city: str) -> str:
2127
+ """Returns the weather in a city."""
2128
+ return f'The weather in {city} is sunny and 100 degrees.'
2129
+
2130
+ response = client.models.generate_content(
2131
+ model=GEMINI_FLASH_LATEST,
2132
+ contents=(
2133
+ 'What is the weather like in Sunnyvale? Answer in very short'
2134
+ ' sentence.'
2135
+ ),
2136
+ config={
2137
+ 'tools': [get_weather],
2138
+ },
2139
+ )
2140
+ assert '100' in response.text
2141
+
2142
+
2143
+ def test_invalid_input_without_transformer(client):
2144
+ with pytest.raises(ValidationError) as e:
2145
+ client.models.generate_content(
2146
+ model=GEMINI_FLASH_LATEST,
2147
+ contents='What is your name',
2148
+ config={
2149
+ 'input_that_does_not_exist': 'what_ever_value',
2150
+ },
2151
+ )
2152
+ assert 'input_that_does_not_exist' in str(e)
2153
+ assert 'Extra inputs are not permitted' in str(e)
2154
+
2155
+
2156
+ def test_invalid_input_with_transformer_dict(client):
2157
+ with pytest.raises(ValidationError) as e:
2158
+ client.models.generate_content(
2159
+ model=GEMINI_FLASH_LATEST,
2160
+ contents={'invalid_key': 'invalid_value'},
2161
+ )
2162
+ assert 'invalid_key' in str(e.value)
2163
+
2164
+
2165
+ def test_invalid_input_with_transformer_list(client):
2166
+ with pytest.raises(ValidationError) as e:
2167
+ client.models.generate_content(
2168
+ model=GEMINI_FLASH_LATEST,
2169
+ contents=[{'invalid_key': 'invalid_value'}],
2170
+ )
2171
+ assert 'invalid_key' in str(e.value)
2172
+
2173
+
2174
+ def test_invalid_input_for_simple_parameter(client):
2175
+ with pytest.raises(ValidationError) as e:
2176
+ client.models.generate_content(
2177
+ model=5,
2178
+ contents='What is your name?',
2179
+ )
2180
+ assert 'model' in str(e)
2181
+
2182
+
2183
+ def test_catch_stack_trace_in_error_handling(client):
2184
+ try:
2185
+ client.models.generate_content(
2186
+ model=GEMINI_FLASH_LATEST,
2187
+ contents='What is your name?',
2188
+ config={'response_modalities': ['AUDIO']},
2189
+ )
2190
+ except errors.ClientError as e:
2191
+ # Note that the stack trace is truncated in the replay file, so this is
2192
+ # the best we can do when testing error handling. In API mode, the full
2193
+ # error payload is:
2194
+ # {
2195
+ # 'error': {
2196
+ # 'code': 400,
2197
+ # 'message': 'Multi-modal output is not supported.',
2198
+ # 'status': 'INVALID_ARGUMENT',
2199
+ # 'details': [{
2200
+ # '@type': 'type.googleapis.com/google.rpc.DebugInfo',
2201
+ # 'detail': '[ORIGINAL ERROR] generic::invalid_argument: '
2202
+ # 'Multi-modal output is not supported. '
2203
+ # '[google.rpc.error_details_ext] '
2204
+ # '{ message: "Multi-modal output is not supported." }'
2205
+ # }]
2206
+ # }
2207
+ # }
2208
+ if 'error' in e.details:
2209
+ details = e.details['error']
2210
+ else:
2211
+ details = e.details
2212
+ assert details['code'] == 400
2213
+ assert details['status'] == 'INVALID_ARGUMENT'
2214
+
2215
+
2216
+ def test_multiple_strings(client):
2217
+ class SummaryResponses(BaseModel):
2218
+ summary: str
2219
+ person: str
2220
+
2221
+ response = client.models.generate_content(
2222
+ model=GEMINI_FLASH_LATEST,
2223
+ contents=[
2224
+ "Summarize Shakespeare's life work in a few sentences",
2225
+ "Summarize Hemingway's life work",
2226
+ ],
2227
+ config={
2228
+ 'response_mime_type': 'application/json',
2229
+ 'response_schema': list[SummaryResponses],
2230
+ },
2231
+ )
2232
+
2233
+ assert 'Shakespeare' in response.text
2234
+ assert 'Hemingway' in response.text
2235
+ assert 'Shakespeare' in response.parsed[0].person
2236
+ assert 'Hemingway' in response.parsed[1].person
2237
+
2238
+
2239
+ def test_multiple_parts(client):
2240
+ class SummaryResponses(BaseModel):
2241
+ summary: str
2242
+ person: str
2243
+
2244
+ response = client.models.generate_content(
2245
+ model=GEMINI_FLASH_LATEST,
2246
+ contents=[
2247
+ types.Part(
2248
+ text="Summarize Shakespeare's life work in a few sentences"
2249
+ ),
2250
+ types.Part(text="Summarize Hemingway's life work"),
2251
+ ],
2252
+ config={
2253
+ 'response_mime_type': 'application/json',
2254
+ 'response_schema': list[SummaryResponses],
2255
+ },
2256
+ )
2257
+
2258
+ assert 'Shakespeare' in response.text
2259
+ assert 'Hemingway' in response.text
2260
+ assert 'Shakespeare' in response.parsed[0].person
2261
+ assert 'Hemingway' in response.parsed[1].person
2262
+
2263
+
2264
+ def test_multiple_function_calls(client):
2265
+ response = client.models.generate_content(
2266
+ model=GEMINI_FLASH_LATEST,
2267
+ contents=[
2268
+ 'What is the weather in Boston?',
2269
+ 'What is the stock price of GOOG?',
2270
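+ # Simulated history: prior function calls and their responses are passed back as parts.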
+ types.Part.from_function_call(
2271
+ name='get_weather',
2272
+ args={'location': 'Boston'},
2273
+ ),
2274
+ types.Part.from_function_call(
2275
+ name='get_stock_price',
2276
+ args={'symbol': 'GOOG'},
2277
+ ),
2278
+ types.Part.from_function_response(
2279
+ name='get_weather',
2280
+ response={'response': 'It is sunny and 100 degrees.'},
2281
+ ),
2282
+ types.Part.from_function_response(
2283
+ name='get_stock_price',
2284
+ response={'response': 'The stock price is $100.'},
2285
+ ),
2286
+ ],
2287
+ config=types.GenerateContentConfig(
2288
+ tools=[
2289
+ types.Tool(
2290
+ function_declarations=[
2291
+ types.FunctionDeclaration(
2292
+ name='get_weather',
2293
+ description='Get the weather in a city.',
2294
+ parameters=types.Schema(
2295
+ type=types.Type.OBJECT,
2296
+ properties={
2297
+ 'location': types.Schema(
2298
+ type=types.Type.STRING
2299
+ )
2300
+ },
2301
+ ),
2302
+ ),
2303
+ types.FunctionDeclaration(
2304
+ name='get_stock_price',
2305
+ description='Get the stock price of a symbol.',
2306
+ parameters=types.Schema(
2307
+ type=types.Type.OBJECT,
2308
+ properties={
2309
+ 'symbol': types.Schema(
2310
+ type=types.Type.STRING
2311
+ )
2312
+ },
2313
+ ),
2314
+ ),
2315
+ ]
2316
+ ),
2317
+ ]
2318
+ ),
2319
+ )
2320
+
2321
+ assert 'Boston' in response.text
2322
+ assert 'sunny' in response.text
2323
+ assert '100 degrees' in response.text
2324
+ assert '$100' in response.text
2325
+
2326
+
2327
+ def test_usage_metadata_part_types(client):
2328
+ contents = [
2329
+ 'Hello world.',
2330
+ types.Part.from_bytes(
2331
+ data=image_bytes,
2332
+ mime_type='image/png',
2333
+ ),
2334
+ ]
2335
+
2336
+ response = client.models.generate_content(
2337
+ model=GEMINI_FLASH_2_0, contents=contents
2338
+ )
2339
+ usage_metadata = response.usage_metadata
2340
+
2341
+ assert usage_metadata.candidates_token_count
2342
+ assert usage_metadata.candidates_tokens_details
2343
+ modalities = sorted(
2344
+ [d.modality.name for d in usage_metadata.candidates_tokens_details]
2345
+ )
2346
+ assert modalities == ['TEXT']
2347
+ assert isinstance(
2348
+ usage_metadata.candidates_tokens_details[0].modality, types.MediaModality)
2349
+
2350
+ assert usage_metadata.prompt_token_count
2351
+ assert usage_metadata.prompt_tokens_details
2352
+ modalities = sorted(
2353
+ [d.modality.name for d in usage_metadata.prompt_tokens_details]
2354
+ )
2355
+ assert modalities == ['IMAGE', 'TEXT']
2356
+
2357
+
2358
+ def test_error_handling_stream(client):
2359
+ if client.vertexai:
2360
+ return
2361
+
2362
+ try:
2363
+ for chunk in client.models.generate_content_stream(
2364
+ model=GEMINI_FLASH_IMAGE_LATEST,
2365
+ contents=[
2366
+ types.Content(
2367
+ role='user',
2368
+ parts=[
2369
+ types.Part.from_bytes(
2370
+ data=image_bytes, mime_type='image/png'
2371
+ ),
2372
+ types.Part.from_text(text='Make sky more beautiful.'),
2373
+ ],
2374
+ ),
2375
+ ],
2376
+ config=types.GenerateContentConfig(
2377
+ response_mime_type='text/plain',
2378
+ response_modalities=['IMAGE', 'TEXT'],
2379
+ system_instruction='make the sky more beautiful.',
2380
+ ),
2381
+ ):
2382
+ continue
2383
+
2384
+ except errors.ClientError as e:
2385
+ assert (
2386
+ e.message
2387
+ == 'Developer instruction is not enabled for'
2388
+ ' models/gemini-2.5-flash-image'
2389
+ )
2390
+
2391
+
2392
+ def test_error_handling_unary(client):
2393
+ if client.vertexai:
2394
+ return
2395
+
2396
+ try:
2397
+ client.models.generate_content(
2398
+ model=GEMINI_FLASH_IMAGE_LATEST,
2399
+ contents=[
2400
+ types.Content(
2401
+ role='user',
2402
+ parts=[
2403
+ types.Part.from_bytes(
2404
+ data=image_bytes, mime_type='image/png'
2405
+ ),
2406
+ types.Part.from_text(text='Make sky more beautiful.'),
2407
+ ],
2408
+ ),
2409
+ ],
2410
+ config=types.GenerateContentConfig(
2411
+ response_mime_type='text/plain',
2412
+ response_modalities=['IMAGE', 'TEXT'],
2413
+ system_instruction='make the sky more beautiful.',
2414
+ ),
2415
+ )
2416
+
2417
+ except errors.ClientError as e:
2418
+ assert (
2419
+ e.message
2420
+ == 'Developer instruction is not enabled for'
2421
+ ' models/gemini-2.5-flash-image'
2422
+ )
2423
+
2424
+
2425
+ def test_provisioned_output_dedicated(client):
2426
+ response = client.models.generate_content(
2427
+ model=GEMINI_FLASH_LATEST,
2428
+ contents='What is 1 + 1?',
2429
+ config=types.GenerateContentConfig(
2430
+ http_options={'headers': {'X-Vertex-AI-LLM-Request-Type': 'dedicated'}}
2431
+ ),
2432
+ )
2433
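+ # The dedicated request-type header yields PROVISIONED_THROUGHPUT traffic on Vertex AI only; the Gemini API reports no traffic type.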
+ if client.vertexai:
2434
+ assert response.usage_metadata.traffic_type == types.TrafficType.PROVISIONED_THROUGHPUT
2435
+ else:
2436
+ assert not response.usage_metadata.traffic_type
2437
+
2438
+
2439
+ @pytest.mark.asyncio
2440
+ async def test_error_handling_unary_async(client):
2441
+ if client.vertexai:
2442
+ return
2443
+
2444
+ try:
2445
+ await client.aio.models.generate_content(
2446
+ model=GEMINI_FLASH_IMAGE_LATEST,
2447
+ contents=[
2448
+ types.Content(
2449
+ role='user',
2450
+ parts=[
2451
+ types.Part.from_bytes(
2452
+ data=image_bytes, mime_type='image/png'
2453
+ ),
2454
+ types.Part.from_text(text='Make sky more beautiful.'),
2455
+ ],
2456
+ ),
2457
+ ],
2458
+ config=types.GenerateContentConfig(
2459
+ response_mime_type='text/plain',
2460
+ response_modalities=['IMAGE', 'TEXT'],
2461
+ system_instruction='make the sky more beautiful.',
2462
+ ),
2463
+ )
2464
+
2465
+ except errors.ClientError as e:
2466
+ assert (
2467
+ e.message
2468
+ == 'Developer instruction is not enabled for'
2469
+ ' models/gemini-2.5-flash-image'
2470
+ )
2471
+
2472
+
2473
+ @pytest.mark.asyncio
2474
+ async def test_error_handling_stream_async(client):
2475
+ if client.vertexai:
2476
+ return
2477
+
2478
+ try:
2479
+ async for part in await client.aio.models.generate_content_stream(
2480
+ model=GEMINI_FLASH_IMAGE_LATEST,
2481
+ contents=[
2482
+ types.Content(
2483
+ role='user',
2484
+ parts=[
2485
+ types.Part.from_bytes(
2486
+ data=image_bytes, mime_type='image/png'
2487
+ ),
2488
+ types.Part.from_text(text='Make sky more beautiful.'),
2489
+ ],
2490
+ ),
2491
+ ],
2492
+ config=types.GenerateContentConfig(
2493
+ response_mime_type='text/plain',
2494
+ response_modalities=['IMAGE', 'TEXT'],
2495
+ system_instruction='make the sky more beautiful.',
2496
+ ),
2497
+ ):
2498
+ continue
2499
+
2500
+ except errors.ClientError as e:
2501
+ assert ('Developer instruction is not enabled' in e.message)