google-genai 1.56.0__py3-none-any.whl → 1.58.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +49 -26
- google/genai/_interactions/__init__.py +3 -0
- google/genai/_interactions/_base_client.py +1 -1
- google/genai/_interactions/_client.py +57 -3
- google/genai/_interactions/_client_adapter.py +48 -0
- google/genai/_interactions/types/__init__.py +6 -0
- google/genai/_interactions/types/audio_content.py +2 -0
- google/genai/_interactions/types/audio_content_param.py +2 -0
- google/genai/_interactions/types/content.py +65 -0
- google/genai/_interactions/types/content_delta.py +10 -2
- google/genai/_interactions/types/content_param.py +63 -0
- google/genai/_interactions/types/content_start.py +5 -46
- google/genai/_interactions/types/content_stop.py +1 -2
- google/genai/_interactions/types/document_content.py +2 -0
- google/genai/_interactions/types/document_content_param.py +2 -0
- google/genai/_interactions/types/error_event.py +1 -2
- google/genai/_interactions/types/file_search_call_content.py +32 -0
- google/genai/_interactions/types/file_search_call_content_param.py +31 -0
- google/genai/_interactions/types/generation_config.py +4 -0
- google/genai/_interactions/types/generation_config_param.py +4 -0
- google/genai/_interactions/types/image_config.py +31 -0
- google/genai/_interactions/types/image_config_param.py +30 -0
- google/genai/_interactions/types/image_content.py +2 -0
- google/genai/_interactions/types/image_content_param.py +2 -0
- google/genai/_interactions/types/interaction.py +6 -52
- google/genai/_interactions/types/interaction_create_params.py +4 -22
- google/genai/_interactions/types/interaction_event.py +1 -2
- google/genai/_interactions/types/interaction_sse_event.py +5 -3
- google/genai/_interactions/types/interaction_status_update.py +1 -2
- google/genai/_interactions/types/model.py +1 -0
- google/genai/_interactions/types/model_param.py +1 -0
- google/genai/_interactions/types/turn.py +3 -44
- google/genai/_interactions/types/turn_param.py +4 -40
- google/genai/_interactions/types/usage.py +1 -1
- google/genai/_interactions/types/usage_param.py +1 -1
- google/genai/_interactions/types/video_content.py +2 -0
- google/genai/_interactions/types/video_content_param.py +2 -0
- google/genai/_live_converters.py +118 -34
- google/genai/_local_tokenizer_loader.py +1 -0
- google/genai/_tokens_converters.py +14 -14
- google/genai/_transformers.py +15 -21
- google/genai/batches.py +27 -22
- google/genai/caches.py +42 -42
- google/genai/chats.py +0 -2
- google/genai/client.py +61 -55
- google/genai/files.py +224 -0
- google/genai/live.py +1 -1
- google/genai/models.py +56 -44
- google/genai/tests/__init__.py +21 -0
- google/genai/tests/afc/__init__.py +21 -0
- google/genai/tests/afc/test_convert_if_exist_pydantic_model.py +309 -0
- google/genai/tests/afc/test_convert_number_values_for_function_call_args.py +63 -0
- google/genai/tests/afc/test_find_afc_incompatible_tool_indexes.py +240 -0
- google/genai/tests/afc/test_generate_content_stream_afc.py +530 -0
- google/genai/tests/afc/test_generate_content_stream_afc_thoughts.py +77 -0
- google/genai/tests/afc/test_get_function_map.py +176 -0
- google/genai/tests/afc/test_get_function_response_parts.py +277 -0
- google/genai/tests/afc/test_get_max_remote_calls_for_afc.py +130 -0
- google/genai/tests/afc/test_invoke_function_from_dict_args.py +241 -0
- google/genai/tests/afc/test_raise_error_for_afc_incompatible_config.py +159 -0
- google/genai/tests/afc/test_should_append_afc_history.py +53 -0
- google/genai/tests/afc/test_should_disable_afc.py +214 -0
- google/genai/tests/batches/__init__.py +17 -0
- google/genai/tests/batches/test_cancel.py +77 -0
- google/genai/tests/batches/test_create.py +78 -0
- google/genai/tests/batches/test_create_with_bigquery.py +113 -0
- google/genai/tests/batches/test_create_with_file.py +82 -0
- google/genai/tests/batches/test_create_with_gcs.py +125 -0
- google/genai/tests/batches/test_create_with_inlined_requests.py +255 -0
- google/genai/tests/batches/test_delete.py +86 -0
- google/genai/tests/batches/test_embedding.py +157 -0
- google/genai/tests/batches/test_get.py +78 -0
- google/genai/tests/batches/test_list.py +79 -0
- google/genai/tests/caches/__init__.py +17 -0
- google/genai/tests/caches/constants.py +29 -0
- google/genai/tests/caches/test_create.py +210 -0
- google/genai/tests/caches/test_create_custom_url.py +105 -0
- google/genai/tests/caches/test_delete.py +54 -0
- google/genai/tests/caches/test_delete_custom_url.py +52 -0
- google/genai/tests/caches/test_get.py +94 -0
- google/genai/tests/caches/test_get_custom_url.py +52 -0
- google/genai/tests/caches/test_list.py +68 -0
- google/genai/tests/caches/test_update.py +70 -0
- google/genai/tests/caches/test_update_custom_url.py +58 -0
- google/genai/tests/chats/__init__.py +1 -0
- google/genai/tests/chats/test_get_history.py +598 -0
- google/genai/tests/chats/test_send_message.py +844 -0
- google/genai/tests/chats/test_validate_response.py +90 -0
- google/genai/tests/client/__init__.py +17 -0
- google/genai/tests/client/test_async_stream.py +427 -0
- google/genai/tests/client/test_client_close.py +197 -0
- google/genai/tests/client/test_client_initialization.py +1687 -0
- google/genai/tests/client/test_client_requests.py +221 -0
- google/genai/tests/client/test_custom_client.py +104 -0
- google/genai/tests/client/test_http_options.py +178 -0
- google/genai/tests/client/test_replay_client_equality.py +168 -0
- google/genai/tests/client/test_retries.py +846 -0
- google/genai/tests/client/test_upload_errors.py +136 -0
- google/genai/tests/common/__init__.py +17 -0
- google/genai/tests/common/test_common.py +954 -0
- google/genai/tests/conftest.py +162 -0
- google/genai/tests/documents/__init__.py +17 -0
- google/genai/tests/documents/test_delete.py +51 -0
- google/genai/tests/documents/test_get.py +85 -0
- google/genai/tests/documents/test_list.py +72 -0
- google/genai/tests/errors/__init__.py +1 -0
- google/genai/tests/errors/test_api_error.py +417 -0
- google/genai/tests/file_search_stores/__init__.py +17 -0
- google/genai/tests/file_search_stores/test_create.py +66 -0
- google/genai/tests/file_search_stores/test_delete.py +64 -0
- google/genai/tests/file_search_stores/test_get.py +94 -0
- google/genai/tests/file_search_stores/test_import_file.py +112 -0
- google/genai/tests/file_search_stores/test_list.py +57 -0
- google/genai/tests/file_search_stores/test_upload_to_file_search_store.py +141 -0
- google/genai/tests/files/__init__.py +17 -0
- google/genai/tests/files/test_delete.py +46 -0
- google/genai/tests/files/test_download.py +85 -0
- google/genai/tests/files/test_get.py +46 -0
- google/genai/tests/files/test_list.py +72 -0
- google/genai/tests/files/test_register.py +272 -0
- google/genai/tests/files/test_register_table.py +70 -0
- google/genai/tests/files/test_upload.py +255 -0
- google/genai/tests/imports/test_no_optional_imports.py +28 -0
- google/genai/tests/interactions/test_auth.py +476 -0
- google/genai/tests/interactions/test_integration.py +84 -0
- google/genai/tests/interactions/test_paths.py +105 -0
- google/genai/tests/live/__init__.py +16 -0
- google/genai/tests/live/test_live.py +2143 -0
- google/genai/tests/live/test_live_music.py +362 -0
- google/genai/tests/live/test_live_response.py +163 -0
- google/genai/tests/live/test_send_client_content.py +147 -0
- google/genai/tests/live/test_send_realtime_input.py +268 -0
- google/genai/tests/live/test_send_tool_response.py +222 -0
- google/genai/tests/local_tokenizer/__init__.py +17 -0
- google/genai/tests/local_tokenizer/test_local_tokenizer.py +343 -0
- google/genai/tests/local_tokenizer/test_local_tokenizer_loader.py +235 -0
- google/genai/tests/mcp/__init__.py +17 -0
- google/genai/tests/mcp/test_has_mcp_tool_usage.py +89 -0
- google/genai/tests/mcp/test_mcp_to_gemini_tools.py +191 -0
- google/genai/tests/mcp/test_parse_config_for_mcp_sessions.py +201 -0
- google/genai/tests/mcp/test_parse_config_for_mcp_usage.py +130 -0
- google/genai/tests/mcp/test_set_mcp_usage_header.py +72 -0
- google/genai/tests/models/__init__.py +17 -0
- google/genai/tests/models/constants.py +8 -0
- google/genai/tests/models/test_compute_tokens.py +120 -0
- google/genai/tests/models/test_count_tokens.py +159 -0
- google/genai/tests/models/test_delete.py +107 -0
- google/genai/tests/models/test_edit_image.py +264 -0
- google/genai/tests/models/test_embed_content.py +94 -0
- google/genai/tests/models/test_function_call_streaming.py +442 -0
- google/genai/tests/models/test_generate_content.py +2501 -0
- google/genai/tests/models/test_generate_content_cached_content.py +132 -0
- google/genai/tests/models/test_generate_content_config_zero_value.py +103 -0
- google/genai/tests/models/test_generate_content_from_apikey.py +44 -0
- google/genai/tests/models/test_generate_content_http_options.py +40 -0
- google/genai/tests/models/test_generate_content_image_generation.py +143 -0
- google/genai/tests/models/test_generate_content_mcp.py +343 -0
- google/genai/tests/models/test_generate_content_media_resolution.py +97 -0
- google/genai/tests/models/test_generate_content_model.py +139 -0
- google/genai/tests/models/test_generate_content_part.py +821 -0
- google/genai/tests/models/test_generate_content_thought.py +76 -0
- google/genai/tests/models/test_generate_content_tools.py +1761 -0
- google/genai/tests/models/test_generate_images.py +191 -0
- google/genai/tests/models/test_generate_videos.py +759 -0
- google/genai/tests/models/test_get.py +104 -0
- google/genai/tests/models/test_list.py +233 -0
- google/genai/tests/models/test_recontext_image.py +189 -0
- google/genai/tests/models/test_segment_image.py +148 -0
- google/genai/tests/models/test_update.py +95 -0
- google/genai/tests/models/test_upscale_image.py +157 -0
- google/genai/tests/operations/__init__.py +17 -0
- google/genai/tests/operations/test_get.py +38 -0
- google/genai/tests/public_samples/__init__.py +17 -0
- google/genai/tests/public_samples/test_gemini_text_only.py +34 -0
- google/genai/tests/pytest_helper.py +246 -0
- google/genai/tests/shared/__init__.py +16 -0
- google/genai/tests/shared/batches/__init__.py +14 -0
- google/genai/tests/shared/batches/test_create_delete.py +57 -0
- google/genai/tests/shared/batches/test_create_get_cancel.py +56 -0
- google/genai/tests/shared/batches/test_list.py +40 -0
- google/genai/tests/shared/caches/__init__.py +14 -0
- google/genai/tests/shared/caches/test_create_get_delete.py +67 -0
- google/genai/tests/shared/caches/test_create_update_get.py +71 -0
- google/genai/tests/shared/caches/test_list.py +40 -0
- google/genai/tests/shared/chats/__init__.py +14 -0
- google/genai/tests/shared/chats/test_send_message.py +48 -0
- google/genai/tests/shared/chats/test_send_message_stream.py +50 -0
- google/genai/tests/shared/files/__init__.py +14 -0
- google/genai/tests/shared/files/test_list.py +41 -0
- google/genai/tests/shared/files/test_upload_get_delete.py +54 -0
- google/genai/tests/shared/models/__init__.py +14 -0
- google/genai/tests/shared/models/test_compute_tokens.py +41 -0
- google/genai/tests/shared/models/test_count_tokens.py +40 -0
- google/genai/tests/shared/models/test_edit_image.py +67 -0
- google/genai/tests/shared/models/test_embed.py +40 -0
- google/genai/tests/shared/models/test_generate_content.py +39 -0
- google/genai/tests/shared/models/test_generate_content_stream.py +54 -0
- google/genai/tests/shared/models/test_generate_images.py +40 -0
- google/genai/tests/shared/models/test_generate_videos.py +38 -0
- google/genai/tests/shared/models/test_list.py +37 -0
- google/genai/tests/shared/models/test_recontext_image.py +55 -0
- google/genai/tests/shared/models/test_segment_image.py +52 -0
- google/genai/tests/shared/models/test_upscale_image.py +52 -0
- google/genai/tests/shared/tunings/__init__.py +16 -0
- google/genai/tests/shared/tunings/test_create.py +46 -0
- google/genai/tests/shared/tunings/test_create_get_cancel.py +56 -0
- google/genai/tests/shared/tunings/test_list.py +39 -0
- google/genai/tests/tokens/__init__.py +16 -0
- google/genai/tests/tokens/test_create.py +154 -0
- google/genai/tests/transformers/__init__.py +17 -0
- google/genai/tests/transformers/test_blobs.py +84 -0
- google/genai/tests/transformers/test_bytes.py +15 -0
- google/genai/tests/transformers/test_duck_type.py +96 -0
- google/genai/tests/transformers/test_function_responses.py +72 -0
- google/genai/tests/transformers/test_schema.py +653 -0
- google/genai/tests/transformers/test_t_batch.py +286 -0
- google/genai/tests/transformers/test_t_content.py +160 -0
- google/genai/tests/transformers/test_t_contents.py +398 -0
- google/genai/tests/transformers/test_t_part.py +85 -0
- google/genai/tests/transformers/test_t_parts.py +87 -0
- google/genai/tests/transformers/test_t_tool.py +157 -0
- google/genai/tests/transformers/test_t_tools.py +195 -0
- google/genai/tests/tunings/__init__.py +16 -0
- google/genai/tests/tunings/test_cancel.py +39 -0
- google/genai/tests/tunings/test_end_to_end.py +106 -0
- google/genai/tests/tunings/test_get.py +67 -0
- google/genai/tests/tunings/test_list.py +75 -0
- google/genai/tests/tunings/test_tune.py +268 -0
- google/genai/tests/types/__init__.py +16 -0
- google/genai/tests/types/test_bytes_internal.py +271 -0
- google/genai/tests/types/test_bytes_type.py +152 -0
- google/genai/tests/types/test_future.py +101 -0
- google/genai/tests/types/test_optional_types.py +36 -0
- google/genai/tests/types/test_part_type.py +616 -0
- google/genai/tests/types/test_schema_from_json_schema.py +417 -0
- google/genai/tests/types/test_schema_json_schema.py +468 -0
- google/genai/tests/types/test_types.py +2903 -0
- google/genai/types.py +631 -488
- google/genai/version.py +1 -1
- {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/METADATA +6 -11
- google_genai-1.58.0.dist-info/RECORD +358 -0
- google_genai-1.56.0.dist-info/RECORD +0 -162
- /google/genai/{_interactions/py.typed → tests/interactions/__init__.py} +0 -0
- {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/WHEEL +0 -0
- {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.56.0.dist-info → google_genai-1.58.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1761 @@
|
|
|
1
|
+
# Copyright 2025 Google LLC
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
#
|
|
15
|
+
|
|
16
|
+
import collections
|
|
17
|
+
import logging
|
|
18
|
+
import os
|
|
19
|
+
import sys
|
|
20
|
+
import typing
|
|
21
|
+
|
|
22
|
+
import pydantic
|
|
23
|
+
import pytest
|
|
24
|
+
|
|
25
|
+
from ... import _transformers as t
|
|
26
|
+
from ... import errors
|
|
27
|
+
from ... import types
|
|
28
|
+
from .. import pytest_helper
|
|
29
|
+
|
|
30
|
+
GOOGLE_HOMEPAGE_FILE_PATH = os.path.abspath(
|
|
31
|
+
os.path.join(os.path.dirname(__file__), '../data/google_homepage.png')
|
|
32
|
+
)
|
|
33
|
+
with open(GOOGLE_HOMEPAGE_FILE_PATH, 'rb') as image_file:
|
|
34
|
+
google_homepage_screenshot_bytes = image_file.read()
|
|
35
|
+
|
|
36
|
+
function_declarations = [{
|
|
37
|
+
'name': 'get_current_weather',
|
|
38
|
+
'description': 'Get the current weather in a city',
|
|
39
|
+
'parameters': {
|
|
40
|
+
'type': 'OBJECT',
|
|
41
|
+
'properties': {
|
|
42
|
+
'location': {
|
|
43
|
+
'type': 'STRING',
|
|
44
|
+
'description': 'The location to get the weather for',
|
|
45
|
+
},
|
|
46
|
+
'unit': {
|
|
47
|
+
'type': 'STRING',
|
|
48
|
+
'enum': ['C', 'F'],
|
|
49
|
+
},
|
|
50
|
+
},
|
|
51
|
+
},
|
|
52
|
+
}]
|
|
53
|
+
computer_use_override_function_declarations = [{
|
|
54
|
+
'name': 'type_text_at',
|
|
55
|
+
'description': 'Types text at a certain coordinate.',
|
|
56
|
+
'parameters': {
|
|
57
|
+
'type': 'OBJECT',
|
|
58
|
+
'properties': {
|
|
59
|
+
'y': {
|
|
60
|
+
'type': 'INTEGER',
|
|
61
|
+
'description': 'The y-coordinate, normalized from 0 to 1000.',
|
|
62
|
+
},
|
|
63
|
+
'x': {
|
|
64
|
+
'type': 'INTEGER',
|
|
65
|
+
'description': 'The x-coordinate, normalized from 0 to 1000.',
|
|
66
|
+
},
|
|
67
|
+
'press_enter': {
|
|
68
|
+
'type': 'BOOLEAN',
|
|
69
|
+
'description': 'Whether to press enter after typing the text.'
|
|
70
|
+
},
|
|
71
|
+
'text': {
|
|
72
|
+
'type': 'STRING',
|
|
73
|
+
'description': 'The text to type.',
|
|
74
|
+
},
|
|
75
|
+
},
|
|
76
|
+
},
|
|
77
|
+
}]
|
|
78
|
+
function_response_parts = [
|
|
79
|
+
{
|
|
80
|
+
'function_response': {
|
|
81
|
+
'name': 'get_current_weather',
|
|
82
|
+
'response': {
|
|
83
|
+
'name': 'get_current_weather',
|
|
84
|
+
'content': {'weather': 'super nice'},
|
|
85
|
+
},
|
|
86
|
+
},
|
|
87
|
+
},
|
|
88
|
+
]
|
|
89
|
+
manual_function_calling_contents = [
|
|
90
|
+
{'role': 'user', 'parts': [{'text': 'What is the weather in Boston?'}]},
|
|
91
|
+
{
|
|
92
|
+
'role': 'model',
|
|
93
|
+
'parts': [{
|
|
94
|
+
'function_call': {
|
|
95
|
+
'name': 'get_current_weather',
|
|
96
|
+
'args': {'location': 'Boston'},
|
|
97
|
+
}
|
|
98
|
+
}],
|
|
99
|
+
},
|
|
100
|
+
{'role': 'user', 'parts': function_response_parts},
|
|
101
|
+
]
|
|
102
|
+
computer_use_multi_turn_contents = [
|
|
103
|
+
{
|
|
104
|
+
'role': 'user',
|
|
105
|
+
'parts': [{'text': 'Go to google and search nano banana'}],
|
|
106
|
+
},
|
|
107
|
+
{
|
|
108
|
+
'role': 'model',
|
|
109
|
+
'parts': [{'function_call': {'name': 'open_web_browser', 'args': {}}}],
|
|
110
|
+
},
|
|
111
|
+
{
|
|
112
|
+
'role': 'user',
|
|
113
|
+
'parts': [{
|
|
114
|
+
'function_response': {
|
|
115
|
+
'name': 'open_web_browser',
|
|
116
|
+
'response': {
|
|
117
|
+
'url': 'http://www.google.com',
|
|
118
|
+
},
|
|
119
|
+
'parts': [{
|
|
120
|
+
'inline_data': {
|
|
121
|
+
'data': google_homepage_screenshot_bytes,
|
|
122
|
+
'mime_type': 'image/png',
|
|
123
|
+
}
|
|
124
|
+
}],
|
|
125
|
+
}
|
|
126
|
+
}],
|
|
127
|
+
},
|
|
128
|
+
]
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def get_weather(city: str) -> str:
|
|
132
|
+
return f'The weather in {city} is sunny and 100 degrees.'
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def get_weather_declaration_only(city: str) -> str:
|
|
136
|
+
"""Get the current weather in a given city.
|
|
137
|
+
|
|
138
|
+
Args:
|
|
139
|
+
city: The city to get the weather for.
|
|
140
|
+
"""
|
|
141
|
+
pass
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def get_stock_price(symbol: str) -> str:
|
|
145
|
+
if symbol == 'GOOG':
|
|
146
|
+
return '1000'
|
|
147
|
+
else:
|
|
148
|
+
return '100'
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def divide_integers(a: int, b: int) -> int:
|
|
152
|
+
"""Divide two integers."""
|
|
153
|
+
return a // b
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
async def divide_floats_async(numerator: float, denominator: float) -> float:
|
|
157
|
+
"""Divide two floats."""
|
|
158
|
+
return numerator / denominator
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def divide_floats(a: float, b: float) -> float:
|
|
162
|
+
"""Divide two floats."""
|
|
163
|
+
return a / b
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
test_table: list[pytest_helper.TestTableItem] = [
|
|
167
|
+
pytest_helper.TestTableItem(
|
|
168
|
+
name='test_google_search',
|
|
169
|
+
parameters=types._GenerateContentParameters(
|
|
170
|
+
model='gemini-2.5-flash',
|
|
171
|
+
contents=t.t_contents('Why is the sky blue?'),
|
|
172
|
+
config={'tools': [{'google_search': {}}]},
|
|
173
|
+
),
|
|
174
|
+
),
|
|
175
|
+
pytest_helper.TestTableItem(
|
|
176
|
+
name='test_vai_search',
|
|
177
|
+
parameters=types._GenerateContentParameters(
|
|
178
|
+
model='gemini-2.5-flash',
|
|
179
|
+
contents=t.t_contents('what is vertex ai search?'),
|
|
180
|
+
config={
|
|
181
|
+
'tools': [{
|
|
182
|
+
'retrieval': {
|
|
183
|
+
'vertex_ai_search': {
|
|
184
|
+
'datastore': (
|
|
185
|
+
'projects/vertex-sdk-dev/locations/global/collections/default_collection/dataStores/yvonne_1728691676574'
|
|
186
|
+
)
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
}]
|
|
190
|
+
},
|
|
191
|
+
),
|
|
192
|
+
exception_if_mldev='retrieval',
|
|
193
|
+
),
|
|
194
|
+
pytest_helper.TestTableItem(
|
|
195
|
+
name='test_vai_google_search',
|
|
196
|
+
parameters=types._GenerateContentParameters(
|
|
197
|
+
model='gemini-2.5-flash',
|
|
198
|
+
contents=t.t_contents('why is the sky blue?'),
|
|
199
|
+
config={
|
|
200
|
+
'tools': [
|
|
201
|
+
types.Tool(
|
|
202
|
+
retrieval=types.Retrieval(
|
|
203
|
+
vertex_ai_search=types.VertexAISearch(
|
|
204
|
+
datastore='projects/vertex-sdk-dev/locations/global/collections/default_collection/dataStores/yvonne_1728691676574'
|
|
205
|
+
)
|
|
206
|
+
),
|
|
207
|
+
google_search_retrieval=types.GoogleSearchRetrieval(),
|
|
208
|
+
),
|
|
209
|
+
]
|
|
210
|
+
},
|
|
211
|
+
),
|
|
212
|
+
exception_if_mldev='retrieval',
|
|
213
|
+
exception_if_vertex='400',
|
|
214
|
+
),
|
|
215
|
+
pytest_helper.TestTableItem(
|
|
216
|
+
name='test_vai_search_engine',
|
|
217
|
+
parameters=types._GenerateContentParameters(
|
|
218
|
+
model='gemini-2.0-flash-001',
|
|
219
|
+
contents=t.t_contents('why is the sky blue?'),
|
|
220
|
+
config={
|
|
221
|
+
'tools': [
|
|
222
|
+
types.Tool(
|
|
223
|
+
retrieval=types.Retrieval(
|
|
224
|
+
vertex_ai_search=types.VertexAISearch(
|
|
225
|
+
engine='projects/862721868538/locations/global/collections/default_collection/engines/teamfood-v11_1720671063545'
|
|
226
|
+
)
|
|
227
|
+
)
|
|
228
|
+
),
|
|
229
|
+
]
|
|
230
|
+
},
|
|
231
|
+
),
|
|
232
|
+
exception_if_mldev='retrieval',
|
|
233
|
+
),
|
|
234
|
+
pytest_helper.TestTableItem(
|
|
235
|
+
name='test_rag_model_old',
|
|
236
|
+
parameters=types._GenerateContentParameters(
|
|
237
|
+
model='gemini-2.5-flash',
|
|
238
|
+
contents=t.t_contents(
|
|
239
|
+
'How much gain or loss did Google get in the Motorola Mobile'
|
|
240
|
+
' deal in 2014?',
|
|
241
|
+
),
|
|
242
|
+
config={
|
|
243
|
+
'tools': [
|
|
244
|
+
types.Tool(
|
|
245
|
+
retrieval=types.Retrieval(
|
|
246
|
+
vertex_rag_store=types.VertexRagStore(
|
|
247
|
+
rag_resources=[
|
|
248
|
+
types.VertexRagStoreRagResource(
|
|
249
|
+
rag_corpus='projects/964831358985/locations/us-central1/ragCorpora/3379951520341557248'
|
|
250
|
+
)
|
|
251
|
+
],
|
|
252
|
+
similarity_top_k=3,
|
|
253
|
+
)
|
|
254
|
+
),
|
|
255
|
+
),
|
|
256
|
+
]
|
|
257
|
+
},
|
|
258
|
+
),
|
|
259
|
+
exception_if_mldev='retrieval',
|
|
260
|
+
),
|
|
261
|
+
pytest_helper.TestTableItem(
|
|
262
|
+
name='test_rag_model_ga',
|
|
263
|
+
parameters=types._GenerateContentParameters(
|
|
264
|
+
model='gemini-2.5-flash',
|
|
265
|
+
contents=t.t_contents(
|
|
266
|
+
'How much gain or loss did Google get in the Motorola Mobile'
|
|
267
|
+
' deal in 2014?',
|
|
268
|
+
),
|
|
269
|
+
config={
|
|
270
|
+
'tools': [
|
|
271
|
+
types.Tool(
|
|
272
|
+
retrieval=types.Retrieval(
|
|
273
|
+
vertex_rag_store=types.VertexRagStore(
|
|
274
|
+
rag_resources=[
|
|
275
|
+
types.VertexRagStoreRagResource(
|
|
276
|
+
rag_corpus='projects/964831358985/locations/us-central1/ragCorpora/3379951520341557248'
|
|
277
|
+
)
|
|
278
|
+
],
|
|
279
|
+
rag_retrieval_config=types.RagRetrievalConfig(
|
|
280
|
+
top_k=3,
|
|
281
|
+
filter=types.RagRetrievalConfigFilter(
|
|
282
|
+
vector_similarity_threshold=0.5,
|
|
283
|
+
),
|
|
284
|
+
),
|
|
285
|
+
)
|
|
286
|
+
),
|
|
287
|
+
),
|
|
288
|
+
]
|
|
289
|
+
},
|
|
290
|
+
),
|
|
291
|
+
exception_if_mldev='retrieval',
|
|
292
|
+
),
|
|
293
|
+
pytest_helper.TestTableItem(
|
|
294
|
+
name='test_file_search',
|
|
295
|
+
parameters=types._GenerateContentParameters(
|
|
296
|
+
model='gemini-2.5-flash',
|
|
297
|
+
contents=t.t_contents(
|
|
298
|
+
'can you tell me the author of "A Survey of Modernist Poetry"?',
|
|
299
|
+
),
|
|
300
|
+
config={
|
|
301
|
+
'tools': [
|
|
302
|
+
types.Tool(
|
|
303
|
+
file_search=types.FileSearch(
|
|
304
|
+
file_search_store_names=[
|
|
305
|
+
'fileSearchStores/5en07ei3kojo-yo8sjqgvx2xf'
|
|
306
|
+
]
|
|
307
|
+
),
|
|
308
|
+
),
|
|
309
|
+
],
|
|
310
|
+
},
|
|
311
|
+
),
|
|
312
|
+
exception_if_vertex='is not supported in Vertex AI',
|
|
313
|
+
),
|
|
314
|
+
pytest_helper.TestTableItem(
|
|
315
|
+
name='test_file_search_non_existent_file_search_store',
|
|
316
|
+
parameters=types._GenerateContentParameters(
|
|
317
|
+
model='gemini-2.5-flash',
|
|
318
|
+
contents=t.t_contents(
|
|
319
|
+
'can you tell me the author of "A Survey of Modernist Poetry"?',
|
|
320
|
+
),
|
|
321
|
+
config={
|
|
322
|
+
'tools': [
|
|
323
|
+
types.Tool(
|
|
324
|
+
file_search=types.FileSearch(
|
|
325
|
+
file_search_store_names=[
|
|
326
|
+
'fileSearchStores/test-non-existent-rag-store'
|
|
327
|
+
],
|
|
328
|
+
),
|
|
329
|
+
),
|
|
330
|
+
],
|
|
331
|
+
},
|
|
332
|
+
),
|
|
333
|
+
exception_if_mldev='not exist',
|
|
334
|
+
exception_if_vertex='is not supported in Vertex AI',
|
|
335
|
+
),
|
|
336
|
+
pytest_helper.TestTableItem(
|
|
337
|
+
name='test_file_search_with_metadata_filter',
|
|
338
|
+
parameters=types._GenerateContentParameters(
|
|
339
|
+
model='gemini-2.5-flash',
|
|
340
|
+
contents=t.t_contents(
|
|
341
|
+
'can you tell me the author of "A Survey of Modernist Poetry"?',
|
|
342
|
+
),
|
|
343
|
+
config={
|
|
344
|
+
'tools': [
|
|
345
|
+
types.Tool(
|
|
346
|
+
file_search=types.FileSearch(
|
|
347
|
+
file_search_store_names=[
|
|
348
|
+
'fileSearchStores/5en07ei3kojo-yo8sjqgvx2xf'
|
|
349
|
+
],
|
|
350
|
+
metadata_filter='tag=science',
|
|
351
|
+
),
|
|
352
|
+
),
|
|
353
|
+
],
|
|
354
|
+
},
|
|
355
|
+
),
|
|
356
|
+
exception_if_vertex='is not supported in Vertex AI',
|
|
357
|
+
),
|
|
358
|
+
pytest_helper.TestTableItem(
|
|
359
|
+
name='test_file_search_with_metadata_filter_and_top_k',
|
|
360
|
+
parameters=types._GenerateContentParameters(
|
|
361
|
+
model='gemini-2.5-flash',
|
|
362
|
+
contents=t.t_contents(
|
|
363
|
+
'can you tell me the author of "A Survey of Modernist Poetry"',
|
|
364
|
+
),
|
|
365
|
+
config={
|
|
366
|
+
'tools': [
|
|
367
|
+
types.Tool(
|
|
368
|
+
file_search=types.FileSearch(
|
|
369
|
+
file_search_store_names=[
|
|
370
|
+
'fileSearchStores/5en07ei3kojo-yo8sjqgvx2xf'
|
|
371
|
+
],
|
|
372
|
+
metadata_filter='tag=science',
|
|
373
|
+
top_k=1,
|
|
374
|
+
),
|
|
375
|
+
),
|
|
376
|
+
],
|
|
377
|
+
},
|
|
378
|
+
),
|
|
379
|
+
exception_if_vertex='is not supported in Vertex AI',
|
|
380
|
+
),
|
|
381
|
+
pytest_helper.TestTableItem(
|
|
382
|
+
name='test_function_call',
|
|
383
|
+
parameters=types._GenerateContentParameters(
|
|
384
|
+
model='gemini-2.5-flash',
|
|
385
|
+
contents=manual_function_calling_contents,
|
|
386
|
+
config={
|
|
387
|
+
'tools': [{'function_declarations': function_declarations}]
|
|
388
|
+
},
|
|
389
|
+
),
|
|
390
|
+
),
|
|
391
|
+
pytest_helper.TestTableItem(
|
|
392
|
+
# TODO(b/382547236) add the test back in api mode when the code
|
|
393
|
+
# execution is supported.
|
|
394
|
+
skip_in_api_mode=(
|
|
395
|
+
'Model gemini-2.5-flash-001 does not support code execution for'
|
|
396
|
+
' Vertex API.'
|
|
397
|
+
),
|
|
398
|
+
name='test_code_execution',
|
|
399
|
+
parameters=types._GenerateContentParameters(
|
|
400
|
+
model='gemini-2.5-flash',
|
|
401
|
+
contents=t.t_contents(
|
|
402
|
+
'What is the sum of the first 50 prime numbers? '
|
|
403
|
+
+ 'Generate and run code for the calculation, and make sure you'
|
|
404
|
+
' get all 50.',
|
|
405
|
+
),
|
|
406
|
+
config={'tools': [{'code_execution': {}}]},
|
|
407
|
+
),
|
|
408
|
+
),
|
|
409
|
+
pytest_helper.TestTableItem(
|
|
410
|
+
name='test_function_google_search_with_long_lat',
|
|
411
|
+
parameters=types._GenerateContentParameters(
|
|
412
|
+
model='gemini-2.5-flash',
|
|
413
|
+
contents=t.t_contents('what is the price of GOOG?'),
|
|
414
|
+
config=types.GenerateContentConfig(
|
|
415
|
+
tools=[
|
|
416
|
+
types.Tool(
|
|
417
|
+
google_search=types.GoogleSearch(),
|
|
418
|
+
),
|
|
419
|
+
],
|
|
420
|
+
tool_config=types.ToolConfig(
|
|
421
|
+
retrieval_config=types.RetrievalConfig(
|
|
422
|
+
lat_lng=types.LatLngDict(
|
|
423
|
+
latitude=37.7749, longitude=-122.4194
|
|
424
|
+
)
|
|
425
|
+
)
|
|
426
|
+
),
|
|
427
|
+
),
|
|
428
|
+
),
|
|
429
|
+
),
|
|
430
|
+
pytest_helper.TestTableItem(
|
|
431
|
+
name='test_url_context',
|
|
432
|
+
parameters=types._GenerateContentParameters(
|
|
433
|
+
model='gemini-2.5-flash',
|
|
434
|
+
contents=t.t_contents(
|
|
435
|
+
'what are the top headlines on https://news.google.com'
|
|
436
|
+
),
|
|
437
|
+
config={'tools': [{'url_context': {}}]},
|
|
438
|
+
),
|
|
439
|
+
),
|
|
440
|
+
pytest_helper.TestTableItem(
|
|
441
|
+
name='test_url_context_paywall_status',
|
|
442
|
+
parameters=types._GenerateContentParameters(
|
|
443
|
+
model='gemini-2.5-flash',
|
|
444
|
+
contents=t.t_contents(
|
|
445
|
+
'Read the content of this URL:'
|
|
446
|
+
' https://unsplash.com/photos/portrait-of-an-adorable-golden-retriever-puppy-studio-shot-isolated-on-black-yRYCnnQASnc'
|
|
447
|
+
),
|
|
448
|
+
config={'tools': [{'url_context': {}}]},
|
|
449
|
+
),
|
|
450
|
+
),
|
|
451
|
+
pytest_helper.TestTableItem(
|
|
452
|
+
name='test_url_context_unsafe_status',
|
|
453
|
+
parameters=types._GenerateContentParameters(
|
|
454
|
+
model='gemini-2.5-flash',
|
|
455
|
+
contents=t.t_contents(
|
|
456
|
+
'Fetch the content of http://0k9.me/test.html'
|
|
457
|
+
),
|
|
458
|
+
config={'tools': [{'url_context': {}}]},
|
|
459
|
+
),
|
|
460
|
+
),
|
|
461
|
+
pytest_helper.TestTableItem(
|
|
462
|
+
name='test_computer_use',
|
|
463
|
+
parameters=types._GenerateContentParameters(
|
|
464
|
+
model='gemini-2.5-computer-use-preview-10-2025',
|
|
465
|
+
contents=t.t_contents('Go to google and search nano banana'),
|
|
466
|
+
config={'tools': [{'computer_use': {}}]},
|
|
467
|
+
),
|
|
468
|
+
exception_if_vertex='404',
|
|
469
|
+
),
|
|
470
|
+
pytest_helper.TestTableItem(
|
|
471
|
+
name='test_computer_use_with_browser_environment',
|
|
472
|
+
parameters=types._GenerateContentParameters(
|
|
473
|
+
model='gemini-2.5-computer-use-preview-10-2025',
|
|
474
|
+
contents=t.t_contents('Go to google and search nano banana'),
|
|
475
|
+
config={
|
|
476
|
+
'tools': [
|
|
477
|
+
{'computer_use': {'environment': 'ENVIRONMENT_BROWSER'}}
|
|
478
|
+
]
|
|
479
|
+
},
|
|
480
|
+
),
|
|
481
|
+
exception_if_vertex='404',
|
|
482
|
+
),
|
|
483
|
+
pytest_helper.TestTableItem(
|
|
484
|
+
name='test_computer_use_multi_turn',
|
|
485
|
+
parameters=types._GenerateContentParameters(
|
|
486
|
+
model='gemini-2.5-computer-use-preview-10-2025',
|
|
487
|
+
contents=computer_use_multi_turn_contents,
|
|
488
|
+
config={
|
|
489
|
+
'tools': [
|
|
490
|
+
{'computer_use': {'environment': 'ENVIRONMENT_BROWSER'}}
|
|
491
|
+
]
|
|
492
|
+
},
|
|
493
|
+
),
|
|
494
|
+
exception_if_vertex='404',
|
|
495
|
+
),
|
|
496
|
+
pytest_helper.TestTableItem(
|
|
497
|
+
name='test_computer_use_exclude_predefined_functions',
|
|
498
|
+
parameters=types._GenerateContentParameters(
|
|
499
|
+
model='gemini-2.5-computer-use-preview-10-2025',
|
|
500
|
+
contents='cheapest flight to NYC on Mar 18 2025 on Google Flights',
|
|
501
|
+
config={
|
|
502
|
+
'tools': [
|
|
503
|
+
{
|
|
504
|
+
'computer_use': {
|
|
505
|
+
'environment': 'ENVIRONMENT_BROWSER',
|
|
506
|
+
'excluded_predefined_functions': ['click_at'],
|
|
507
|
+
},
|
|
508
|
+
},
|
|
509
|
+
]
|
|
510
|
+
},
|
|
511
|
+
),
|
|
512
|
+
exception_if_vertex='404',
|
|
513
|
+
),
|
|
514
|
+
pytest_helper.TestTableItem(
|
|
515
|
+
name='test_computer_use_override_default_function',
|
|
516
|
+
parameters=types._GenerateContentParameters(
|
|
517
|
+
model='gemini-2.5-computer-use-preview-10-2025',
|
|
518
|
+
contents=computer_use_multi_turn_contents,
|
|
519
|
+
config={
|
|
520
|
+
'tools': [
|
|
521
|
+
{
|
|
522
|
+
'computer_use': {
|
|
523
|
+
'environment': 'ENVIRONMENT_BROWSER',
|
|
524
|
+
'excluded_predefined_functions': ['type_text_at'],
|
|
525
|
+
},
|
|
526
|
+
},
|
|
527
|
+
{
|
|
528
|
+
'function_declarations': (
|
|
529
|
+
computer_use_override_function_declarations
|
|
530
|
+
)
|
|
531
|
+
},
|
|
532
|
+
]
|
|
533
|
+
},
|
|
534
|
+
),
|
|
535
|
+
exception_if_vertex='404',
|
|
536
|
+
),
|
|
537
|
+
pytest_helper.TestTableItem(
|
|
538
|
+
# https://github.com/googleapis/python-genai/issues/830
|
|
539
|
+
# - models started returning empty thought in response to queries
|
|
540
|
+
# containing tools.
|
|
541
|
+
# - The API needs to accept any Content response it sends (otherwise
|
|
542
|
+
# chat breaks)
|
|
543
|
+
# - MLDev is not returning them, so it's okay that MLDev doesn't accept
|
|
544
|
+
# them?
|
|
545
|
+
# - This is also important to confirm forward compatibility.
|
|
546
|
+
# when the models start returning thought_signature, those will get
|
|
547
|
+
# dropped by the SDK leaving a `{'thought': True}` part.
|
|
548
|
+
name='test_chat_tools_empty_thoughts',
|
|
549
|
+
parameters=types._GenerateContentParameters(
|
|
550
|
+
model='gemini-2.5-flash',
|
|
551
|
+
contents=[
|
|
552
|
+
types.Content.model_validate(item)
|
|
553
|
+
for item in [
|
|
554
|
+
{
|
|
555
|
+
'parts': [{'text': 'Who won the 1955 world cup?'}],
|
|
556
|
+
'role': 'user',
|
|
557
|
+
},
|
|
558
|
+
{
|
|
559
|
+
'parts': [
|
|
560
|
+
{'thought': True},
|
|
561
|
+
{
|
|
562
|
+
'text': (
|
|
563
|
+
'The FIFA World Cup is held every four'
|
|
564
|
+
' years. The 1954 FIFA World Cup was won by'
|
|
565
|
+
' West Germany, who defeated Hungary in the'
|
|
566
|
+
' final.'
|
|
567
|
+
)
|
|
568
|
+
},
|
|
569
|
+
],
|
|
570
|
+
'role': 'model',
|
|
571
|
+
},
|
|
572
|
+
{
|
|
573
|
+
'parts': [{
|
|
574
|
+
'text': 'What was the population of canada in 1955?'
|
|
575
|
+
}],
|
|
576
|
+
'role': 'user',
|
|
577
|
+
},
|
|
578
|
+
]
|
|
579
|
+
],
|
|
580
|
+
config={
|
|
581
|
+
'tools': [{'function_declarations': function_declarations}],
|
|
582
|
+
},
|
|
583
|
+
),
|
|
584
|
+
),
|
|
585
|
+
pytest_helper.TestTableItem(
|
|
586
|
+
name='test_function_calling_config_validated_mode',
|
|
587
|
+
parameters=types._GenerateContentParameters(
|
|
588
|
+
model='gemini-2.5-flash',
|
|
589
|
+
contents=t.t_contents('How is the weather in Kirkland?'),
|
|
590
|
+
config={
|
|
591
|
+
'tools': [{'function_declarations': function_declarations}],
|
|
592
|
+
'tool_config': {
|
|
593
|
+
'function_calling_config': {'mode': 'VALIDATED'}
|
|
594
|
+
},
|
|
595
|
+
},
|
|
596
|
+
),
|
|
597
|
+
),
|
|
598
|
+
pytest_helper.TestTableItem(
|
|
599
|
+
name='test_google_maps_with_enable_widget',
|
|
600
|
+
parameters=types._GenerateContentParameters(
|
|
601
|
+
model='gemini-2.5-flash',
|
|
602
|
+
contents=t.t_contents('What is the nearest airport to Seattle?'),
|
|
603
|
+
config={'tools': [{'google_maps': {'enable_widget': True}}]},
|
|
604
|
+
),
|
|
605
|
+
),
|
|
606
|
+
]
|
|
607
|
+
|
|
608
|
+
|
|
609
|
+
# Wire this module into the shared table-driven harness: every entry of the
# `test_table` defined above is replayed against `models.generate_content`.
pytestmark = pytest_helper.setup(
    file=__file__,
    globals_for_file=globals(),
    test_method='models.generate_content',
    test_table=test_table,
)
|
|
615
|
+
pytest_plugins = ('pytest_asyncio',)
|
|
616
|
+
|
|
617
|
+
|
|
618
|
+
# Cannot be included in test_table because json serialization fails on function.
|
|
619
|
+
def test_function_google_search(client):
  """Combining a callable tool with GoogleSearch must be rejected."""
  prompt = 'What is the price of GOOG?.'
  search_tool = types.Tool(
      google_search=types.GoogleSearch(),
  )
  config = types.GenerateContentConfig(
      tools=[search_tool, get_stock_price],
      tool_config=types.ToolConfig(
          function_calling_config=types.FunctionCallingConfig(mode='AUTO')
      ),
  )
  # bad request to combine function call and google search retrieval
  with pytest.raises(errors.ClientError):
    client.models.generate_content(
        model='gemini-2.5-flash',
        contents=prompt,
        config=config,
    )
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
def test_google_search_stream(client):
  """Streaming with the google_search tool completes without raising."""
  user_content = types.Content(
      role='user',
      parts=[types.Part(text='Why is the sky blue?')],
  )
  stream = client.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents=user_content,
      config=types.GenerateContentConfig(
          tools=[types.ToolDict({'google_search': {}})],
      ),
  )
  # Drain the stream; success means no chunk raised.
  for _chunk in stream:
    pass
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
@pytest.mark.skipif(
    sys.version_info >= (3, 13),
    reason=(
        'object type is dumped as <Type.OBJECT: "OBJECT"> as opposed to'
        ' "OBJECT" in Python 3.13'
    ),
)
def test_function_calling_without_implementation(client):
  """A declaration-only tool (no callable body) is accepted by the API.

  With no implementation attached, automatic function calling cannot run the
  function, so the request round-tripping without raising is the assertion.
  """
  # Fix: the response was previously bound to an unused local; drop the
  # dead binding since nothing is asserted on it.
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the weather in Boston?',
      config={
          'tools': [get_weather_declaration_only],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
|
|
671
|
+
|
|
672
|
+
|
|
673
|
+
def test_2_function(client):
  """AFC answers a two-part question by invoking both provided tools."""
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the price of GOOG? And what is the weather in Boston?',
      config={
          'tools': [get_weather, get_stock_price],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  # Both tool results should surface in the final answer.
  for fragment in ('1000', 'Boston', 'sunny'):
    assert fragment in response.text
|
|
685
|
+
|
|
686
|
+
|
|
687
|
+
@pytest.mark.asyncio
async def test_2_function_async(client):
  """Async variant: AFC invokes both tools for a two-part question."""
  response = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the price of GOOG? And what is the weather in Boston?',
      config={
          'tools': [get_weather, get_stock_price],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  # Both tool results should surface in the final answer.
  for fragment in ('1000', 'Boston', 'sunny'):
    assert fragment in response.text
|
|
700
|
+
|
|
701
|
+
|
|
702
|
+
def test_automatic_function_calling_with_customized_math_rule(client):
  """AFC must use the tool's (deliberately wrong) math, not the model's."""

  def customized_divide_integers(numerator: int, denominator: int) -> int:
    """Divide two integers with customized math rule."""
    # Off-by-one on purpose: 1000 // 2 + 1 == 501 proves the tool ran.
    return numerator // denominator + 1

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [customized_divide_integers],
      },
  )
  assert '501' in result.text
|
|
715
|
+
|
|
716
|
+
|
|
717
|
+
def test_automatic_function_calling(client):
  """Basic AFC round trip: the divide tool produces the final answer."""
  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500' in result.text
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_with_async_function(client):
  """Async AFC executes an async tool implementation."""
  result = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1001.0/2.0?',
      config={
          'tools': [divide_floats_async],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500.5' in result.text
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
def test_automatic_function_calling_stream(client):
  """Streaming with AFC: every chunk carries text or a finish reason."""
  response = client.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  chunks = 0
  for part in response:
    chunks += 1
    assert part.text is not None or part.candidates[0].finish_reason
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
757
|
+
|
|
758
|
+
|
|
759
|
+
def test_disable_automatic_function_calling_stream(client):
  """With AFC disabled, streamed chunks must contain the raw function call."""
  # If AFC is disabled, the response should contain a function call.
  response = client.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'disable': True},
      },
  )
  chunks = 0
  for chunk in response:
    chunks += 1
    assert chunk.parts[0].function_call is not None
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
773
|
+
|
|
774
|
+
|
|
775
|
+
def test_automatic_function_calling_no_function_response_stream(client):
  """Streaming when the provided tool is irrelevant to the prompt."""
  response = client.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the weather in Boston?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  chunks = 0
  for part in response:
    chunks += 1
    assert part.text is not None or part.candidates[0].finish_reason
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
788
|
+
|
|
789
|
+
|
|
790
|
+
@pytest.mark.asyncio
async def test_disable_automatic_function_calling_stream_async(client):
  """Async: with AFC disabled, streamed chunks carry the raw function call."""
  # If AFC is disabled, the response should contain a function call.
  response = await client.aio.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'disable': True},
      },
  )
  chunks = 0
  async for chunk in response:
    chunks += 1
    assert chunk.parts[0].function_call is not None
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
805
|
+
|
|
806
|
+
|
|
807
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_no_function_response_stream_async(
    client,
):
  """Async streaming when the provided tool is irrelevant to the prompt."""
  response = await client.aio.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the weather in Boston?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  chunks = 0
  async for chunk in response:
    chunks += 1
    assert chunk.text is not None or chunk.candidates[0].finish_reason
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
823
|
+
|
|
824
|
+
|
|
825
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_stream_async(client):
  """Async streaming with AFC: chunks carry text or a finish reason."""
  response = await client.aio.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  chunks = 0
  async for chunk in response:
    chunks += 1
    assert chunk.text is not None or chunk.candidates[0].finish_reason
  # Fix: the counter was previously never checked, so an empty stream
  # passed silently.
  assert chunks > 0
|
|
839
|
+
|
|
840
|
+
|
|
841
|
+
def test_callable_tools_user_disable_afc(client):
  """Smoke test: request succeeds with AFC explicitly disabled."""
  afc_settings = {
      'disable': True,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
853
|
+
|
|
854
|
+
|
|
855
|
+
def test_callable_tools_user_disable_afc_with_max_remote_calls(client):
  """Smoke test: AFC disabled with a positive maximum_remote_calls."""
  afc_settings = {
      'disable': True,
      'maximum_remote_calls': 2,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
868
|
+
|
|
869
|
+
|
|
870
|
+
def test_callable_tools_user_disable_afc_with_max_remote_calls_negative(
    client,
):
  """Smoke test: AFC disabled with a negative maximum_remote_calls."""
  afc_settings = {
      'disable': True,
      'maximum_remote_calls': -1,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
885
|
+
|
|
886
|
+
|
|
887
|
+
def test_callable_tools_user_disable_afc_with_max_remote_calls_zero(client):
  """Smoke test: AFC disabled with maximum_remote_calls set to zero."""
  afc_settings = {
      'disable': True,
      'maximum_remote_calls': 0,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
900
|
+
|
|
901
|
+
|
|
902
|
+
def test_callable_tools_user_enable_afc(client):
  """Smoke test: request succeeds with AFC explicitly enabled."""
  afc_settings = {
      'disable': False,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
914
|
+
|
|
915
|
+
|
|
916
|
+
def test_callable_tools_user_enable_afc_with_max_remote_calls(client):
  """Smoke test: AFC enabled with a positive maximum_remote_calls."""
  afc_settings = {
      'disable': False,
      'maximum_remote_calls': 2,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
929
|
+
|
|
930
|
+
|
|
931
|
+
def test_callable_tools_user_enable_afc_with_max_remote_calls_negative(
    client,
):
  """Smoke test: AFC enabled with a negative maximum_remote_calls."""
  afc_settings = {
      'disable': False,
      'maximum_remote_calls': -1,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
946
|
+
|
|
947
|
+
|
|
948
|
+
def test_callable_tools_user_enable_afc_with_max_remote_calls_zero(client):
  """Smoke test: AFC enabled with maximum_remote_calls set to zero."""
  afc_settings = {
      'disable': False,
      'maximum_remote_calls': 0,
      'ignore_call_history': True,
  }
  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': afc_settings,
      },
  )
|
|
961
|
+
|
|
962
|
+
|
|
963
|
+
def test_automatic_function_calling_with_exception(client):
  """Smoke test: a tool raising (division by zero) does not crash the call."""
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/0?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
|
|
972
|
+
|
|
973
|
+
|
|
974
|
+
def test_automatic_function_calling_float_without_decimal(client):
  """The float tool is picked for float operands and the result keeps .0."""
  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000.0/2.0?',
      config={
          'tools': [divide_floats, divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500.0' in result.text
|
|
985
|
+
|
|
986
|
+
|
|
987
|
+
def test_automatic_function_calling_with_pydantic_model(client):
  """AFC can call a tool whose parameter is a pydantic model."""

  class CityObject(pydantic.BaseModel):
    city_name: str

  def get_weather_pydantic_model(
      city_object: CityObject, is_winter: bool
  ) -> str:
    # Same strings as before, folded into one template.
    season = 'cold and 10' if is_winter else 'sunny and 100'
    return f'The weather in {city_object.city_name} is {season} degrees.'

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='it is winter now, what is the weather in Boston?',
      config={
          'tools': [get_weather_pydantic_model],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert 'cold' in result.text and 'Boston' in result.text
|
|
1009
|
+
|
|
1010
|
+
|
|
1011
|
+
def test_automatic_function_calling_with_pydantic_model_in_list_type(client):
  """AFC can call a tool taking a list of pydantic models."""

  class CityObject(pydantic.BaseModel):
    city_name: str

  def get_weather_from_list_of_cities(
      city_object_list: list[CityObject], is_winter: bool
  ) -> str:
    # Same per-city strings as before, built with a generator + join.
    season = 'cold and 10' if is_winter else 'sunny and 100'
    return ''.join(
        f'The weather in {city.city_name} is {season} degrees.\n'
        for city in city_object_list
    )

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='it is winter now, what is the weather in Boston and New York?',
      config={
          'tools': [get_weather_from_list_of_cities],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert 'cold' in result.text and 'Boston' in result.text
  assert 'cold' in result.text and 'New York' in result.text
|
|
1043
|
+
|
|
1044
|
+
|
|
1045
|
+
# TODO(b/397404656): modify this test to pass in api mode
def test_automatic_function_calling_with_pydantic_model_in_union_type(client):
  """AFC with a typing.Union of two pydantic models as the tool parameter."""

  class AnimalObject(pydantic.BaseModel):
    name: str
    age: int
    species: str

  class PlantObject(pydantic.BaseModel):
    name: str
    height: float
    color: str

  def get_information(
      object_of_interest: typing.Union[AnimalObject, PlantObject],
  ) -> str:
    if isinstance(object_of_interest, AnimalObject):
      return (
          f'The animal is of {object_of_interest.species} species and is'
          f' named {object_of_interest.name} is'
          f' {object_of_interest.age} years old'
      )
    if isinstance(object_of_interest, PlantObject):
      return (
          f'The plant is named {object_of_interest.name} and is'
          f' {object_of_interest.height} meters tall and is'
          f' {object_of_interest.color} color'
      )
    return 'The animal is not supported'

  # Vertex rejects the union schema today; MLDev accepts it.
  with pytest_helper.exception_if_vertex(client, errors.ClientError):
    result = client.models.generate_content(
        model='gemini-2.5-flash',
        contents=(
            'I have a one year old cat named Sundae, can you get the'
            ' information of the cat for me?'
        ),
        config={
            'system_instruction': (
                'you answer questions based on the tools provided'
            ),
            'tools': [get_information],
            'automatic_function_calling': {'ignore_call_history': True},
        },
    )
    assert 'Sundae' in result.text
    assert 'cat' in result.text
|
|
1091
|
+
|
|
1092
|
+
|
|
1093
|
+
def test_automatic_function_calling_with_union_operator(client):
  """AFC with a `str | Model` union-operator annotation on the tool."""

  class AnimalObject(pydantic.BaseModel):
    name: str
    age: int
    species: str

  def get_information(
      object_of_interest: str | AnimalObject,
  ) -> str:
    if isinstance(object_of_interest, AnimalObject):
      return (
          f'The animal is of {object_of_interest.species} species and is'
          f' named {object_of_interest.name} is'
          f' {object_of_interest.age} years old'
      )
    return f'The object of interest is {object_of_interest}'

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents=(
          'I have a one year old cat named Sundae, can you get the'
          ' information of the cat for me?'
      ),
      config={
          'tools': [get_information],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert result.text
|
|
1122
|
+
|
|
1123
|
+
|
|
1124
|
+
def test_automatic_function_calling_with_tuple_param(client):
  """AFC can call a tool whose parameter is annotated as a fixed tuple."""

  def output_latlng(
      latlng: tuple[float, float],
  ) -> str:
    lat, lng = latlng
    return f'The latitude is {lat} and the longitude is {lng}'

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents=(
          'The coordinates are (51.509, -0.118). What is the latitude and longitude?'
      ),
      config={
          'tools': [output_latlng],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert result.text
|
|
1141
|
+
|
|
1142
|
+
|
|
1143
|
+
@pytest.mark.skipif(
    sys.version_info < (3, 10),
    reason='| is only supported in Python 3.10 and above.',
)
def test_automatic_function_calling_with_union_operator_return_type(client):
  """AFC handles a tool whose return annotation is a union (int | float)."""

  # Fix: the docstring previously documented a non-existent `cheese_id`
  # parameter; the actual parameter name is `cheese`. The docstring is
  # surfaced to the model as the tool description, so it must match.
  def get_cheese_age(cheese: int) -> int | float:
    """
    Retrieves data about the age of the cheese given its ID.

    Args:
      cheese: The ID of the cheese.

    Returns:
      An int or float of the age of the cheese.
    """
    if cheese == 1:
      return 2.5
    elif cheese == 2:
      return 3
    else:
      return 0.0

  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='How old is the cheese with id 2?',
      config={
          'tools': [get_cheese_age],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '3' in response.text
|
|
1174
|
+
|
|
1175
|
+
|
|
1176
|
+
def test_automatic_function_calling_with_parameterized_generic_union_type(
    client,
):
  """AFC with an Optional[list[str]] parameter on the tool."""

  def describe_cities(
      country: str,
      cities: typing.Optional[list[str]] = None,
  ) -> str:
    'Given a country and an optional list of cities, describe the cities.'
    if cities is None:
      return 'There are no cities to describe.'
    return (
        f'The cities in {country} are: {", ".join(cities)} and they are nice.'
    )

  result = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='Can you describe the city of San Francisco, USA?',
      config={
          'tools': [describe_cities],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert 'San Francisco' in result.text
|
|
1200
|
+
|
|
1201
|
+
|
|
1202
|
+
@pytest.mark.asyncio
async def test_google_search_async(client):
  """Async smoke test: the google_search tool round-trips."""
  content = types.ContentDict(
      {'role': 'user', 'parts': [{'text': 'Why is the sky blue?'}]}
  )
  await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents=[content],
      config={'tools': [{'google_search': {}}]},
  )
|
|
1213
|
+
|
|
1214
|
+
|
|
1215
|
+
def test_empty_tools(client):
  """An empty tools list is accepted by generate_content."""
  empty_config = {'tools': []}
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the price of GOOG?.',
      config=empty_config,
  )
|
|
1221
|
+
|
|
1222
|
+
|
|
1223
|
+
def test_with_1_empty_tool(client):
  """An empty tool dict alongside a real tool is a bad request on Vertex."""
  config = {
      'tools': [{}, get_stock_price],
      'automatic_function_calling': {'ignore_call_history': True},
  }
  # Bad request for empty tool.
  with pytest_helper.exception_if_vertex(client, errors.ClientError):
    client.models.generate_content(
        model='gemini-2.5-flash',
        contents='What is the price of GOOG?.',
        config=config,
    )
|
|
1234
|
+
|
|
1235
|
+
|
|
1236
|
+
@pytest.mark.asyncio
async def test_google_search_stream_async(client):
  """Async streaming with the google_search tool completes without error."""
  stream = await client.aio.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='Why is the sky blue?',
      config={'tools': [{'google_search': {}}]},
  )
  # Drain the stream; success means no chunk raised.
  async for _chunk in stream:
    pass
|
|
1244
|
+
|
|
1245
|
+
|
|
1246
|
+
@pytest.mark.asyncio
async def test_vai_search_stream_async(client):
  """Vertex AI Search streams on Vertex; on MLDev it raises ValueError."""
  # The retrieval config is identical in both branches; build it once.
  datastore = (
      'projects/vertex-sdk-dev/locations/global/collections/default_collection/dataStores/yvonne_1728691676574'
  )
  retrieval_config = {
      'tools': [
          {'retrieval': {'vertex_ai_search': {'datastore': datastore}}}
      ]
  }
  if client._api_client.vertexai:
    async for _part in await client.aio.models.generate_content_stream(
        model='gemini-2.5-flash',
        contents='what is vertex ai search?',
        config=retrieval_config,
    ):
      pass
  else:
    with pytest.raises(ValueError) as e:
      async for _part in await client.aio.models.generate_content_stream(
          model='gemini-2.5-flash',
          contents='Why is the sky blue?',
          config=retrieval_config,
      ):
        pass
    assert 'retrieval' in str(e)
|
|
1284
|
+
|
|
1285
|
+
|
|
1286
|
+
def test_automatic_function_calling_with_coroutine_function(client):
  """A coroutine tool passed to the sync client is rejected."""

  async def divide_integers(a: int, b: int) -> int:
    return a // b

  # The sync surface cannot await the tool, so it must refuse it up front.
  with pytest.raises(errors.UnsupportedFunctionError):
    client.models.generate_content(
        model='gemini-2.5-flash',
        contents='what is the result of 1000/2?',
        config={
            'tools': [divide_integers],
            'automatic_function_calling': {'ignore_call_history': True},
        },
    )
|
|
1299
|
+
|
|
1300
|
+
|
|
1301
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_with_coroutine_function_async(
    client,
):
  """The async client accepts and awaits a coroutine tool."""

  async def divide_integers(a: int, b: int) -> int:
    return a // b

  result = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500' in result.text
|
|
1318
|
+
|
|
1319
|
+
|
|
1320
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async(client):
  """The async client runs AFC with a plain (sync) tool."""

  def divide_integers(a: int, b: int) -> int:
    return a // b

  result = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500' in result.text
|
|
1335
|
+
|
|
1336
|
+
|
|
1337
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async_with_exception(client):
  """A raising tool (division by zero) records an error in AFC history.

  The tool call fails inside AFC; the SDK reports the failure back to the
  model as a function_response carrying an 'error' entry, which this test
  inspects in the automatic_function_calling_history.
  """
  # Fix: removed a local `mystery_function` that was defined but never
  # referenced — the tools list below uses the module-level
  # `divide_integers`, so the local was dead code.
  response = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/0?',
      config={
          'tools': [divide_integers],
          'system_instruction': (
              'you must first look at the tools and then think about answers'
          ),
      },
  )
  assert response.automatic_function_calling_history
  # The last history entry is the function_response produced for the
  # failing call; it must carry the error payload.
  assert (
      response.automatic_function_calling_history[-1]
      .parts[0]
      .function_response.response['error']
  )
|
|
1358
|
+
|
|
1359
|
+
|
|
1360
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async_float_without_decimal(client):
  """Async: the float tool is picked and the answer keeps the .0 suffix."""
  result = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000.0/2.0?',
      config={
          'tools': [divide_floats, divide_integers],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )
  assert '500.0' in result.text
|
|
1372
|
+
|
|
1373
|
+
|
|
1374
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async_with_pydantic_model(client):
  """Async AFC with a pydantic-model tool parameter (Vertex-only assert)."""

  class CityObject(pydantic.BaseModel):
    city_name: str

  def get_weather_pydantic_model(
      city_object: CityObject, is_winter: bool
  ) -> str:
    # Same strings as before, folded into one template.
    season = 'cold and 10' if is_winter else 'sunny and 100'
    return f'The weather in {city_object.city_name} is {season} degrees.'

  result = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='it is winter now, what is the weather in Boston?',
      config={
          'tools': [get_weather_pydantic_model],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )

  # ML Dev couldn't understand pydantic model
  if client.vertexai:
    assert 'cold' in result.text and 'Boston' in result.text
|
|
1399
|
+
|
|
1400
|
+
|
|
1401
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async_with_async_function(client):
  """An async coroutine function can be registered as an AFC tool."""

  async def get_current_weather_async(city: str) -> str:
    """Returns the current weather in the city."""

    return 'windy'

  afc_config = {
      'tools': [get_current_weather_async],
      'automatic_function_calling': {'ignore_call_history': True},
  }
  response = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the weather in San Francisco?',
      config=afc_config,
  )

  for expected_fragment in ('windy', 'San Francisco'):
    assert expected_fragment in response.text
|
|
1419
|
+
|
|
1420
|
+
|
|
1421
|
+
@pytest.mark.asyncio
async def test_automatic_function_calling_async_with_async_function_stream(
    client,
):
  """Streaming surfaces the function-call chunk emitted for an async tool.

  The original pre-initialized `chunk = None` before the loop, but `chunk`
  is only referenced inside the loop body — the dead initializer is removed.
  """

  async def get_current_weather_async(city: str) -> str:
    """Returns the current weather in the city."""

    return 'windy'

  response = await client.aio.models.generate_content_stream(
      model='gemini-2.5-flash',
      contents='what is the weather in San Francisco?',
      config={
          'tools': [get_current_weather_async],
          'automatic_function_calling': {'ignore_call_history': True},
      },
  )

  async for chunk in response:
    # Only chunks that carry a function call are checked; text chunks pass.
    if chunk.parts[0].function_call:
      assert chunk.parts[0].function_call.name == 'get_current_weather_async'
      assert chunk.parts[0].function_call.args['city'] == 'San Francisco'
|
|
1444
|
+
|
|
1445
|
+
|
|
1446
|
+
def test_2_function_with_history(client):
  """Verifies the AFC history recorded when one prompt triggers two tools."""

  def _dump(payload):
    # Shared serializer so every comparison uses the same JSON settings.
    return payload.model_dump_json(exclude_none=True)

  response = client.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the price of GOOG? And what is the weather in Boston?',
      config={
          'tools': [get_weather, get_stock_price],
          'automatic_function_calling': {'ignore_call_history': False},
      },
  )

  history = response.automatic_function_calling_history

  # Turn 0: the original user prompt.
  assert history[0].role == 'user'
  assert (
      history[0].parts[0].text
      == 'What is the price of GOOG? And what is the weather in Boston?'
  )

  # Turn 1: the model asks for both function calls in one response.
  assert history[1].role == 'model'
  assert _dump(history[1].parts[0].function_call) == _dump(
      types.FunctionCall(name='get_stock_price', args={'symbol': 'GOOG'})
  )
  assert _dump(history[1].parts[1].function_call) == _dump(
      types.FunctionCall(name='get_weather', args={'city': 'Boston'})
  )

  # Turn 2: the SDK replies with both function responses.
  assert history[2].role == 'user'
  assert _dump(history[2].parts[0].function_response) == _dump(
      types.FunctionResponse(name='get_stock_price', response={'result': '1000'})
  )
  assert _dump(history[2].parts[1].function_response) == _dump(
      types.FunctionResponse(
          name='get_weather',
          response={'result': 'The weather in Boston is sunny and 100 degrees.'},
      )
  )
|
|
1498
|
+
|
|
1499
|
+
|
|
1500
|
+
@pytest.mark.asyncio
async def test_2_function_with_history_async(client):
  """Async variant: AFC history recorded when one prompt triggers two tools."""

  def _dump(payload):
    # Shared serializer so every comparison uses the same JSON settings.
    return payload.model_dump_json(exclude_none=True)

  response = await client.aio.models.generate_content(
      model='gemini-2.5-flash',
      contents='What is the price of GOOG? And what is the weather in Boston?',
      config={
          'tools': [get_weather, get_stock_price],
          'automatic_function_calling': {'ignore_call_history': False},
      },
  )

  history = response.automatic_function_calling_history

  # Turn 0: the original user prompt.
  assert history[0].role == 'user'
  assert (
      history[0].parts[0].text
      == 'What is the price of GOOG? And what is the weather in Boston?'
  )

  # Turn 1: the model asks for both function calls in one response.
  assert history[1].role == 'model'
  assert _dump(history[1].parts[0].function_call) == _dump(
      types.FunctionCall(name='get_stock_price', args={'symbol': 'GOOG'})
  )
  assert _dump(history[1].parts[1].function_call) == _dump(
      types.FunctionCall(name='get_weather', args={'city': 'Boston'})
  )

  # Turn 2: the SDK replies with both function responses.
  assert history[2].role == 'user'
  assert _dump(history[2].parts[0].function_response) == _dump(
      types.FunctionResponse(name='get_stock_price', response={'result': '1000'})
  )
  assert _dump(history[2].parts[1].function_response) == _dump(
      types.FunctionResponse(
          name='get_weather',
          response={'result': 'The weather in Boston is sunny and 100 degrees.'},
      )
  )
|
|
1553
|
+
|
|
1554
|
+
|
|
1555
|
+
class FunctionHolder:
  """Carrier of instance methods used as tools in generate_content tests."""

  # Prefix embedded in every method's return value so tests can detect it
  # verbatim in the model's response text.
  NAME = 'FunctionHolder'

  def is_a_duck(self, number: int) -> str:
    is_odd = number % 2 == 1
    return self.NAME + 'says isOdd: ' + str(is_odd)

  def is_a_rabbit(self, number: int) -> str:
    is_even = number % 2 == 0
    return self.NAME + 'says isEven: ' + str(is_even)
|
|
1563
|
+
|
|
1564
|
+
|
|
1565
|
+
def test_class_method_tools(client):
  """Bound instance methods can be passed directly as generate_content tools."""
  holder = FunctionHolder()
  response = client.models.generate_content(
      model='gemini-2.0-flash-exp',
      contents=(
          'Print the verbatim output of is_a_duck and is_a_rabbit for the'
          ' number 100.'
      ),
      config={
          'tools': [holder.is_a_duck, holder.is_a_rabbit],
      },
  )
  # Both tool outputs start with the class NAME, so its presence proves
  # the instance methods were actually invoked.
  assert 'FunctionHolder' in response.text
|
|
1581
|
+
|
|
1582
|
+
|
|
1583
|
+
def test_disable_afc_in_any_mode(client):
  """Smoke test: disabling AFC with tool_config mode 'ANY' does not raise.

  No assertions by design — the call completing without an exception is the
  behavior under test. The original bound the result to an unused `response`
  local, which is removed.
  """
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config=types.GenerateContentConfig(
          tools=[divide_integers],
          automatic_function_calling=types.AutomaticFunctionCallingConfig(
              disable=True
          ),
          tool_config=types.ToolConfig(
              function_calling_config=types.FunctionCallingConfig(mode='ANY')
          ),
      ),
  )
|
|
1597
|
+
|
|
1598
|
+
|
|
1599
|
+
def test_afc_once_in_any_mode(client):
  """Smoke test: bounded AFC (2 remote calls) with tool mode 'ANY' completes.

  No assertions by design — completing without an exception is the behavior
  under test. The original bound the result to an unused `response` local,
  which is removed.
  """
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config=types.GenerateContentConfig(
          tools=[divide_integers],
          automatic_function_calling=types.AutomaticFunctionCallingConfig(
              maximum_remote_calls=2
          ),
          tool_config=types.ToolConfig(
              function_calling_config=types.FunctionCallingConfig(mode='ANY')
          ),
      ),
  )
|
|
1613
|
+
|
|
1614
|
+
|
|
1615
|
+
def test_code_execution_tool(client):
  """The code-execution tool yields executable code and a run result."""
  response = client.models.generate_content(
      model='gemini-2.0-flash-exp',
      contents=(
          'What is the sum of the first 50 prime numbers? Generate and run code'
          ' for the calculation, and make sure you get all 50.'
      ),
      config=types.GenerateContentConfig(
          # Pass an instance; the original passed the class object
          # `types.ToolCodeExecution` itself and relied on pydantic coercion.
          tools=[types.Tool(code_execution=types.ToolCodeExecution())]
      ),
  )

  assert response.executable_code
  # The model sometimes echoes the primes and sometimes only the sum (5117),
  # so either fragment counts as success.
  assert (
      'prime' in response.code_execution_result.lower()
      or '5117' in response.code_execution_result
  )
|
|
1632
|
+
|
|
1633
|
+
|
|
1634
|
+
def test_afc_logs_to_logger_instance(client, caplog):
  """AFC progress messages go to the 'google_genai.models' logger at INFO."""
  caplog.set_level(logging.DEBUG, logger='google_genai.models')
  client.models.generate_content(
      model='gemini-2.5-flash',
      contents='what is the result of 1000/2?',
      config={
          'tools': [divide_integers],
          'automatic_function_calling': {
              'disable': False,
              'maximum_remote_calls': 1,
              'ignore_call_history': True,
          },
      },
  )
  # Every captured record comes from the SDK logger at INFO level.
  for record in caplog.records:
    assert record.levelname == 'INFO'
    assert record.name == 'google_genai.models'

  expected_messages = (
      'AFC is enabled with max remote calls: 1',
      'remote call 1 is done',
      'Reached max remote calls',
  )
  for message in expected_messages:
    assert message in caplog.text
|
|
1655
|
+
|
|
1656
|
+
|
|
1657
|
+
def test_suppress_logs_with_sdk_logger(client, caplog):
  """Raising the SDK logger to ERROR suppresses all AFC log output.

  The original set the global 'google_genai.models' logger to ERROR and
  never restored it, leaking the level into every later test in the run;
  the level is now saved and restored in a try/finally.
  """
  caplog.set_level(logging.DEBUG, logger='google_genai.models')
  sdk_logger = logging.getLogger('google_genai.models')
  original_level = sdk_logger.level
  sdk_logger.setLevel(logging.ERROR)
  try:
    client.models.generate_content(
        model='gemini-2.5-flash',
        contents='what is the result of 1000/2?',
        config={
            'tools': [divide_integers],
            'automatic_function_calling': {
                'disable': False,
                'maximum_remote_calls': 2,
                'ignore_call_history': True,
            },
        },
    )
    assert not caplog.text
  finally:
    # Restore the shared logger level so other tests see the default.
    sdk_logger.setLevel(original_level)
|
|
1674
|
+
|
|
1675
|
+
|
|
1676
|
+
def test_tools_chat_curation(client, caplog):
  """Curated chat history keeps one user/model pair per completed turn.

  Fixes two issues in the original: the global 'google_genai.models' logger
  level was set to ERROR and never restored (leaking into later tests), and
  both `send_message` results were bound to an unused `response` local.
  """
  caplog.set_level(logging.DEBUG, logger='google_genai.models')
  sdk_logger = logging.getLogger('google_genai.models')
  original_level = sdk_logger.level
  sdk_logger.setLevel(logging.ERROR)
  try:
    config = {
        'tools': [{'function_declarations': function_declarations}],
    }

    chat = client.chats.create(
        model='gemini-2.5-flash',
        config=config,
    )

    chat.send_message(
        message='Who won the 1955 world cup?',
    )
    chat.send_message(
        message='What was the population of canada in 1955?',
    )

    history = chat.get_history(curated=True)
    # Two turns -> 2 user messages + 2 model replies in the curated history.
    assert len(history) == 4
  finally:
    # Restore the shared logger level so other tests see the default.
    sdk_logger.setLevel(original_level)
|
|
1700
|
+
|
|
1701
|
+
|
|
1702
|
+
def test_function_declaration_with_callable(client):
  """Callable tools can be mixed with raw function-declaration dicts."""
  mixed_tools = [
      divide_integers,
      {'function_declarations': function_declarations},
  ]
  response = client.models.generate_content(
      model='gemini-2.5-pro',
      contents=(
          'Divide 1000 by 2. And tell'
          ' me the weather in London.'
      ),
      config={'tools': mixed_tools},
  )
  assert response.function_calls is not None
|
|
1717
|
+
|
|
1718
|
+
def test_function_declaration_with_callable_stream_now(client):
  """Smoke test: mixed callable/declaration tools work on the stream path."""
  stream = client.models.generate_content_stream(
      model='gemini-2.5-pro',
      contents='Divide 1000 by 2. And tell me the weather in London.',
      config={
          'tools': [
              divide_integers,
              {'function_declarations': function_declarations},
          ],
      },
  )
  # Drain the stream; completing without an exception is the test.
  for _ in stream:
    pass
|
|
1730
|
+
|
|
1731
|
+
@pytest.mark.asyncio
async def test_function_declaration_with_callable_async(client):
  """Async variant: callable tools mix with raw function-declaration dicts."""
  mixed_tools = [
      divide_integers,
      {'function_declarations': function_declarations},
  ]
  response = await client.aio.models.generate_content(
      model='gemini-2.5-pro',
      contents=(
          'Divide 1000 by 2. And tell'
          ' me the weather in London.'
      ),
      config={'tools': mixed_tools},
  )
  assert response.function_calls is not None
|
|
1747
|
+
|
|
1748
|
+
|
|
1749
|
+
@pytest.mark.asyncio
async def test_function_declaration_with_callable_async_stream(client):
  """Smoke test: mixed tools work on the async streaming path."""
  stream = await client.aio.models.generate_content_stream(
      model='gemini-2.5-pro',
      contents='Divide 1000 by 2. And tell me the weather in London.',
      config={
          'tools': [
              divide_integers,
              {'function_declarations': function_declarations},
          ],
      },
  )
  # Drain the stream; completing without an exception is the test.
  async for _ in stream:
    pass
|