google-genai 1.53.0__py3-none-any.whl → 1.55.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +1 -0
- google/genai/_api_client.py +6 -6
- google/genai/_interactions/__init__.py +117 -0
- google/genai/_interactions/_base_client.py +2019 -0
- google/genai/_interactions/_client.py +511 -0
- google/genai/_interactions/_compat.py +234 -0
- google/genai/_interactions/_constants.py +29 -0
- google/genai/_interactions/_exceptions.py +122 -0
- google/genai/_interactions/_files.py +139 -0
- google/genai/_interactions/_models.py +873 -0
- google/genai/_interactions/_qs.py +165 -0
- google/genai/_interactions/_resource.py +58 -0
- google/genai/_interactions/_response.py +847 -0
- google/genai/_interactions/_streaming.py +354 -0
- google/genai/_interactions/_types.py +276 -0
- google/genai/_interactions/_utils/__init__.py +79 -0
- google/genai/_interactions/_utils/_compat.py +61 -0
- google/genai/_interactions/_utils/_datetime_parse.py +151 -0
- google/genai/_interactions/_utils/_logs.py +40 -0
- google/genai/_interactions/_utils/_proxy.py +80 -0
- google/genai/_interactions/_utils/_reflection.py +57 -0
- google/genai/_interactions/_utils/_resources_proxy.py +39 -0
- google/genai/_interactions/_utils/_streams.py +27 -0
- google/genai/_interactions/_utils/_sync.py +73 -0
- google/genai/_interactions/_utils/_transform.py +472 -0
- google/genai/_interactions/_utils/_typing.py +172 -0
- google/genai/_interactions/_utils/_utils.py +437 -0
- google/genai/_interactions/_version.py +18 -0
- google/genai/_interactions/resources/__init__.py +34 -0
- google/genai/_interactions/resources/interactions.py +1350 -0
- google/genai/_interactions/types/__init__.py +107 -0
- google/genai/_interactions/types/allowed_tools.py +33 -0
- google/genai/_interactions/types/allowed_tools_param.py +35 -0
- google/genai/_interactions/types/annotation.py +42 -0
- google/genai/_interactions/types/annotation_param.py +42 -0
- google/genai/_interactions/types/audio_content.py +38 -0
- google/genai/_interactions/types/audio_content_param.py +45 -0
- google/genai/_interactions/types/audio_mime_type.py +25 -0
- google/genai/_interactions/types/audio_mime_type_param.py +27 -0
- google/genai/_interactions/types/code_execution_call_arguments.py +33 -0
- google/genai/_interactions/types/code_execution_call_arguments_param.py +32 -0
- google/genai/_interactions/types/code_execution_call_content.py +37 -0
- google/genai/_interactions/types/code_execution_call_content_param.py +37 -0
- google/genai/_interactions/types/code_execution_result_content.py +42 -0
- google/genai/_interactions/types/code_execution_result_content_param.py +41 -0
- google/genai/_interactions/types/content_delta.py +358 -0
- google/genai/_interactions/types/content_start.py +79 -0
- google/genai/_interactions/types/content_stop.py +35 -0
- google/genai/_interactions/types/deep_research_agent_config.py +33 -0
- google/genai/_interactions/types/deep_research_agent_config_param.py +32 -0
- google/genai/_interactions/types/document_content.py +36 -0
- google/genai/_interactions/types/document_content_param.py +43 -0
- google/genai/_interactions/types/dynamic_agent_config.py +44 -0
- google/genai/_interactions/types/dynamic_agent_config_param.py +33 -0
- google/genai/_interactions/types/error_event.py +46 -0
- google/genai/_interactions/types/file_search_result_content.py +46 -0
- google/genai/_interactions/types/file_search_result_content_param.py +46 -0
- google/genai/_interactions/types/function.py +38 -0
- google/genai/_interactions/types/function_call_content.py +39 -0
- google/genai/_interactions/types/function_call_content_param.py +39 -0
- google/genai/_interactions/types/function_param.py +37 -0
- google/genai/_interactions/types/function_result_content.py +52 -0
- google/genai/_interactions/types/function_result_content_param.py +54 -0
- google/genai/_interactions/types/generation_config.py +57 -0
- google/genai/_interactions/types/generation_config_param.py +59 -0
- google/genai/_interactions/types/google_search_call_arguments.py +29 -0
- google/genai/_interactions/types/google_search_call_arguments_param.py +31 -0
- google/genai/_interactions/types/google_search_call_content.py +37 -0
- google/genai/_interactions/types/google_search_call_content_param.py +37 -0
- google/genai/_interactions/types/google_search_result.py +35 -0
- google/genai/_interactions/types/google_search_result_content.py +43 -0
- google/genai/_interactions/types/google_search_result_content_param.py +44 -0
- google/genai/_interactions/types/google_search_result_param.py +35 -0
- google/genai/_interactions/types/image_content.py +41 -0
- google/genai/_interactions/types/image_content_param.py +48 -0
- google/genai/_interactions/types/image_mime_type.py +23 -0
- google/genai/_interactions/types/image_mime_type_param.py +25 -0
- google/genai/_interactions/types/interaction.py +165 -0
- google/genai/_interactions/types/interaction_create_params.py +212 -0
- google/genai/_interactions/types/interaction_event.py +37 -0
- google/genai/_interactions/types/interaction_get_params.py +46 -0
- google/genai/_interactions/types/interaction_sse_event.py +32 -0
- google/genai/_interactions/types/interaction_status_update.py +37 -0
- google/genai/_interactions/types/mcp_server_tool_call_content.py +42 -0
- google/genai/_interactions/types/mcp_server_tool_call_content_param.py +42 -0
- google/genai/_interactions/types/mcp_server_tool_result_content.py +52 -0
- google/genai/_interactions/types/mcp_server_tool_result_content_param.py +54 -0
- google/genai/_interactions/types/model.py +36 -0
- google/genai/_interactions/types/model_param.py +38 -0
- google/genai/_interactions/types/speech_config.py +35 -0
- google/genai/_interactions/types/speech_config_param.py +35 -0
- google/genai/_interactions/types/text_content.py +37 -0
- google/genai/_interactions/types/text_content_param.py +38 -0
- google/genai/_interactions/types/thinking_level.py +22 -0
- google/genai/_interactions/types/thought_content.py +41 -0
- google/genai/_interactions/types/thought_content_param.py +47 -0
- google/genai/_interactions/types/tool.py +100 -0
- google/genai/_interactions/types/tool_choice.py +26 -0
- google/genai/_interactions/types/tool_choice_config.py +28 -0
- google/genai/_interactions/types/tool_choice_config_param.py +29 -0
- google/genai/_interactions/types/tool_choice_param.py +28 -0
- google/genai/_interactions/types/tool_choice_type.py +22 -0
- google/genai/_interactions/types/tool_param.py +97 -0
- google/genai/_interactions/types/turn.py +76 -0
- google/genai/_interactions/types/turn_param.py +73 -0
- google/genai/_interactions/types/url_context_call_arguments.py +29 -0
- google/genai/_interactions/types/url_context_call_arguments_param.py +31 -0
- google/genai/_interactions/types/url_context_call_content.py +37 -0
- google/genai/_interactions/types/url_context_call_content_param.py +37 -0
- google/genai/_interactions/types/url_context_result.py +33 -0
- google/genai/_interactions/types/url_context_result_content.py +43 -0
- google/genai/_interactions/types/url_context_result_content_param.py +44 -0
- google/genai/_interactions/types/url_context_result_param.py +32 -0
- google/genai/_interactions/types/usage.py +106 -0
- google/genai/_interactions/types/usage_param.py +106 -0
- google/genai/_interactions/types/video_content.py +41 -0
- google/genai/_interactions/types/video_content_param.py +48 -0
- google/genai/_interactions/types/video_mime_type.py +36 -0
- google/genai/_interactions/types/video_mime_type_param.py +38 -0
- google/genai/_live_converters.py +34 -3
- google/genai/_tokens_converters.py +5 -0
- google/genai/batches.py +62 -55
- google/genai/client.py +223 -0
- google/genai/errors.py +16 -1
- google/genai/file_search_stores.py +60 -60
- google/genai/files.py +56 -56
- google/genai/interactions.py +17 -0
- google/genai/live.py +4 -3
- google/genai/models.py +15 -3
- google/genai/tests/__init__.py +21 -0
- google/genai/tests/afc/__init__.py +21 -0
- google/genai/tests/afc/test_convert_if_exist_pydantic_model.py +309 -0
- google/genai/tests/afc/test_convert_number_values_for_function_call_args.py +63 -0
- google/genai/tests/afc/test_find_afc_incompatible_tool_indexes.py +240 -0
- google/genai/tests/afc/test_generate_content_stream_afc.py +530 -0
- google/genai/tests/afc/test_generate_content_stream_afc_thoughts.py +77 -0
- google/genai/tests/afc/test_get_function_map.py +176 -0
- google/genai/tests/afc/test_get_function_response_parts.py +277 -0
- google/genai/tests/afc/test_get_max_remote_calls_for_afc.py +130 -0
- google/genai/tests/afc/test_invoke_function_from_dict_args.py +241 -0
- google/genai/tests/afc/test_raise_error_for_afc_incompatible_config.py +159 -0
- google/genai/tests/afc/test_should_append_afc_history.py +53 -0
- google/genai/tests/afc/test_should_disable_afc.py +214 -0
- google/genai/tests/batches/__init__.py +17 -0
- google/genai/tests/batches/test_cancel.py +77 -0
- google/genai/tests/batches/test_create.py +78 -0
- google/genai/tests/batches/test_create_with_bigquery.py +113 -0
- google/genai/tests/batches/test_create_with_file.py +82 -0
- google/genai/tests/batches/test_create_with_gcs.py +125 -0
- google/genai/tests/batches/test_create_with_inlined_requests.py +255 -0
- google/genai/tests/batches/test_delete.py +86 -0
- google/genai/tests/batches/test_embedding.py +157 -0
- google/genai/tests/batches/test_get.py +78 -0
- google/genai/tests/batches/test_list.py +79 -0
- google/genai/tests/caches/__init__.py +17 -0
- google/genai/tests/caches/constants.py +29 -0
- google/genai/tests/caches/test_create.py +210 -0
- google/genai/tests/caches/test_create_custom_url.py +105 -0
- google/genai/tests/caches/test_delete.py +54 -0
- google/genai/tests/caches/test_delete_custom_url.py +52 -0
- google/genai/tests/caches/test_get.py +94 -0
- google/genai/tests/caches/test_get_custom_url.py +52 -0
- google/genai/tests/caches/test_list.py +68 -0
- google/genai/tests/caches/test_update.py +70 -0
- google/genai/tests/caches/test_update_custom_url.py +58 -0
- google/genai/tests/chats/__init__.py +1 -0
- google/genai/tests/chats/test_get_history.py +597 -0
- google/genai/tests/chats/test_send_message.py +844 -0
- google/genai/tests/chats/test_validate_response.py +90 -0
- google/genai/tests/client/__init__.py +17 -0
- google/genai/tests/client/test_async_stream.py +427 -0
- google/genai/tests/client/test_client_close.py +197 -0
- google/genai/tests/client/test_client_initialization.py +1687 -0
- google/genai/tests/client/test_client_requests.py +355 -0
- google/genai/tests/client/test_custom_client.py +77 -0
- google/genai/tests/client/test_http_options.py +178 -0
- google/genai/tests/client/test_replay_client_equality.py +168 -0
- google/genai/tests/client/test_retries.py +846 -0
- google/genai/tests/client/test_upload_errors.py +136 -0
- google/genai/tests/common/__init__.py +17 -0
- google/genai/tests/common/test_common.py +954 -0
- google/genai/tests/conftest.py +162 -0
- google/genai/tests/documents/__init__.py +17 -0
- google/genai/tests/documents/test_delete.py +51 -0
- google/genai/tests/documents/test_get.py +85 -0
- google/genai/tests/documents/test_list.py +72 -0
- google/genai/tests/errors/__init__.py +1 -0
- google/genai/tests/errors/test_api_error.py +417 -0
- google/genai/tests/file_search_stores/__init__.py +17 -0
- google/genai/tests/file_search_stores/test_create.py +66 -0
- google/genai/tests/file_search_stores/test_delete.py +64 -0
- google/genai/tests/file_search_stores/test_get.py +94 -0
- google/genai/tests/file_search_stores/test_import_file.py +112 -0
- google/genai/tests/file_search_stores/test_list.py +57 -0
- google/genai/tests/file_search_stores/test_upload_to_file_search_store.py +141 -0
- google/genai/tests/files/__init__.py +17 -0
- google/genai/tests/files/test_delete.py +46 -0
- google/genai/tests/files/test_download.py +85 -0
- google/genai/tests/files/test_get.py +46 -0
- google/genai/tests/files/test_list.py +72 -0
- google/genai/tests/files/test_upload.py +255 -0
- google/genai/tests/imports/test_no_optional_imports.py +28 -0
- google/genai/tests/interactions/__init__.py +0 -0
- google/genai/tests/interactions/test_integration.py +80 -0
- google/genai/tests/live/__init__.py +16 -0
- google/genai/tests/live/test_live.py +2177 -0
- google/genai/tests/live/test_live_music.py +362 -0
- google/genai/tests/live/test_live_response.py +163 -0
- google/genai/tests/live/test_send_client_content.py +147 -0
- google/genai/tests/live/test_send_realtime_input.py +268 -0
- google/genai/tests/live/test_send_tool_response.py +222 -0
- google/genai/tests/local_tokenizer/__init__.py +17 -0
- google/genai/tests/local_tokenizer/test_local_tokenizer.py +343 -0
- google/genai/tests/local_tokenizer/test_local_tokenizer_loader.py +235 -0
- google/genai/tests/mcp/__init__.py +17 -0
- google/genai/tests/mcp/test_has_mcp_tool_usage.py +89 -0
- google/genai/tests/mcp/test_mcp_to_gemini_tools.py +191 -0
- google/genai/tests/mcp/test_parse_config_for_mcp_sessions.py +201 -0
- google/genai/tests/mcp/test_parse_config_for_mcp_usage.py +130 -0
- google/genai/tests/mcp/test_set_mcp_usage_header.py +72 -0
- google/genai/tests/models/__init__.py +17 -0
- google/genai/tests/models/constants.py +8 -0
- google/genai/tests/models/test_compute_tokens.py +120 -0
- google/genai/tests/models/test_count_tokens.py +159 -0
- google/genai/tests/models/test_delete.py +107 -0
- google/genai/tests/models/test_edit_image.py +264 -0
- google/genai/tests/models/test_embed_content.py +94 -0
- google/genai/tests/models/test_function_call_streaming.py +442 -0
- google/genai/tests/models/test_generate_content.py +2502 -0
- google/genai/tests/models/test_generate_content_cached_content.py +132 -0
- google/genai/tests/models/test_generate_content_config_zero_value.py +103 -0
- google/genai/tests/models/test_generate_content_from_apikey.py +44 -0
- google/genai/tests/models/test_generate_content_http_options.py +40 -0
- google/genai/tests/models/test_generate_content_image_generation.py +143 -0
- google/genai/tests/models/test_generate_content_mcp.py +343 -0
- google/genai/tests/models/test_generate_content_media_resolution.py +97 -0
- google/genai/tests/models/test_generate_content_model.py +139 -0
- google/genai/tests/models/test_generate_content_part.py +821 -0
- google/genai/tests/models/test_generate_content_thought.py +76 -0
- google/genai/tests/models/test_generate_content_tools.py +1761 -0
- google/genai/tests/models/test_generate_images.py +191 -0
- google/genai/tests/models/test_generate_videos.py +759 -0
- google/genai/tests/models/test_get.py +104 -0
- google/genai/tests/models/test_list.py +233 -0
- google/genai/tests/models/test_recontext_image.py +189 -0
- google/genai/tests/models/test_segment_image.py +148 -0
- google/genai/tests/models/test_update.py +95 -0
- google/genai/tests/models/test_upscale_image.py +157 -0
- google/genai/tests/operations/__init__.py +17 -0
- google/genai/tests/operations/test_get.py +38 -0
- google/genai/tests/public_samples/__init__.py +17 -0
- google/genai/tests/public_samples/test_gemini_text_only.py +34 -0
- google/genai/tests/pytest_helper.py +229 -0
- google/genai/tests/shared/__init__.py +16 -0
- google/genai/tests/shared/batches/__init__.py +14 -0
- google/genai/tests/shared/batches/test_create_delete.py +57 -0
- google/genai/tests/shared/batches/test_create_get_cancel.py +56 -0
- google/genai/tests/shared/batches/test_list.py +40 -0
- google/genai/tests/shared/caches/__init__.py +14 -0
- google/genai/tests/shared/caches/test_create_get_delete.py +67 -0
- google/genai/tests/shared/caches/test_create_update_get.py +71 -0
- google/genai/tests/shared/caches/test_list.py +40 -0
- google/genai/tests/shared/chats/__init__.py +14 -0
- google/genai/tests/shared/chats/test_send_message.py +48 -0
- google/genai/tests/shared/chats/test_send_message_stream.py +50 -0
- google/genai/tests/shared/files/__init__.py +14 -0
- google/genai/tests/shared/files/test_list.py +41 -0
- google/genai/tests/shared/files/test_upload_get_delete.py +54 -0
- google/genai/tests/shared/models/__init__.py +14 -0
- google/genai/tests/shared/models/test_compute_tokens.py +41 -0
- google/genai/tests/shared/models/test_count_tokens.py +40 -0
- google/genai/tests/shared/models/test_edit_image.py +67 -0
- google/genai/tests/shared/models/test_embed.py +40 -0
- google/genai/tests/shared/models/test_generate_content.py +39 -0
- google/genai/tests/shared/models/test_generate_content_stream.py +54 -0
- google/genai/tests/shared/models/test_generate_images.py +40 -0
- google/genai/tests/shared/models/test_generate_videos.py +38 -0
- google/genai/tests/shared/models/test_list.py +37 -0
- google/genai/tests/shared/models/test_recontext_image.py +55 -0
- google/genai/tests/shared/models/test_segment_image.py +52 -0
- google/genai/tests/shared/models/test_upscale_image.py +52 -0
- google/genai/tests/shared/tunings/__init__.py +16 -0
- google/genai/tests/shared/tunings/test_create.py +46 -0
- google/genai/tests/shared/tunings/test_create_get_cancel.py +56 -0
- google/genai/tests/shared/tunings/test_list.py +39 -0
- google/genai/tests/tokens/__init__.py +16 -0
- google/genai/tests/tokens/test_create.py +154 -0
- google/genai/tests/transformers/__init__.py +17 -0
- google/genai/tests/transformers/test_blobs.py +71 -0
- google/genai/tests/transformers/test_bytes.py +15 -0
- google/genai/tests/transformers/test_duck_type.py +96 -0
- google/genai/tests/transformers/test_function_responses.py +72 -0
- google/genai/tests/transformers/test_schema.py +653 -0
- google/genai/tests/transformers/test_t_batch.py +286 -0
- google/genai/tests/transformers/test_t_content.py +160 -0
- google/genai/tests/transformers/test_t_contents.py +398 -0
- google/genai/tests/transformers/test_t_part.py +85 -0
- google/genai/tests/transformers/test_t_parts.py +87 -0
- google/genai/tests/transformers/test_t_tool.py +157 -0
- google/genai/tests/transformers/test_t_tools.py +195 -0
- google/genai/tests/tunings/__init__.py +16 -0
- google/genai/tests/tunings/test_cancel.py +39 -0
- google/genai/tests/tunings/test_end_to_end.py +106 -0
- google/genai/tests/tunings/test_get.py +67 -0
- google/genai/tests/tunings/test_list.py +75 -0
- google/genai/tests/tunings/test_tune.py +268 -0
- google/genai/tests/types/__init__.py +16 -0
- google/genai/tests/types/test_bytes_internal.py +271 -0
- google/genai/tests/types/test_bytes_type.py +152 -0
- google/genai/tests/types/test_future.py +101 -0
- google/genai/tests/types/test_optional_types.py +36 -0
- google/genai/tests/types/test_part_type.py +616 -0
- google/genai/tests/types/test_schema_from_json_schema.py +417 -0
- google/genai/tests/types/test_schema_json_schema.py +468 -0
- google/genai/tests/types/test_types.py +2903 -0
- google/genai/tunings.py +57 -57
- google/genai/types.py +229 -121
- google/genai/version.py +1 -1
- {google_genai-1.53.0.dist-info → google_genai-1.55.0.dist-info}/METADATA +4 -2
- google_genai-1.55.0.dist-info/RECORD +345 -0
- google_genai-1.53.0.dist-info/RECORD +0 -41
- {google_genai-1.53.0.dist-info → google_genai-1.55.0.dist-info}/WHEEL +0 -0
- {google_genai-1.53.0.dist-info → google_genai-1.55.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.53.0.dist-info → google_genai-1.55.0.dist-info}/top_level.txt +0 -0
google/genai/tests/chats/test_send_message.py
@@ -0,0 +1,844 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import os
+import sys
+
+from pydantic import BaseModel
+from pydantic import ValidationError
+import pytest
+
+from .. import pytest_helper
+from ... import errors
+from ... import types
+
+try:
+  from mcp import types as mcp_types
+  from mcp import ClientSession as McpClientSession
+except ImportError as e:
+  import sys
+
+  if sys.version_info < (3, 10):
+    raise ImportError(
+        'MCP Tool requires Python 3.10 or above. Please upgrade your Python'
+        ' version.'
+    ) from e
+  else:
+    raise e
+
+
+pytestmark = pytest_helper.setup(
+    file=__file__,
+    globals_for_file=globals(),
+)
+pytest_plugins = ('pytest_asyncio',)
+
+
+MODEL_NAME = 'gemini-2.5-flash'
+
+def divide_intergers_with_customized_math_rule(
+    numerator: int, denominator: int
+) -> int:
+  """Divides two integers with customized math rule."""
+  return numerator // denominator + 1
+
+
+def square_integer(given_integer: int) -> int:
+  return given_integer*given_integer
+
+
+def power_disco_ball(power: bool) -> bool:
+  """Powers the spinning disco ball."""
+  print(f"Disco ball is {'spinning!' if power else 'stopped.'}")
+  return True
+
+def start_music(energetic: bool, loud: bool, bpm: int) -> str:
+  """Play some music matching the specified parameters.
+
+  Args:
+    energetic: Whether the music is energetic or not.
+    loud: Whether the music is loud or not.
+    bpm: The beats per minute of the music.
+
+  Returns: The name of the song being played.
+  """
+  print(f"Starting music! {energetic=} {loud=}, {bpm=}")
+  return "Never gonna give you up."
+
+def dim_lights(brightness: float) -> bool:
+  """Dim the lights.
+
+  Args:
+    brightness: The brightness of the lights, 0.0 is off, 1.0 is full.
+  """
+  print(f"Lights are now set to {brightness:.0%}")
+  return True
+
+def test_text(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chat.send_message(
+      'tell me a story in 100 words',
+  )
+
+
+def test_part(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chat.send_message(
+      types.Part.from_text(text='tell me a story in 100 words'),
+  )
+
+
+def test_parts(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chat.send_message(
+      [
+          types.Part.from_text(text='tell me a US city'),
+          types.Part.from_text(text='the city is in west coast'),
+      ],
+  )
+
+
+def test_image(client, image_jpeg):
+  chat = client.chats.create(model=MODEL_NAME)
+  chat.send_message(
+      [
+          'what is the image about?',
+          image_jpeg,
+      ],
+  )
+
+
+def test_thinking_budget(client):
+  """Tests that the thinking budget is respected and generates thoughts."""
+  chat = client.chats.create(
+      model=MODEL_NAME,
+      config={
+          'thinking_config': {
+              'include_thoughts': True,
+              'thinking_budget': 10000,
+          },
+      },
+  )
+  response1 = chat.send_message(
+      'what is the sum of natural numbers from 1 to 100?',
+  )
+  has_thought1 = False
+  if response1.candidates:
+    for candidate in response1.candidates:
+      for part in candidate.content.parts:
+        if part.thought:
+          has_thought1 = True
+          break
+  assert has_thought1
+
+  response2 = chat.send_message(
+      'can you help me to understand the logic better?'
+  )
+  has_thought2 = False
+  if response2.candidates:
+    for candidate in response2.candidates:
+      for part in candidate.content.parts:
+        if part.thought:
+          has_thought2 = True
+          break
+  assert has_thought2
+
+
+def test_thinking_budget_stream(client):
+  """Tests that the thinking budget is respected and generates thoughts."""
+  chat = client.chats.create(
+      model=MODEL_NAME,
+      config={
+          'thinking_config': {
+              'include_thoughts': True,
+              'thinking_budget': 10000,
+          },
+      },
+  )
+  has_thought1 = False
+  for chunk in chat.send_message_stream(
+      'what is the sum of natural numbers from 1 to 100?',
+  ):
+    if chunk.candidates:
+      for candidate in chunk.candidates:
+        for part in candidate.content.parts:
+          if part.thought:
+            has_thought1 = True
+            break
+  assert has_thought1
+
+  has_thought2 = False
+  for chunk in chat.send_message_stream(
+      'can you help me to understand the logic better?'
+  ):
+    if chunk.candidates:
+      for candidate in chunk.candidates:
+        for part in candidate.content.parts:
+          if part.thought:
+            has_thought2 = True
+            break
+  assert has_thought2
+
+
+def test_google_cloud_storage_uri(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  with pytest_helper.exception_if_mldev(client, errors.ClientError):
+    chat.send_message(
+        [
+            'what is the image about?',
+            types.Part.from_uri(
+                file_uri='gs://unified-genai-dev/imagen-inputs/google_small.png',
+                mime_type='image/png',
+            ),
+        ],
+    )
+
+
+def test_uploaded_file_uri(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  with pytest_helper.exception_if_vertex(client, errors.ClientError):
+    chat.send_message(
+        [
+            'what is the image about?',
+            types.Part.from_uri(
+                file_uri='https://generativelanguage.googleapis.com/v1beta/files/az606f58k7zj',
+                mime_type='image/png',
+            ),
+        ],
+    )
+
+
+def test_config_override(client):
+  chat_config = {'candidate_count': 1}
+  chat = client.chats.create(model=MODEL_NAME, config=chat_config)
+  request_config = {'candidate_count': 2}
+  request_config_response = chat.send_message(
+      'tell me a story in 100 words',
+      config=request_config)
+  default_config_response = chat.send_message(
+      'tell me a story in 100 words')
+
+  assert len(request_config_response.candidates) == 2
+  assert len(default_config_response.candidates) == 1
+
+
+def test_history(client):
+  history = [
+      types.Content(
+          role='user', parts=[types.Part.from_text(text='define a=5, b=10')]
+      ),
+      types.Content(
+          role='model',
+          parts=[types.Part.from_text(text='Hello there! how can I help you?')],
+      ),
+  ]
+  chat = client.chats.create(model=MODEL_NAME, history=history)
+  chat.send_message('what is a + b?')
+
+  assert len(chat.get_history()) > 2
+
+
+def test_send_2_messages(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chat.send_message('write a python function to check if a year is a leap year')
+  chat.send_message('write a unit test for the function')
+
+
+def test_with_afc_history(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [divide_intergers_with_customized_math_rule]},
+  )
+  _ = chat.send_message('what is the result of 100/2?')
+  chat_history = chat.get_history()
+
+  assert len(chat_history) == 4
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'what is the result of 100/2?'
+
+  assert chat_history[1].role == 'model'
+  assert (
+      chat_history[1].parts[0].function_call.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[1].parts[0].function_call.args == {
+      'numerator': 100,
+      'denominator': 2,
+  }
+
+  assert chat_history[2].role == 'user'
+  assert (
+      chat_history[2].parts[0].function_response.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[2].parts[0].function_response.response == {'result': 51}
+
+  assert chat_history[3].role == 'model'
+  assert '51' in chat_history[3].parts[0].text
+
+
+def test_existing_chat_history_extends_afc_history(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [divide_intergers_with_customized_math_rule]},
+  )
+  _ = chat.send_message('hello')
+  _ = chat.send_message('could you help me with a math problem?')
+  _ = chat.send_message('what is the result of 100/2?')
+  chat_history = chat.get_history()
+  content_strings = []
+  for content in chat_history:
+    content_strings.append(content.model_dump_json())
+
+  # checks that the history is not duplicated
+  assert len(content_strings) == len(set(content_strings))
+
+
+@pytest.mark.skipif(
+    sys.version_info >= (3, 13),
+    reason=(
+        'object type is dumped as <Type.OBJECT: "OBJECT"> as opposed to'
+        ' "OBJECT" in Python 3.13'
+    ),
+)
+def test_with_afc_multiple_remote_calls(client):
+
+  house_fns = [power_disco_ball, start_music, dim_lights]
+  config = {
+      'tools': house_fns,
+      # Force the model to act (call 'any' function), instead of chatting.
+      'tool_config': {
+          'function_calling_config': {
+              'mode': 'ANY',
+          }
+      },
+      'automatic_function_calling': {
+          'maximum_remote_calls': 3,
+      }
+  }
+  chat = client.chats.create(model=MODEL_NAME, config=config)
+  chat.send_message('Turn this place into a party!')
+  curated_history = chat.get_history()
+
+  assert len(curated_history) == 8
+  assert curated_history[0].role == 'user'
+  assert curated_history[0].parts[0].text == 'Turn this place into a party!'
+  assert curated_history[1].role == 'model'
+  assert len(curated_history[1].parts) == 3
+  for part in curated_history[1].parts:
+    assert part.function_call
+  assert curated_history[2].role == 'user'
+  assert len(curated_history[2].parts) == 3
+  for part in curated_history[2].parts:
+    assert part.function_response
+  assert curated_history[3].role == 'model'
+  assert len(curated_history[3].parts) == 3
+  for part in curated_history[3].parts:
+    assert part.function_call
+  assert curated_history[4].role == 'user'
+  assert len(curated_history[4].parts) == 3
+  for part in curated_history[4].parts:
+    assert part.function_response
+  assert curated_history[5].role == 'model'
+  assert len(curated_history[5].parts) == 3
+  for part in curated_history[5].parts:
+    assert part.function_call
+  assert curated_history[6].role == 'user'
+  assert len(curated_history[6].parts) == 3
+  for part in curated_history[6].parts:
+    assert part.function_response
+  assert curated_history[7].role == 'model'
+  assert len(curated_history[7].parts) == 3
+  for part in curated_history[7].parts:
+    assert part.function_call
+
+
+@pytest.mark.skipif(
+    sys.version_info >= (3, 13),
+    reason=(
+        'object type is dumped as <Type.OBJECT: "OBJECT"> as opposed to'
+        ' "OBJECT" in Python 3.13'
+    ),
+)
+def test_with_afc_multiple_remote_calls_async(client):
+
+  house_fns = [power_disco_ball, start_music, dim_lights]
+  config = {
+      'tools': house_fns,
+      # Force the model to act (call 'any' function), instead of chatting.
+      'tool_config': {
+          'function_calling_config': {
+              'mode': 'ANY',
+          }
+      },
+      'automatic_function_calling': {
+          'maximum_remote_calls': 3,
+      }
+  }
+  chat = client.chats.create(model=MODEL_NAME, config=config)
+  chat.send_message('Turn this place into a party!')
+  curated_history = chat.get_history()
+
+  assert len(curated_history) == 8
+  assert curated_history[0].role == 'user'
+  assert curated_history[0].parts[0].text == 'Turn this place into a party!'
+  assert curated_history[1].role == 'model'
+  assert len(curated_history[1].parts) == 3
+  for part in curated_history[1].parts:
+    assert part.function_call
+  assert curated_history[2].role == 'user'
+  assert len(curated_history[2].parts) == 3
+  for part in curated_history[2].parts:
+    assert part.function_response
+  assert curated_history[3].role == 'model'
+  assert len(curated_history[3].parts) == 3
+  for part in curated_history[3].parts:
+    assert part.function_call
+  assert curated_history[4].role == 'user'
+  assert len(curated_history[4].parts) == 3
+  for part in curated_history[4].parts:
+    assert part.function_response
+  assert curated_history[5].role == 'model'
+  assert len(curated_history[5].parts) == 3
+  for part in curated_history[5].parts:
+    assert part.function_call
+  assert curated_history[6].role == 'user'
+  assert len(curated_history[6].parts) == 3
+  for part in curated_history[6].parts:
+    assert part.function_response
+  assert curated_history[7].role == 'model'
+  assert len(curated_history[7].parts) == 3
+  for part in curated_history[7].parts:
+    assert part.function_call
+
+def test_with_afc_disabled(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={
+          'tools': [square_integer],
+          'automatic_function_calling': {'disable': True},
+      },
+  )
+  chat.send_message(
+      'Do the square of 3.',
+  )
+  chat_history = chat.get_history()
+
+  assert len(chat_history) == 2
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'Do the square of 3.'
+
+  assert chat_history[1].role == 'model'
+  assert chat_history[1].parts[0].function_call.name == 'square_integer'
+  assert chat_history[1].parts[0].function_call.args == {
+      'given_integer': 3,
+  }
+
+
+@pytest.mark.asyncio
+async def test_with_afc_history_async(client):
+  chat = client.aio.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [divide_intergers_with_customized_math_rule]},
+  )
+  _ = await chat.send_message('what is the result of 100/2?')
+  chat_history = chat.get_history()
+
+  assert len(chat_history) == 4
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'what is the result of 100/2?'
+
+  assert chat_history[1].role == 'model'
+  assert (
+      chat_history[1].parts[0].function_call.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[1].parts[0].function_call.args == {
+      'numerator': 100,
+      'denominator': 2,
+  }
+
+  assert chat_history[2].role == 'user'
+  assert (
+      chat_history[2].parts[0].function_response.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[2].parts[0].function_response.response == {'result': 51}
+
+  assert chat_history[3].role == 'model'
+  assert '51' in chat_history[3].parts[0].text
+
+
+@pytest.mark.asyncio
+async def test_with_afc_disabled_async(client):
+  chat = client.aio.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={
+          'tools': [square_integer],
+          'automatic_function_calling': {'disable': True},
+      },
+  )
+  await chat.send_message(
+      'Do the square of 3.',
+  )
+  chat_history = chat.get_history()
+
+  assert len(chat_history) == 2
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'Do the square of 3.'
+
+  assert chat_history[1].role == 'model'
+  assert chat_history[1].parts[0].function_call.name == 'square_integer'
+  assert chat_history[1].parts[0].function_call.args == {
+      'given_integer': 3,
+  }
+
+
+def test_stream_text(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chunks = 0
+  for chunk in chat.send_message_stream(
+      'tell me a story in 100 words',
+  ):
+    chunks += 1
+
+  assert chunks > 1
+
+
+def test_stream_part(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chunks = 0
+  for chunk in chat.send_message_stream(
+      types.Part.from_text(text='tell me a story in 100 words'),
+  ):
+    chunks += 1
+
+  assert chunks > 1
+
+
+def test_stream_parts(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  chunks = 0
+  for chunk in chat.send_message_stream(
+      [
+          types.Part.from_text(text='tell me a story in 100 words'),
+          types.Part.from_text(text='the story is about a car'),
+      ],
+  ):
+    chunks += 1
+
+  assert chunks > 2
+
+
+def test_stream_config_override(client):
+  chat_config = {'response_mime_type': 'text/plain'}
+  chat = client.chats.create(model=MODEL_NAME, config=chat_config)
+  request_config = {'response_mime_type': 'application/json'}
+  request_config_text = ''
+  for chunk in chat.send_message_stream(
+      'tell me a story in 100 words', config=request_config
+  ):
+    request_config_text += chunk.text
+  default_config_text = ''
+  for chunk in chat.send_message_stream('tell me a story in 100 words'):
+    default_config_text += chunk.text
+
+  assert json.loads(request_config_text)
+  with pytest.raises(json.JSONDecodeError):
+    json.loads(default_config_text)
+
+
+def test_stream_function_calling(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [divide_intergers_with_customized_math_rule]},
+  )
+  # Now we support AFC.
+  for chunk in chat.send_message_stream(
+      'what is the result of 100/2?',
+  ):
+    pass
+  for chunk in chat.send_message_stream(
+      'what is the result of 50/2?',
+  ):
+    pass
+  chat_history = chat.get_history()
+
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'what is the result of 100/2?'
+
+  assert chat_history[1].role == 'model'
+  assert (
+      chat_history[1].parts[0].function_call.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[1].parts[0].function_call.args == {
+      'numerator': 100,
+      'denominator': 2,
+  }
+
+
+def test_stream_send_2_messages(client):
+  chat = client.chats.create(model=MODEL_NAME)
+  for chunk in chat.send_message_stream(
+      'write a python function to check if a year is a leap year'
+  ):
+    pass
+
+  for chunk in chat.send_message_stream('write a unit test for the function'):
+    pass
+
+
+@pytest.mark.asyncio
+async def test_async_text(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  await chat.send_message('tell me a story in 100 words')
+
+
+@pytest.mark.asyncio
+async def test_async_part(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  await chat.send_message(types.Part.from_text(text='tell me a story in 100 words'))
+
+
+@pytest.mark.asyncio
+async def test_async_parts(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  await chat.send_message(
+      [
+          types.Part.from_text(text='tell me a US city'),
+          types.Part.from_text(text='the city is in west coast'),
+      ],
+  )
+
+
+@pytest.mark.asyncio
+async def test_async_config_override(client):
+  chat_config = {'candidate_count': 1}
+  chat = client.aio.chats.create(model=MODEL_NAME, config=chat_config)
+  request_config = {'candidate_count': 2}
+  request_config_response = await chat.send_message(
+      'tell me a story in 100 words',
+      config=request_config)
+  default_config_response = await chat.send_message(
+      'tell me a story in 100 words')
+
+  assert len(request_config_response.candidates) == 2
+  assert len(default_config_response.candidates) == 1
+
+
+@pytest.mark.asyncio
+async def test_async_history(client):
+  history = [
+      types.Content(
+          role='user', parts=[types.Part.from_text(text='define a=5, b=10')]
+      ),
+      types.Content(
+          role='model',
+          parts=[types.Part.from_text(text='Hello there! how can I help you?')],
+      ),
+  ]
+  chat = client.aio.chats.create(model=MODEL_NAME, history=history)
+  await chat.send_message('what is a + b?')
+
+  assert len(chat.get_history()) > 2
+
+
+@pytest.mark.asyncio
+async def test_async_stream_text(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  chunks = 0
+  async for chunk in await chat.send_message_stream('tell me a story in 100 words'):
+    chunks += 1
+
+  assert chunks > 1
+
+
+@pytest.mark.asyncio
+async def test_async_stream_part(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  chunks = 0
+  async for chunk in await chat.send_message_stream(
+      types.Part.from_text(text='tell me a story in 100 words')
+  ):
+    chunks += 1
+
+  assert chunks > 1
+
+
+@pytest.mark.asyncio
+async def test_async_stream_parts(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  chunks = 0
+  async for chunk in await chat.send_message_stream(
+      [
+          types.Part.from_text(text='tell me a story in 100 words'),
+          types.Part.from_text(text='the story is about a car'),
+      ],
+  ):
+    chunks += 1
+
+  assert chunks > 1
+
+
+@pytest.mark.asyncio
+async def test_async_stream_config_override(client):
+  chat_config = {'response_mime_type': 'text/plain'}
+  chat = client.aio.chats.create(model=MODEL_NAME, config=chat_config)
+  request_config = {'response_mime_type': 'application/json'}
+  request_config_text = ''
+  async for chunk in await chat.send_message_stream(
+      'tell me a story in 100 words', config=request_config
+  ):
+    request_config_text += chunk.text
+  default_config_text = ''
+
+  async for chunk in await chat.send_message_stream('tell me family friendly story in 100 words'):
+    default_config_text += chunk.text
+
+  assert json.loads(request_config_text)
+  with pytest_helper.exception_if_mldev(client, json.JSONDecodeError):
+    json.loads(default_config_text)
+
+
+@pytest.mark.asyncio
+async def test_async_stream_function_calling(client):
+  chat = client.aio.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [divide_intergers_with_customized_math_rule]},
+  )
+  # Now we support AFC.
+  async for chunk in await chat.send_message_stream('what is the result of 100/2?'):
+    pass
+  async for chunk in await chat.send_message_stream('what is the result of 50/2?'):
+    pass
+  chat_history = chat.get_history()
+
+  assert chat_history[0].role == 'user'
+  assert chat_history[0].parts[0].text == 'what is the result of 100/2?'
+
+  assert chat_history[1].role == 'model'
+  assert (
+      chat_history[1].parts[0].function_call.name
+      == 'divide_intergers_with_customized_math_rule'
+  )
+  assert chat_history[1].parts[0].function_call.args == {
+      'numerator': 100,
+      'denominator': 2,
+  }
+
+
+@pytest.mark.asyncio
+async def test_async_stream_send_2_messages(client):
+  chat = client.aio.chats.create(model=MODEL_NAME)
+  async for chunk in await chat.send_message_stream(
+      'write a python function to check if a year is a leap year'
+  ):
+    pass
+  async for chunk in await chat.send_message_stream(
+      'write a unit test for the function'
+  ):
+    pass
+
+
+def test_mcp_tools(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [
+          mcp_types.Tool(
+              name='get_weather',
+              description='Get the weather in a city.',
+              inputSchema={
+                  'type': 'object',
+                  'properties': {'location': {'type': 'string'}},
+              },
+          )
+      ],},
+  )
+  response = chat.send_message('What is the weather in Boston?');
+  response = chat.send_message('What is the weather in San Francisco?');
+
+
+
+def test_mcp_tools_stream(client):
+  chat = client.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [
+          mcp_types.Tool(
+              name='get_weather',
+              description='Get the weather in a city.',
+              inputSchema={
+                  'type': 'object',
+                  'properties': {'location': {'type': 'string'}},
+              },
+          )
+      ],
+      },
+  )
+  for chunk in chat.send_message_stream(
+      'What is the weather in Boston?'
+  ):
+    pass
+  for chunk in chat.send_message_stream(
+      'What is the weather in San Francisco?'
+  ):
+    pass
+
+
+@pytest.mark.asyncio
+async def test_async_mcp_tools(client):
+  chat = client.aio.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [
+          mcp_types.Tool(
+              name='get_weather',
+              description='Get the weather in a city.',
+              inputSchema={
+                  'type': 'object',
+                  'properties': {'location': {'type': 'string'}},
+              },
+          )
+      ],},
+  )
+  await chat.send_message('What is the weather in Boston?');
+  await chat.send_message('What is the weather in San Francisco?');
+
+
+@pytest.mark.asyncio
+async def test_async_mcp_tools_stream(client):
+  chat = client.aio.chats.create(
+      model='gemini-2.0-flash-exp',
+      config={'tools': [
+          mcp_types.Tool(
+              name='get_weather',
+              description='Get the weather in a city.',
+              inputSchema={
+                  'type': 'object',
+                  'properties': {'location': {'type': 'string'}},
+              },
+          )
+      ],
+      },
+  )
+
+  async for chunk in await chat.send_message_stream(
+      'What is the weather in Boston?'
+  ):
+    pass
+  async for chunk in await chat.send_message_stream(
+      'What is the weather in San Francisco?'
+  ):
+    pass