google-adk 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- google/adk/._version.py +0 -0
- google/adk/__init__.py +20 -0
- google/adk/agents/__init__.py +32 -0
- google/adk/agents/active_streaming_tool.py +38 -0
- google/adk/agents/base_agent.py +345 -0
- google/adk/agents/callback_context.py +112 -0
- google/adk/agents/invocation_context.py +181 -0
- google/adk/agents/langgraph_agent.py +140 -0
- google/adk/agents/live_request_queue.py +64 -0
- google/adk/agents/llm_agent.py +376 -0
- google/adk/agents/loop_agent.py +62 -0
- google/adk/agents/parallel_agent.py +96 -0
- google/adk/agents/readonly_context.py +46 -0
- google/adk/agents/remote_agent.py +50 -0
- google/adk/agents/run_config.py +87 -0
- google/adk/agents/sequential_agent.py +45 -0
- google/adk/agents/transcription_entry.py +34 -0
- google/adk/artifacts/__init__.py +23 -0
- google/adk/artifacts/base_artifact_service.py +128 -0
- google/adk/artifacts/gcs_artifact_service.py +195 -0
- google/adk/artifacts/in_memory_artifact_service.py +133 -0
- google/adk/auth/__init__.py +22 -0
- google/adk/auth/auth_credential.py +220 -0
- google/adk/auth/auth_handler.py +268 -0
- google/adk/auth/auth_preprocessor.py +116 -0
- google/adk/auth/auth_schemes.py +67 -0
- google/adk/auth/auth_tool.py +55 -0
- google/adk/cli/__init__.py +15 -0
- google/adk/cli/__main__.py +18 -0
- google/adk/cli/agent_graph.py +148 -0
- google/adk/cli/browser/adk_favicon.svg +17 -0
- google/adk/cli/browser/assets/audio-processor.js +51 -0
- google/adk/cli/browser/assets/config/runtime-config.json +3 -0
- google/adk/cli/browser/index.html +33 -0
- google/adk/cli/browser/main-SY2WYYGV.js +75 -0
- google/adk/cli/browser/polyfills-FFHMD2TL.js +18 -0
- google/adk/cli/browser/styles-4VDSPQ37.css +17 -0
- google/adk/cli/cli.py +181 -0
- google/adk/cli/cli_deploy.py +181 -0
- google/adk/cli/cli_eval.py +282 -0
- google/adk/cli/cli_tools_click.py +524 -0
- google/adk/cli/fast_api.py +784 -0
- google/adk/cli/utils/__init__.py +49 -0
- google/adk/cli/utils/envs.py +57 -0
- google/adk/cli/utils/evals.py +93 -0
- google/adk/cli/utils/logs.py +72 -0
- google/adk/code_executors/__init__.py +49 -0
- google/adk/code_executors/base_code_executor.py +97 -0
- google/adk/code_executors/code_execution_utils.py +256 -0
- google/adk/code_executors/code_executor_context.py +202 -0
- google/adk/code_executors/container_code_executor.py +196 -0
- google/adk/code_executors/unsafe_local_code_executor.py +71 -0
- google/adk/code_executors/vertex_ai_code_executor.py +234 -0
- google/adk/docs/Makefile +20 -0
- google/adk/docs/build/doctrees/google-adk.doctree +0 -0
- google/adk/docs/build/html/_sources/google-adk.rst.txt +98 -0
- google/adk/docs/build/html/_sources/index.rst.txt +7 -0
- google/adk/docs/build/html/_static/autodoc_pydantic.css +27 -0
- google/adk/docs/build/html/_static/basic.css +925 -0
- google/adk/docs/build/html/_static/debug.css +85 -0
- google/adk/docs/build/html/_static/doctools.js +156 -0
- google/adk/docs/build/html/_static/documentation_options.js +29 -0
- google/adk/docs/build/html/_static/file.png +0 -0
- google/adk/docs/build/html/_static/language_data.js +199 -0
- google/adk/docs/build/html/_static/minus.png +0 -0
- google/adk/docs/build/html/_static/plus.png +0 -0
- google/adk/docs/build/html/_static/pygments.css +274 -0
- google/adk/docs/build/html/_static/scripts/furo-extensions.js +16 -0
- google/adk/docs/build/html/_static/scripts/furo.js +19 -0
- google/adk/docs/build/html/_static/scripts/furo.js.LICENSE.txt +7 -0
- google/adk/docs/build/html/_static/scripts/furo.js.map +1 -0
- google/adk/docs/build/html/_static/searchtools.js +620 -0
- google/adk/docs/build/html/_static/skeleton.css +312 -0
- google/adk/docs/build/html/_static/sphinx_highlight.js +170 -0
- google/adk/docs/build/html/_static/styles/furo-extensions.css +18 -0
- google/adk/docs/build/html/_static/styles/furo-extensions.css.map +1 -0
- google/adk/docs/build/html/_static/styles/furo.css +18 -0
- google/adk/docs/build/html/_static/styles/furo.css.map +1 -0
- google/adk/docs/build/html/genindex.html +861 -0
- google/adk/docs/build/html/google-adk.html +5461 -0
- google/adk/docs/build/html/index.html +567 -0
- google/adk/docs/build/html/objects.inv +0 -0
- google/adk/docs/build/html/py-modindex.html +373 -0
- google/adk/docs/build/html/search.html +333 -0
- google/adk/docs/build/html/searchindex.js +17 -0
- google/adk/docs/source/conf.py +133 -0
- google/adk/docs/source/google-adk.rst +98 -0
- google/adk/docs/source/index.rst +7 -0
- google/adk/evaluation/__init__.py +31 -0
- google/adk/evaluation/agent_evaluator.py +329 -0
- google/adk/evaluation/evaluation_constants.py +24 -0
- google/adk/evaluation/evaluation_generator.py +270 -0
- google/adk/evaluation/response_evaluator.py +135 -0
- google/adk/evaluation/trajectory_evaluator.py +184 -0
- google/adk/events/__init__.py +21 -0
- google/adk/events/event.py +130 -0
- google/adk/events/event_actions.py +55 -0
- google/adk/examples/__init__.py +28 -0
- google/adk/examples/base_example_provider.py +35 -0
- google/adk/examples/example.py +27 -0
- google/adk/examples/example_util.py +123 -0
- google/adk/examples/vertex_ai_example_store.py +104 -0
- google/adk/flows/__init__.py +14 -0
- google/adk/flows/llm_flows/__init__.py +20 -0
- google/adk/flows/llm_flows/_base_llm_processor.py +52 -0
- google/adk/flows/llm_flows/_code_execution.py +458 -0
- google/adk/flows/llm_flows/_nl_planning.py +129 -0
- google/adk/flows/llm_flows/agent_transfer.py +132 -0
- google/adk/flows/llm_flows/audio_transcriber.py +109 -0
- google/adk/flows/llm_flows/auto_flow.py +49 -0
- google/adk/flows/llm_flows/base_llm_flow.py +559 -0
- google/adk/flows/llm_flows/basic.py +72 -0
- google/adk/flows/llm_flows/contents.py +370 -0
- google/adk/flows/llm_flows/functions.py +486 -0
- google/adk/flows/llm_flows/identity.py +47 -0
- google/adk/flows/llm_flows/instructions.py +137 -0
- google/adk/flows/llm_flows/single_flow.py +57 -0
- google/adk/memory/__init__.py +35 -0
- google/adk/memory/base_memory_service.py +74 -0
- google/adk/memory/in_memory_memory_service.py +62 -0
- google/adk/memory/vertex_ai_rag_memory_service.py +177 -0
- google/adk/models/__init__.py +31 -0
- google/adk/models/anthropic_llm.py +243 -0
- google/adk/models/base_llm.py +87 -0
- google/adk/models/base_llm_connection.py +76 -0
- google/adk/models/gemini_llm_connection.py +200 -0
- google/adk/models/google_llm.py +331 -0
- google/adk/models/lite_llm.py +673 -0
- google/adk/models/llm_request.py +98 -0
- google/adk/models/llm_response.py +111 -0
- google/adk/models/registry.py +102 -0
- google/adk/planners/__init__.py +23 -0
- google/adk/planners/base_planner.py +66 -0
- google/adk/planners/built_in_planner.py +75 -0
- google/adk/planners/plan_re_act_planner.py +208 -0
- google/adk/runners.py +456 -0
- google/adk/sessions/__init__.py +41 -0
- google/adk/sessions/base_session_service.py +133 -0
- google/adk/sessions/database_session_service.py +522 -0
- google/adk/sessions/in_memory_session_service.py +206 -0
- google/adk/sessions/session.py +54 -0
- google/adk/sessions/state.py +71 -0
- google/adk/sessions/vertex_ai_session_service.py +356 -0
- google/adk/telemetry.py +189 -0
- google/adk/tests/__init__.py +14 -0
- google/adk/tests/integration/.env.example +10 -0
- google/adk/tests/integration/__init__.py +18 -0
- google/adk/tests/integration/conftest.py +119 -0
- google/adk/tests/integration/fixture/__init__.py +14 -0
- google/adk/tests/integration/fixture/agent_with_config/__init__.py +15 -0
- google/adk/tests/integration/fixture/agent_with_config/agent.py +88 -0
- google/adk/tests/integration/fixture/callback_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/callback_agent/agent.py +105 -0
- google/adk/tests/integration/fixture/context_update_test/OWNERS +1 -0
- google/adk/tests/integration/fixture/context_update_test/__init__.py +15 -0
- google/adk/tests/integration/fixture/context_update_test/agent.py +43 -0
- google/adk/tests/integration/fixture/context_update_test/successful_test.session.json +582 -0
- google/adk/tests/integration/fixture/context_variable_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/context_variable_agent/agent.py +115 -0
- google/adk/tests/integration/fixture/customer_support_ma/__init__.py +15 -0
- google/adk/tests/integration/fixture/customer_support_ma/agent.py +172 -0
- google/adk/tests/integration/fixture/ecommerce_customer_service_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/ecommerce_customer_service_agent/agent.py +338 -0
- google/adk/tests/integration/fixture/ecommerce_customer_service_agent/order_query.test.json +69 -0
- google/adk/tests/integration/fixture/ecommerce_customer_service_agent/test_config.json +6 -0
- google/adk/tests/integration/fixture/flow_complex_spark/__init__.py +15 -0
- google/adk/tests/integration/fixture/flow_complex_spark/agent.py +182 -0
- google/adk/tests/integration/fixture/flow_complex_spark/sample.session.json +190 -0
- google/adk/tests/integration/fixture/hello_world_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/hello_world_agent/agent.py +95 -0
- google/adk/tests/integration/fixture/hello_world_agent/roll_die.test.json +24 -0
- google/adk/tests/integration/fixture/hello_world_agent/test_config.json +6 -0
- google/adk/tests/integration/fixture/home_automation_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/home_automation_agent/agent.py +304 -0
- google/adk/tests/integration/fixture/home_automation_agent/simple_test.test.json +5 -0
- google/adk/tests/integration/fixture/home_automation_agent/simple_test2.test.json +5 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_config.json +5 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/dependent_tool_calls.test.json +18 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/memorizing_past_events/eval_data.test.json +17 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/memorizing_past_events/test_config.json +6 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/simple_multi_turn_conversation.test.json +18 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/simple_test.test.json +17 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/simple_test2.test.json +5 -0
- google/adk/tests/integration/fixture/home_automation_agent/test_files/test_config.json +5 -0
- google/adk/tests/integration/fixture/tool_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/tool_agent/agent.py +218 -0
- google/adk/tests/integration/fixture/tool_agent/files/Agent_test_plan.pdf +0 -0
- google/adk/tests/integration/fixture/trip_planner_agent/__init__.py +15 -0
- google/adk/tests/integration/fixture/trip_planner_agent/agent.py +110 -0
- google/adk/tests/integration/fixture/trip_planner_agent/initial.session.json +13 -0
- google/adk/tests/integration/fixture/trip_planner_agent/test_config.json +5 -0
- google/adk/tests/integration/fixture/trip_planner_agent/test_files/initial.session.json +13 -0
- google/adk/tests/integration/fixture/trip_planner_agent/test_files/test_config.json +5 -0
- google/adk/tests/integration/fixture/trip_planner_agent/test_files/trip_inquiry_sub_agent.test.json +7 -0
- google/adk/tests/integration/fixture/trip_planner_agent/trip_inquiry.test.json +19 -0
- google/adk/tests/integration/models/__init__.py +14 -0
- google/adk/tests/integration/models/test_google_llm.py +65 -0
- google/adk/tests/integration/test_callback.py +70 -0
- google/adk/tests/integration/test_context_variable.py +67 -0
- google/adk/tests/integration/test_evalute_agent_in_fixture.py +76 -0
- google/adk/tests/integration/test_multi_agent.py +28 -0
- google/adk/tests/integration/test_multi_turn.py +42 -0
- google/adk/tests/integration/test_single_agent.py +23 -0
- google/adk/tests/integration/test_sub_agent.py +26 -0
- google/adk/tests/integration/test_system_instruction.py +177 -0
- google/adk/tests/integration/test_tools.py +287 -0
- google/adk/tests/integration/test_with_test_file.py +34 -0
- google/adk/tests/integration/tools/__init__.py +14 -0
- google/adk/tests/integration/utils/__init__.py +16 -0
- google/adk/tests/integration/utils/asserts.py +75 -0
- google/adk/tests/integration/utils/test_runner.py +97 -0
- google/adk/tests/unittests/__init__.py +14 -0
- google/adk/tests/unittests/agents/__init__.py +14 -0
- google/adk/tests/unittests/agents/test_base_agent.py +407 -0
- google/adk/tests/unittests/agents/test_langgraph_agent.py +191 -0
- google/adk/tests/unittests/agents/test_llm_agent_callbacks.py +138 -0
- google/adk/tests/unittests/agents/test_llm_agent_fields.py +231 -0
- google/adk/tests/unittests/agents/test_loop_agent.py +136 -0
- google/adk/tests/unittests/agents/test_parallel_agent.py +92 -0
- google/adk/tests/unittests/agents/test_sequential_agent.py +114 -0
- google/adk/tests/unittests/artifacts/__init__.py +14 -0
- google/adk/tests/unittests/artifacts/test_artifact_service.py +276 -0
- google/adk/tests/unittests/auth/test_auth_handler.py +575 -0
- google/adk/tests/unittests/conftest.py +73 -0
- google/adk/tests/unittests/fast_api/__init__.py +14 -0
- google/adk/tests/unittests/fast_api/test_fast_api.py +269 -0
- google/adk/tests/unittests/flows/__init__.py +14 -0
- google/adk/tests/unittests/flows/llm_flows/__init__.py +14 -0
- google/adk/tests/unittests/flows/llm_flows/_test_examples.py +142 -0
- google/adk/tests/unittests/flows/llm_flows/test_agent_transfer.py +311 -0
- google/adk/tests/unittests/flows/llm_flows/test_functions_long_running.py +244 -0
- google/adk/tests/unittests/flows/llm_flows/test_functions_request_euc.py +346 -0
- google/adk/tests/unittests/flows/llm_flows/test_functions_sequential.py +93 -0
- google/adk/tests/unittests/flows/llm_flows/test_functions_simple.py +258 -0
- google/adk/tests/unittests/flows/llm_flows/test_identity.py +66 -0
- google/adk/tests/unittests/flows/llm_flows/test_instructions.py +164 -0
- google/adk/tests/unittests/flows/llm_flows/test_model_callbacks.py +142 -0
- google/adk/tests/unittests/flows/llm_flows/test_other_configs.py +46 -0
- google/adk/tests/unittests/flows/llm_flows/test_tool_callbacks.py +269 -0
- google/adk/tests/unittests/models/__init__.py +14 -0
- google/adk/tests/unittests/models/test_google_llm.py +224 -0
- google/adk/tests/unittests/models/test_litellm.py +804 -0
- google/adk/tests/unittests/models/test_models.py +60 -0
- google/adk/tests/unittests/sessions/__init__.py +14 -0
- google/adk/tests/unittests/sessions/test_session_service.py +227 -0
- google/adk/tests/unittests/sessions/test_vertex_ai_session_service.py +246 -0
- google/adk/tests/unittests/streaming/__init__.py +14 -0
- google/adk/tests/unittests/streaming/test_streaming.py +50 -0
- google/adk/tests/unittests/tools/__init__.py +14 -0
- google/adk/tests/unittests/tools/apihub_tool/clients/test_apihub_client.py +499 -0
- google/adk/tests/unittests/tools/apihub_tool/test_apihub_toolset.py +204 -0
- google/adk/tests/unittests/tools/application_integration_tool/clients/test_connections_client.py +600 -0
- google/adk/tests/unittests/tools/application_integration_tool/clients/test_integration_client.py +630 -0
- google/adk/tests/unittests/tools/application_integration_tool/test_application_integration_toolset.py +345 -0
- google/adk/tests/unittests/tools/google_api_tool/__init__.py +13 -0
- google/adk/tests/unittests/tools/google_api_tool/test_googleapi_to_openapi_converter.py +657 -0
- google/adk/tests/unittests/tools/openapi_tool/auth/credential_exchangers/test_auto_auth_credential_exchanger.py +145 -0
- google/adk/tests/unittests/tools/openapi_tool/auth/credential_exchangers/test_base_auth_credential_exchanger.py +68 -0
- google/adk/tests/unittests/tools/openapi_tool/auth/credential_exchangers/test_oauth2_exchanger.py +153 -0
- google/adk/tests/unittests/tools/openapi_tool/auth/credential_exchangers/test_service_account_exchanger.py +196 -0
- google/adk/tests/unittests/tools/openapi_tool/auth/test_auth_helper.py +573 -0
- google/adk/tests/unittests/tools/openapi_tool/common/test_common.py +436 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml +1367 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_openapi_spec_parser.py +628 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_openapi_toolset.py +139 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_operation_parser.py +406 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_rest_api_tool.py +966 -0
- google/adk/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py +201 -0
- google/adk/tests/unittests/tools/retrieval/__init__.py +14 -0
- google/adk/tests/unittests/tools/retrieval/test_vertex_ai_rag_retrieval.py +147 -0
- google/adk/tests/unittests/tools/test_agent_tool.py +167 -0
- google/adk/tests/unittests/tools/test_base_tool.py +141 -0
- google/adk/tests/unittests/tools/test_build_function_declaration.py +277 -0
- google/adk/tests/unittests/utils.py +304 -0
- google/adk/tools/__init__.py +51 -0
- google/adk/tools/_automatic_function_calling_util.py +346 -0
- google/adk/tools/agent_tool.py +176 -0
- google/adk/tools/apihub_tool/__init__.py +19 -0
- google/adk/tools/apihub_tool/apihub_toolset.py +209 -0
- google/adk/tools/apihub_tool/clients/__init__.py +13 -0
- google/adk/tools/apihub_tool/clients/apihub_client.py +332 -0
- google/adk/tools/apihub_tool/clients/secret_client.py +115 -0
- google/adk/tools/application_integration_tool/__init__.py +19 -0
- google/adk/tools/application_integration_tool/application_integration_toolset.py +230 -0
- google/adk/tools/application_integration_tool/clients/connections_client.py +903 -0
- google/adk/tools/application_integration_tool/clients/integration_client.py +253 -0
- google/adk/tools/base_tool.py +144 -0
- google/adk/tools/built_in_code_execution_tool.py +59 -0
- google/adk/tools/crewai_tool.py +72 -0
- google/adk/tools/example_tool.py +62 -0
- google/adk/tools/exit_loop_tool.py +23 -0
- google/adk/tools/function_parameter_parse_util.py +307 -0
- google/adk/tools/function_tool.py +87 -0
- google/adk/tools/get_user_choice_tool.py +28 -0
- google/adk/tools/google_api_tool/__init__.py +14 -0
- google/adk/tools/google_api_tool/google_api_tool.py +59 -0
- google/adk/tools/google_api_tool/google_api_tool_set.py +107 -0
- google/adk/tools/google_api_tool/google_api_tool_sets.py +55 -0
- google/adk/tools/google_api_tool/googleapi_to_openapi_converter.py +521 -0
- google/adk/tools/google_search_tool.py +68 -0
- google/adk/tools/langchain_tool.py +86 -0
- google/adk/tools/load_artifacts_tool.py +113 -0
- google/adk/tools/load_memory_tool.py +58 -0
- google/adk/tools/load_web_page.py +41 -0
- google/adk/tools/long_running_tool.py +39 -0
- google/adk/tools/mcp_tool/__init__.py +42 -0
- google/adk/tools/mcp_tool/conversion_utils.py +161 -0
- google/adk/tools/mcp_tool/mcp_tool.py +113 -0
- google/adk/tools/mcp_tool/mcp_toolset.py +272 -0
- google/adk/tools/openapi_tool/__init__.py +21 -0
- google/adk/tools/openapi_tool/auth/__init__.py +19 -0
- google/adk/tools/openapi_tool/auth/auth_helpers.py +498 -0
- google/adk/tools/openapi_tool/auth/credential_exchangers/__init__.py +25 -0
- google/adk/tools/openapi_tool/auth/credential_exchangers/auto_auth_credential_exchanger.py +105 -0
- google/adk/tools/openapi_tool/auth/credential_exchangers/base_credential_exchanger.py +55 -0
- google/adk/tools/openapi_tool/auth/credential_exchangers/oauth2_exchanger.py +117 -0
- google/adk/tools/openapi_tool/auth/credential_exchangers/service_account_exchanger.py +97 -0
- google/adk/tools/openapi_tool/common/__init__.py +19 -0
- google/adk/tools/openapi_tool/common/common.py +300 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/__init__.py +32 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/openapi_spec_parser.py +231 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/openapi_toolset.py +144 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/operation_parser.py +260 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py +496 -0
- google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py +268 -0
- google/adk/tools/preload_memory_tool.py +72 -0
- google/adk/tools/retrieval/__init__.py +36 -0
- google/adk/tools/retrieval/base_retrieval_tool.py +37 -0
- google/adk/tools/retrieval/files_retrieval.py +33 -0
- google/adk/tools/retrieval/llama_index_retrieval.py +41 -0
- google/adk/tools/retrieval/vertex_ai_rag_retrieval.py +107 -0
- google/adk/tools/tool_context.py +90 -0
- google/adk/tools/toolbox_tool.py +46 -0
- google/adk/tools/transfer_to_agent_tool.py +21 -0
- google/adk/tools/vertex_ai_search_tool.py +96 -0
- google/adk/version.py +16 -0
- google_adk-0.0.3.dist-info/METADATA +73 -0
- google_adk-0.0.3.dist-info/RECORD +340 -0
- {google_adk-0.0.1.dist-info → google_adk-0.0.3.dist-info}/WHEEL +1 -2
- google_adk-0.0.3.dist-info/entry_points.txt +3 -0
- agent_kit/__init__.py +0 -0
- google_adk-0.0.1.dist-info/LICENSE.txt +0 -170
- google_adk-0.0.1.dist-info/METADATA +0 -15
- google_adk-0.0.1.dist-info/RECORD +0 -6
- google_adk-0.0.1.dist-info/top_level.txt +0 -1
google/adk/models/base_llm.py
@@ -0,0 +1,87 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from abc import abstractmethod
+from typing import AsyncGenerator
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+from pydantic import ConfigDict
+
+from .base_llm_connection import BaseLlmConnection
+
+if TYPE_CHECKING:
+  from .llm_request import LlmRequest
+  from .llm_response import LlmResponse
+
+
+class BaseLlm(BaseModel):
+  """The BaseLLM class.
+
+  Attributes:
+    model: The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001.
+    model_config: The model config
+  """
+
+  model_config = ConfigDict(
+      # This allows us to use arbitrary types in the model. E.g. PIL.Image.
+      arbitrary_types_allowed=True,
+  )
+  """The model config."""
+
+  model: str
+  """The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001."""
+
+  @classmethod
+  def supported_models(cls) -> list[str]:
+    """Returns a list of supported models in regex for LlmRegistry."""
+    return []
+
+  @abstractmethod
+  async def generate_content_async(
+      self, llm_request: LlmRequest, stream: bool = False
+  ) -> AsyncGenerator[LlmResponse, None]:
+    """Generates one content from the given contents and tools.
+
+    Args:
+      llm_request: LlmRequest, the request to send to the LLM.
+      stream: bool = False, whether to do streaming call.
+
+    Yields:
+      a generator of types.Content.
+
+      For non-streaming call, it will only yield one Content.
+
+      For streaming call, it may yield more than one content, but all yielded
+      contents should be treated as one content by merging the
+      parts list.
+    """
+    raise NotImplementedError(
+        f'Async generation is not supported for {self.model}.'
+    )
+    yield  # AsyncGenerator requires a yield statement in function body.
+
+  def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
+    """Creates a live connection to the LLM.
+
+    Args:
+      llm_request: LlmRequest, the request to send to the LLM.
+
+    Returns:
+      BaseLlmConnection, the connection to the LLM.
+    """
+    raise NotImplementedError(
+        f'Live connection is not supported for {self.model}.'
+    )
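To make the contract above concrete, here is a minimal sketch of a custom backend built against `BaseLlm`. It is not part of the diff: the `EchoLlm` class, its echo behaviour, and the absolute import paths are illustrative assumptions; only the overridden signatures come from `base_llm.py`.

```python
# Hypothetical BaseLlm subclass, sketched against the interface in the hunk
# above. EchoLlm and its behaviour are made up; the import paths mirror the
# module layout in the file list but are an assumption.
from typing import AsyncGenerator

from google.genai import types

from google.adk.models.base_llm import BaseLlm
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse


class EchoLlm(BaseLlm):
  """Toy model that echoes the last user message back as model output."""

  @classmethod
  def supported_models(cls) -> list[str]:
    # Regex patterns consumed by the LlmRegistry (see models/registry.py).
    return [r'echo-.*']

  async def generate_content_async(
      self, llm_request: LlmRequest, stream: bool = False
  ) -> AsyncGenerator[LlmResponse, None]:
    last_text = ''
    if llm_request.contents and llm_request.contents[-1].parts:
      last_text = llm_request.contents[-1].parts[0].text or ''
    # Non-streaming behaviour: yield exactly one LlmResponse.
    yield LlmResponse(
        content=types.Content(
            role='model',
            parts=[types.Part.from_text(text=f'echo: {last_text}')],
        )
    )
```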
google/adk/models/base_llm_connection.py
@@ -0,0 +1,76 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import abstractmethod
+from typing import AsyncGenerator
+from google.genai import types
+from .llm_response import LlmResponse
+
+
+class BaseLlmConnection:
+  """The base class for a live model connection."""
+
+  @abstractmethod
+  async def send_history(self, history: list[types.Content]):
+    """Sends the conversation history to the model.
+
+    You call this method right after setting up the model connection.
+    The model will respond if the last content is from user, otherwise it will
+    wait for new user input before responding.
+
+    Args:
+      history: The conversation history to send to the model.
+    """
+    pass
+
+  @abstractmethod
+  async def send_content(self, content: types.Content):
+    """Sends a user content to the model.
+
+    The model will respond immediately upon receiving the content.
+    If you send function responses, all parts in the content should be function
+    responses.
+
+    Args:
+      content: The content to send to the model.
+    """
+    pass
+
+  @abstractmethod
+  async def send_realtime(self, blob: types.Blob):
+    """Sends a chunk of audio or a frame of video to the model in realtime.
+
+    The model may not respond immediately upon receiving the blob. It will do
+    voice activity detection and decide when to respond.
+
+    Args:
+      blob: The blob to send to the model.
+    """
+    pass
+
+  @abstractmethod
+  async def receive(self) -> AsyncGenerator[LlmResponse, None]:
+    """Receives the model response using the llm server connection.
+
+    Args: None.
+
+    Yields:
+      LlmResponse: The model response.
+    """
+    pass
+
+  @abstractmethod
+  async def close(self):
+    """Closes the llm server connection."""
+    pass
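The abstract connection above is what callers drive during a live session. A rough consumer loop might look like the sketch below; the `run_one_turn` helper and the use of `LlmResponse.turn_complete` as a stop signal are assumptions drawn from the hunks in this diff, not package code.

```python
# Illustrative driver for any BaseLlmConnection implementation; the helper
# name and stop condition are assumptions, only the send/receive/close
# methods come from the interface above.
from google.genai import types

from google.adk.models.base_llm_connection import BaseLlmConnection


async def run_one_turn(connection: BaseLlmConnection, user_text: str) -> None:
  """Sends one user turn and prints model output until the turn completes."""
  await connection.send_content(
      types.Content(
          role='user',
          parts=[types.Part.from_text(text=user_text)],
      )
  )
  async for llm_response in connection.receive():
    if llm_response.content and llm_response.content.parts:
      print(llm_response.content.parts[0].text)
    if llm_response.turn_complete:
      break
  # Close the live connection once the conversation is over.
  await connection.close()
```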
google/adk/models/gemini_llm_connection.py
@@ -0,0 +1,200 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import AsyncGenerator
+
+from google.genai import live
+from google.genai import types
+
+from .base_llm_connection import BaseLlmConnection
+from .llm_response import LlmResponse
+
+logger = logging.getLogger(__name__)
+
+
+class GeminiLlmConnection(BaseLlmConnection):
+  """The Gemini model connection."""
+
+  def __init__(self, gemini_session: live.AsyncSession):
+    self._gemini_session = gemini_session
+
+  async def send_history(self, history: list[types.Content]):
+    """Sends the conversation history to the gemini model.
+
+    You call this method right after setting up the model connection.
+    The model will respond if the last content is from user, otherwise it will
+    wait for new user input before responding.
+
+    Args:
+      history: The conversation history to send to the model.
+    """
+
+    # TODO: Remove this filter and translate unary contents to streaming
+    # contents properly.
+
+    # We ignore any audio from user during the agent transfer phase
+    contents = [
+        content
+        for content in history
+        if content.parts and content.parts[0].text
+    ]
+
+    if contents:
+      await self._gemini_session.send(
+          input=types.LiveClientContent(
+              turns=contents,
+              turn_complete=contents[-1].role == 'user',
+          ),
+      )
+    else:
+      logger.info('no content is sent')
+
+  async def send_content(self, content: types.Content):
+    """Sends a user content to the gemini model.
+
+    The model will respond immediately upon receiving the content.
+    If you send function responses, all parts in the content should be function
+    responses.
+
+    Args:
+      content: The content to send to the model.
+    """
+
+    assert content.parts
+    if content.parts[0].function_response:
+      # All parts have to be function responses.
+      function_responses = [part.function_response for part in content.parts]
+      logger.debug('Sending LLM function response: %s', function_responses)
+      await self._gemini_session.send(
+          input=types.LiveClientToolResponse(
+              function_responses=function_responses
+          ),
+      )
+    else:
+      logger.debug('Sending LLM new content %s', content)
+      await self._gemini_session.send(
+          input=types.LiveClientContent(
+              turns=[content],
+              turn_complete=True,
+          )
+      )
+
+  async def send_realtime(self, blob: types.Blob):
+    """Sends a chunk of audio or a frame of video to the model in realtime.
+
+    Args:
+      blob: The blob to send to the model.
+    """
+
+    input_blob = blob.model_dump()
+    logger.debug('Sending LLM Blob: %s', input_blob)
+    await self._gemini_session.send(input=input_blob)
+
+  def __build_full_text_response(self, text: str):
+    """Builds a full text response.
+
+    The text should not partial and the returned LlmResponse is not be
+    partial.
+
+    Args:
+      text: The text to be included in the response.
+
+    Returns:
+      An LlmResponse containing the full text.
+    """
+    return LlmResponse(
+        content=types.Content(
+            role='model',
+            parts=[types.Part.from_text(text=text)],
+        ),
+    )
+
+  async def receive(self) -> AsyncGenerator[LlmResponse, None]:
+    """Receives the model response using the llm server connection.
+
+    Yields:
+      LlmResponse: The model response.
+    """
+
+    text = ''
+    async for message in self._gemini_session.receive():
+      logger.debug('Got LLM Live message: %s', message)
+      if message.server_content:
+        content = message.server_content.model_turn
+        if content and content.parts:
+          llm_response = LlmResponse(
+              content=content, interrupted=message.server_content.interrupted
+          )
+          if content.parts[0].text:
+            text += content.parts[0].text
+            llm_response.partial = True
+          # don't yield the merged text event when receiving audio data
+          elif text and not content.parts[0].inline_data:
+            yield self.__build_full_text_response(text)
+            text = ''
+          yield llm_response
+
+        if (
+            message.server_content.output_transcription
+            and message.server_content.output_transcription.text
+        ):
+          # TODO: Right now, we just support output_transcription without
+          # changing interface and data protocol. Later, we can consider to
+          # support output_transcription as a separete field in LlmResponse.
+
+          # Transcription is always considered as partial event
+          # We rely on other control signals to determine when to yield the
+          # full text response(turn_complete, interrupted, or tool_call).
+          text += message.server_content.output_transcription.text
+          parts = [
+              types.Part.from_text(
+                  text=message.server_content.output_transcription.text
+              )
+          ]
+          llm_response = LlmResponse(
+              content=types.Content(role='model', parts=parts), partial=True
+          )
+          yield llm_response
+
+        if message.server_content.turn_complete:
+          if text:
+            yield self.__build_full_text_response(text)
+            text = ''
+          yield LlmResponse(
+              turn_complete=True, interrupted=message.server_content.interrupted
+          )
+          break
+        # in case of empty content or parts, we sill surface it
+        # in case it's an interrupted message, we merge the previous partial
+        # text. Other we don't merge. because content can be none when model
+        # safty threshold is triggered
+        if message.server_content.interrupted and text:
+          yield self.__build_full_text_response(text)
+          text = ''
+        yield LlmResponse(interrupted=message.server_content.interrupted)
+      if message.tool_call:
+        if text:
+          yield self.__build_full_text_response(text)
+          text = ''
+        parts = [
+            types.Part(function_call=function_call)
+            for function_call in message.tool_call.function_calls
+        ]
+        yield LlmResponse(content=types.Content(role='model', parts=parts))
+
+  async def close(self):
+    """Closes the llm server connection."""
+
+    await self._gemini_session.close()
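One detail of `receive()` worth noting: streamed text chunks are yielded with `partial=True`, and a merged full-text `LlmResponse` is emitted later (on turn completion, interruption, or a tool call). A consumer that only wants the merged text can therefore filter on that flag; the snippet below is an illustrative filter, not package code.

```python
# Illustrative filter over the event stream produced by receive(); it relies
# only on the LlmResponse.partial, .turn_complete and .content fields used in
# the hunk above. The collect_merged_text helper is an assumption.
from google.adk.models.base_llm_connection import BaseLlmConnection


async def collect_merged_text(connection: BaseLlmConnection) -> str:
  """Concatenates only the merged (non-partial) text events of one turn."""
  pieces: list[str] = []
  async for llm_response in connection.receive():
    if llm_response.turn_complete:
      break
    if llm_response.partial:
      continue  # skip incremental chunks; a merged event follows later
    if llm_response.content and llm_response.content.parts:
      text = llm_response.content.parts[0].text
      if text:
        pieces.append(text)
  return ''.join(pieces)
```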
google/adk/models/google_llm.py
@@ -0,0 +1,331 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import contextlib
+from functools import cached_property
+import logging
+import sys
+from typing import AsyncGenerator
+from typing import cast
+from typing import Generator
+from typing import TYPE_CHECKING
+
+from google.genai import Client
+from google.genai import types
+from typing_extensions import override
+
+from .. import version
+from .base_llm import BaseLlm
+from .base_llm_connection import BaseLlmConnection
+from .gemini_llm_connection import GeminiLlmConnection
+from .llm_response import LlmResponse
+
+if TYPE_CHECKING:
+  from .llm_request import LlmRequest
+
+logger = logging.getLogger(__name__)
+
+_NEW_LINE = '\n'
+_EXCLUDED_PART_FIELD = {'inline_data': {'data'}}
+
+
+class Gemini(BaseLlm):
+  """Integration for Gemini models.
+
+  Attributes:
+    model: The name of the Gemini model.
+  """
+
+  model: str = 'gemini-1.5-flash'
+
+  @staticmethod
+  @override
+  def supported_models() -> list[str]:
+    """Provides the list of supported models.
+
+    Returns:
+      A list of supported models.
+    """
+
+    return [
+        r'gemini-.*',
+        # fine-tuned vertex endpoint pattern
+        r'projects\/.+\/locations\/.+\/endpoints\/.+',
+        # vertex gemini long name
+        r'projects\/.+\/locations\/.+\/publishers\/google\/models\/gemini.+',
+    ]
+
+  async def generate_content_async(
+      self, llm_request: LlmRequest, stream: bool = False
+  ) -> AsyncGenerator[LlmResponse, None]:
+    """Sends a request to the Gemini model.
+
+    Args:
+      llm_request: LlmRequest, the request to send to the Gemini model.
+      stream: bool = False, whether to do streaming call.
+
+    Yields:
+      LlmResponse: The model response.
+    """
+
+    self._maybe_append_user_content(llm_request)
+    logger.info(
+        'Sending out request, model: %s, backend: %s, stream: %s',
+        llm_request.model,
+        self._api_backend,
+        stream,
+    )
+    logger.info(_build_request_log(llm_request))
+
+    if stream:
+      responses = await self.api_client.aio.models.generate_content_stream(
+          model=llm_request.model,
+          contents=llm_request.contents,
+          config=llm_request.config,
+      )
+      response = None
+      text = ''
+      # for sse, similar as bidi (see receive method in gemini_llm_connecton.py),
+      # we need to mark those text content as partial and after all partial
+      # contents are sent, we send an accumulated event which contains all the
+      # previous partial content. The only difference is bidi rely on
+      # complete_turn flag to detect end while sse depends on finish_reason.
+      async for response in responses:
+        logger.info(_build_response_log(response))
+        llm_response = LlmResponse.create(response)
+        if (
+            llm_response.content
+            and llm_response.content.parts
+            and llm_response.content.parts[0].text
+        ):
+          text += llm_response.content.parts[0].text
+          llm_response.partial = True
+        elif text and (
+            not llm_response.content
+            or not llm_response.content.parts
+            # don't yield the merged text event when receiving audio data
+            or not llm_response.content.parts[0].inline_data
+        ):
+          yield LlmResponse(
+              content=types.ModelContent(
+                  parts=[types.Part.from_text(text=text)],
+              ),
+          )
+          text = ''
+        yield llm_response
+      if (
+          text
+          and response
+          and response.candidates
+          and response.candidates[0].finish_reason == types.FinishReason.STOP
+      ):
+        yield LlmResponse(
+            content=types.ModelContent(
+                parts=[types.Part.from_text(text=text)],
+            ),
+        )
+
+    else:
+      response = await self.api_client.aio.models.generate_content(
+          model=llm_request.model,
+          contents=llm_request.contents,
+          config=llm_request.config,
+      )
+      logger.info(_build_response_log(response))
+      yield LlmResponse.create(response)
+
+  @cached_property
+  def api_client(self) -> Client:
+    """Provides the api client.
+
+    Returns:
+      The api client.
+    """
+    return Client(
+        http_options=types.HttpOptions(headers=self._tracking_headers)
+    )
+
+  @cached_property
+  def _api_backend(self) -> str:
+    return 'vertex' if self.api_client.vertexai else 'ml_dev'
+
+  @cached_property
+  def _tracking_headers(self) -> dict[str, str]:
+    framework_label = f'google-adk/{version.__version__}'
+    language_label = 'gl-python/' + sys.version.split()[0]
+    version_header_value = f'{framework_label} {language_label}'
+    tracking_headers = {
+        'x-goog-api-client': version_header_value,
+        'user-agent': version_header_value,
+    }
+    return tracking_headers
+
+  @cached_property
+  def _live_api_client(self) -> Client:
+    if self._api_backend == 'vertex':
+      # use default api version for vertex
+      return Client(
+          http_options=types.HttpOptions(headers=self._tracking_headers)
+      )
+    else:
+      # use v1alpha for ml_dev
+      api_version = 'v1alpha'
+      return Client(
+          http_options=types.HttpOptions(
+              headers=self._tracking_headers, api_version=api_version
+          )
+      )
+
+  @contextlib.asynccontextmanager
+  async def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
+    """Connects to the Gemini model and returns an llm connection.
+
+    Args:
+      llm_request: LlmRequest, the request to send to the Gemini model.
+
+    Yields:
+      BaseLlmConnection, the connection to the Gemini model.
+    """
+
+    llm_request.live_connect_config.system_instruction = types.Content(
+        role='system',
+        parts=[
+            types.Part.from_text(text=llm_request.config.system_instruction)
+        ],
+    )
+    llm_request.live_connect_config.tools = llm_request.config.tools
+    async with self._live_api_client.aio.live.connect(
+        model=llm_request.model, config=llm_request.live_connect_config
+    ) as live_session:
+      yield GeminiLlmConnection(live_session)
+
+  def _maybe_append_user_content(self, llm_request: LlmRequest):
+    """Appends a user content, so that model can continue to output.
+
+    Args:
+      llm_request: LlmRequest, the request to send to the Gemini model.
+    """
+    # If no content is provided, append a user content to hint model response
+    # using system instruction.
+    if not llm_request.contents:
+      llm_request.contents.append(
+          types.Content(
+              role='user',
+              parts=[
+                  types.Part(
+                      text=(
+                          'Handle the requests as specified in the System'
+                          ' Instruction.'
+                      )
+                  )
+              ],
+          )
+      )
+      return
+
+    # Insert a user content to preserve user intent and to avoid empty
+    # model response.
+    if llm_request.contents[-1].role != 'user':
+      llm_request.contents.append(
+          types.Content(
+              role='user',
+              parts=[
+                  types.Part(
+                      text=(
+                          'Continue processing previous requests as instructed.'
+                          ' Exit or provide a summary if no more outputs are'
+                          ' needed.'
+                      )
+                  )
+              ],
+          )
+      )
+
+
+def _build_function_declaration_log(
+    func_decl: types.FunctionDeclaration,
+) -> str:
+  param_str = '{}'
+  if func_decl.parameters and func_decl.parameters.properties:
+    param_str = str({
+        k: v.model_dump(exclude_none=True)
+        for k, v in func_decl.parameters.properties.items()
+    })
+  return_str = 'None'
+  if func_decl.response:
+    return_str = str(func_decl.response.model_dump(exclude_none=True))
+  return f'{func_decl.name}: {param_str} -> {return_str}'
+
+
+def _build_request_log(req: LlmRequest) -> str:
+  function_decls: list[types.FunctionDeclaration] = cast(
+      list[types.FunctionDeclaration],
+      req.config.tools[0].function_declarations if req.config.tools else [],
+  )
+  function_logs = (
+      [
+          _build_function_declaration_log(func_decl)
+          for func_decl in function_decls
+      ]
+      if function_decls
+      else []
+  )
+  contents_logs = [
+      content.model_dump_json(
+          exclude_none=True,
+          exclude={
+              'parts': {
+                  i: _EXCLUDED_PART_FIELD for i in range(len(content.parts))
+              }
+          },
+      )
+      for content in req.contents
+  ]
+
+  return f"""
+LLM Request:
+-----------------------------------------------------------
+System Instruction:
+{req.config.system_instruction}
+-----------------------------------------------------------
+Contents:
+{_NEW_LINE.join(contents_logs)}
+-----------------------------------------------------------
+Functions:
+{_NEW_LINE.join(function_logs)}
+-----------------------------------------------------------
+"""
+
+
+def _build_response_log(resp: types.GenerateContentResponse) -> str:
+  function_calls_text = []
+  if function_calls := resp.function_calls:
+    for func_call in function_calls:
+      function_calls_text.append(
+          f'name: {func_call.name}, args: {func_call.args}'
+      )
+  return f"""
+LLM Response:
+-----------------------------------------------------------
+Text:
+{resp.text}
+-----------------------------------------------------------
+Function calls:
+{_NEW_LINE.join(function_calls_text)}
+-----------------------------------------------------------
+Raw response:
+{resp.model_dump_json(exclude_none=True)}
+-----------------------------------------------------------
+"""
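Taken together, the hunks above give the Gemini backend two entry points: `generate_content_async` for request/response calls (optionally streamed over SSE) and `connect` for the live, bidirectional path. A condensed usage sketch follows; constructing `LlmRequest` directly with `model`, `contents` and `config`, the model name, and the `asyncio` entry point are assumptions based on the file list and on how `google_llm.py` reads those fields, not a documented recipe. Credentials for the underlying `google.genai` `Client` are expected to come from the environment.

```python
# Rough usage sketch for the Gemini class shown above; the LlmRequest field
# names are inferred from how google_llm.py reads them and are therefore an
# assumption.
import asyncio

from google.genai import types

from google.adk.models.google_llm import Gemini
from google.adk.models.llm_request import LlmRequest


async def main() -> None:
  gemini = Gemini(model='gemini-1.5-flash')

  llm_request = LlmRequest(
      model=gemini.model,
      contents=[
          types.Content(
              role='user',
              parts=[types.Part.from_text(text='Say hello in one sentence.')],
          )
      ],
      config=types.GenerateContentConfig(),
  )

  # Non-streaming call: the generator yields a single LlmResponse.
  async for llm_response in gemini.generate_content_async(llm_request):
    if llm_response.content and llm_response.content.parts:
      print(llm_response.content.parts[0].text)


if __name__ == '__main__':
  asyncio.run(main())
```

For the live path, `connect()` is wrapped in `contextlib.asynccontextmanager`, so the corresponding pattern is `async with gemini.connect(llm_request) as connection:` followed by a send/receive loop like the one sketched after the `base_llm_connection.py` hunk.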