aip-agents-binary 0.5.20 (py3-none-manylinux_2_31_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/__init__.pyi +19 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/__init__.pyi +3 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/__init__.pyi +4 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/base_executor.pyi +73 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/google_adk_executor.pyi +51 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langflow_executor.pyi +43 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/server/langgraph_executor.pyi +47 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/a2a/types.pyi +132 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/__init__.pyi +9 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_agent.pyi +221 -0
- aip_agents/agent/base_langgraph_agent.py +2942 -0
- aip_agents/agent/base_langgraph_agent.pyi +232 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_agent.pyi +141 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/google_adk_constants.pyi +3 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/__init__.pyi +6 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/config.pyi +15 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/manager.pyi +200 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/models.pyi +3 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/base.pyi +24 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/hitl/registry.pyi +101 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interface.pyi +81 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/interfaces.pyi +44 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langflow_agent.pyi +133 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
- aip_agents/agent/langgraph_react_agent.py +2514 -0
- aip_agents/agent/langgraph_react_agent.pyi +126 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/agent/system_instruction_context.pyi +13 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/__init__.pyi +4 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.pyi +4 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/client.pyi +140 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/clients/langflow/types.pyi +7 -0
- aip_agents/constants.py +23 -0
- aip_agents/constants.pyi +7 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/__init__.pyi +0 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_client.pyi +48 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/compare_streaming_server.pyi +18 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/demo_memory_recall.pyi +58 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain.pyi +5 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_pii_logger.pyi +5 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_sentry.pyi +21 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_step_limits.pyi +17 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/hitl_demo.pyi +67 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/__init__.pyi +9 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_generator_tool.pyi +15 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/random_chart_tool.pyi +20 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/serper_tool.pyi +16 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/stock_tools.pyi +36 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/table_generator_tool.pyi +22 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/time_tool.pyi +15 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/__init__.pyi +0 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/__init__.pyi +5 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/base_mcp_client.pyi +148 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/connection_manager.pyi +48 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/google_adk/client.pyi +75 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/__init__.pyi +3 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/langchain/client.pyi +48 -0
- aip_agents/mcp/client/persistent_session.py +359 -0
- aip_agents/mcp/client/persistent_session.pyi +113 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/session_pool.pyi +101 -0
- aip_agents/mcp/client/transports.py +215 -0
- aip_agents/mcp/client/transports.pyi +123 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/__init__.pyi +0 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/mcp/utils/config_validator.pyi +82 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/__init__.pyi +5 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/__init__.pyi +4 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/base_adapter.pyi +150 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/adapters/mem0.pyi +22 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/base.pyi +60 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/constants.pyi +25 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/factory.pyi +24 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/guidance.pyi +3 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/memory/simple_memory.pyi +23 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/__init__.pyi +5 -0
- aip_agents/middleware/base.py +88 -0
- aip_agents/middleware/base.pyi +71 -0
- aip_agents/middleware/manager.py +128 -0
- aip_agents/middleware/manager.pyi +80 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/middleware/todolist.pyi +125 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/__init__.pyi +9 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/a2a.pyi +40 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/agent.pyi +65 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/hitl.pyi +89 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/langgraph.pyi +28 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/model_id.pyi +54 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/step_limit.pyi +63 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/schema/storage.pyi +21 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/__init__.pyi +3 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/sentry/sentry.pyi +48 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/__init__.pyi +8 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/base.pyi +58 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/__init__.pyi +3 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/clients/minio_client.pyi +137 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/config.pyi +29 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/__init__.pyi +5 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/base.pyi +88 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/memory.pyi +79 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/storage/providers/object_storage.pyi +98 -0
- aip_agents/tools/__init__.py +33 -0
- aip_agents/tools/__init__.pyi +13 -0
- aip_agents/tools/bosa_tools.py +105 -0
- aip_agents/tools/bosa_tools.pyi +37 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/__init__.pyi +14 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/action_parser.pyi +18 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/llm_config.pyi +52 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/minio_storage.pyi +109 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/schemas.pyi +32 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session.pyi +4 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/session_errors.pyi +53 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/streaming.pyi +81 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/browser_use/types.pyi +45 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/__init__.pyi +3 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/constant.pyi +4 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +86 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
- aip_agents/tools/constants.py +165 -0
- aip_agents/tools/constants.pyi +135 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/__init__.pyi +7 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/base_reader.pyi +75 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/__init__.pyi +3 -0
- aip_agents/tools/gl_connector/tool.py +351 -0
- aip_agents/tools/gl_connector/tool.pyi +74 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/__init__.pyi +5 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/base.pyi +69 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/mem0.pyi +19 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search/schema.pyi +15 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/memory_search_tool.pyi +3 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/time_tool.pyi +16 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/tool_config_injector.pyi +26 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/__init__.pyi +3 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/tools/web_search/serper_tool.pyi +19 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/__init__.pyi +36 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/types/a2a_events.pyi +3 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/__init__.pyi +11 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/a2a_connector.pyi +146 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/artifact_helpers.pyi +203 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/constants.pyi +10 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/__init__.pyi +4 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/normalization.pyi +95 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/datetime/timezone.pyi +48 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/env_loader.pyi +10 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/event_handler_registry.pyi +23 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/file_prompt_utils.pyi +21 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/final_response_builder.pyi +34 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/formatter_llm_client.pyi +71 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/__init__.pyi +3 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/converter.pyi +49 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +292 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/logger.pyi +60 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/__init__.pyi +5 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/metadata_helper.pyi +117 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/__init__.pyi +5 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_handler.pyi +96 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/pii_helper.pyi +78 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/reference_helper.pyi +81 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/sse_chunk_transformer.pyi +166 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/step_limit_manager.pyi +112 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents/utils/token_usage_helper.pyi +60 -0
- aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.20.dist-info/RECORD +546 -0
- aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2514 @@
|
|
|
1
|
+
"""LangGraph ReAct Agent implementation.
|
|
2
|
+
|
|
3
|
+
A ReAct agent template built on LangGraph that can use either lm_invoker or LangChain BaseChatModel.
|
|
4
|
+
|
|
5
|
+
Authors:
|
|
6
|
+
Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
|
|
7
|
+
Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
|
|
8
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
import time
|
|
13
|
+
import uuid
|
|
14
|
+
from collections.abc import Awaitable, Callable, Sequence
|
|
15
|
+
from dataclasses import asdict, dataclass
|
|
16
|
+
from functools import reduce
|
|
17
|
+
from textwrap import dedent
|
|
18
|
+
from typing import Annotated, Any
|
|
19
|
+
|
|
20
|
+
from deprecated import deprecated
|
|
21
|
+
from gllm_core.event import EventEmitter
|
|
22
|
+
from gllm_core.schema import Chunk
|
|
23
|
+
from langchain_core.language_models import BaseChatModel
|
|
24
|
+
from langchain_core.messages import (
|
|
25
|
+
AIMessage,
|
|
26
|
+
BaseMessage,
|
|
27
|
+
HumanMessage,
|
|
28
|
+
SystemMessage,
|
|
29
|
+
ToolMessage,
|
|
30
|
+
)
|
|
31
|
+
from langchain_core.messages.ai import UsageMetadata
|
|
32
|
+
from langchain_core.tools import BaseTool
|
|
33
|
+
from langgraph.config import get_stream_writer
|
|
34
|
+
from langgraph.graph import END, StateGraph
|
|
35
|
+
from langgraph.graph.message import add_messages
|
|
36
|
+
from langgraph.graph.state import CompiledStateGraph
|
|
37
|
+
from langgraph.managed import IsLastStep, RemainingSteps
|
|
38
|
+
from langgraph.types import Command, StreamWriter
|
|
39
|
+
from typing_extensions import TypedDict
|
|
40
|
+
|
|
41
|
+
from aip_agents.agent.base_langgraph_agent import _THREAD_ID_CVAR, BaseLangGraphAgent
|
|
42
|
+
from aip_agents.agent.hitl.langgraph_hitl_mixin import LangGraphHitLMixin
|
|
43
|
+
from aip_agents.agent.hitl.manager import TOOL_EXECUTION_BLOCKING_DECISIONS
|
|
44
|
+
from aip_agents.middleware.base import AgentMiddleware, ModelRequest
|
|
45
|
+
from aip_agents.middleware.manager import MiddlewareManager
|
|
46
|
+
from aip_agents.middleware.todolist import TodoList, TodoListMiddleware
|
|
47
|
+
from aip_agents.schema.a2a import A2AStreamEventType
|
|
48
|
+
from aip_agents.schema.hitl import ApprovalDecision, HitlMetadata
|
|
49
|
+
from aip_agents.schema.langgraph import ToolCallResult, ToolStorageParams
|
|
50
|
+
from aip_agents.schema.step_limit import MaxStepsExceededError, StepLimitConfig
|
|
51
|
+
from aip_agents.tools.memory_search_tool import MEMORY_SEARCH_TOOL_NAME
|
|
52
|
+
from aip_agents.tools.tool_config_injector import TOOL_CONFIGS_KEY
|
|
53
|
+
from aip_agents.utils import add_references_chunks
|
|
54
|
+
from aip_agents.utils.langgraph import (
|
|
55
|
+
convert_langchain_messages_to_gllm_messages,
|
|
56
|
+
convert_lm_output_to_langchain_message,
|
|
57
|
+
)
|
|
58
|
+
from aip_agents.utils.langgraph.tool_output_management import (
|
|
59
|
+
StoreOutputParams,
|
|
60
|
+
ToolOutputManager,
|
|
61
|
+
ToolReferenceError,
|
|
62
|
+
ToolReferenceResolver,
|
|
63
|
+
)
|
|
64
|
+
from aip_agents.utils.logger import get_logger
|
|
65
|
+
from aip_agents.utils.metadata.activity_metadata_helper import create_tool_activity_info
|
|
66
|
+
from aip_agents.utils.metadata_helper import Kind, MetadataFieldKeys, Status
|
|
67
|
+
from aip_agents.utils.pii import ToolPIIHandler, add_pii_mappings, normalize_enable_pii
|
|
68
|
+
from aip_agents.utils.reference_helper import extract_references_from_tool
|
|
69
|
+
from aip_agents.utils.step_limit_manager import (
|
|
70
|
+
_DELEGATION_CHAIN_CVAR,
|
|
71
|
+
_DELEGATION_DEPTH_CVAR,
|
|
72
|
+
_REMAINING_STEP_BUDGET_CVAR,
|
|
73
|
+
_STEP_LIMIT_CONFIG_CVAR,
|
|
74
|
+
StepLimitManager,
|
|
75
|
+
)
|
|
76
|
+
from aip_agents.utils.token_usage_helper import (
|
|
77
|
+
TOTAL_USAGE_KEY,
|
|
78
|
+
USAGE_METADATA_KEY,
|
|
79
|
+
add_usage_metadata,
|
|
80
|
+
extract_and_update_token_usage_from_ai_message,
|
|
81
|
+
extract_token_usage_from_tool_output,
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
logger = get_logger(__name__)
|
|
85
|
+
|
|
86
|
+
# Default instruction for ReAct agents
|
|
87
|
+
DEFAULT_INSTRUCTION = "You are a helpful assistant. Use the available tools to help answer questions."
|
|
88
|
+
|
|
89
|
+
# Tool method constants
|
|
90
|
+
TOOL_RUN_STREAMING_METHOD = "arun_streaming"
|
|
91
|
+
|
|
92
|
+
# Key Attributes
|
|
93
|
+
TOOL_OUTPUT_MANAGER_KEY = "tool_output_manager"
|
|
94
|
+
CALL_ID_KEY = "call_id"
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
@dataclass
|
|
98
|
+
class ToolCallContext:
|
|
99
|
+
"""Context information for executing a single tool call."""
|
|
100
|
+
|
|
101
|
+
config: dict[str, Any] | None
|
|
102
|
+
state: dict[str, Any]
|
|
103
|
+
pending_artifacts: list[dict[str, Any]]
|
|
104
|
+
hitl_decision: ApprovalDecision | None = None
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class ReactAgentState(TypedDict):
|
|
108
|
+
"""State schema for the ReAct agent.
|
|
109
|
+
|
|
110
|
+
Includes messages, step tracking, optional event emission support, artifacts, references,
|
|
111
|
+
metadata, tool output management, and deep agents middleware state (todos, filesystem).
|
|
112
|
+
"""
|
|
113
|
+
|
|
114
|
+
messages: Annotated[Sequence[BaseMessage], add_messages]
|
|
115
|
+
is_last_step: IsLastStep
|
|
116
|
+
remaining_steps: RemainingSteps
|
|
117
|
+
event_emitter: EventEmitter | None
|
|
118
|
+
artifacts: list[dict[str, Any]] | None
|
|
119
|
+
references: Annotated[list[Chunk], add_references_chunks]
|
|
120
|
+
metadata: dict[str, Any] | None
|
|
121
|
+
tool_output_manager: ToolOutputManager | None
|
|
122
|
+
total_usage: Annotated[UsageMetadata | None, add_usage_metadata]
|
|
123
|
+
pii_mapping: Annotated[dict[str, str] | None, add_pii_mappings]
|
|
124
|
+
thread_id: str
|
|
125
|
+
|
|
126
|
+
# Deep Agents Middleware State
|
|
127
|
+
todos: TodoList | None # Planning middleware - task decomposition state
|
|
128
|
+
|
|
129
|
+
# Step Limit State (Configurable Maximum Steps Feature)
|
|
130
|
+
current_step: int # Current step number (incremented after each LLM call or tool execution)
|
|
131
|
+
delegation_depth: int # Current depth in delegation chain (0 for root)
|
|
132
|
+
delegation_chain: list[str] # Agent names in delegation chain
|
|
133
|
+
step_limit_config: StepLimitConfig | None # Step and delegation limit configuration
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
|
|
137
|
+
"""A ReAct agent template built on LangGraph.
|
|
138
|
+
|
|
139
|
+
This agent can use either:
|
|
140
|
+
- An LMInvoker (if self.lm_invoker is set by BaseAgent)
|
|
141
|
+
- A LangChain BaseChatModel (if self.model is set by BaseAgent)
|
|
142
|
+
|
|
143
|
+
The graph structure follows the standard ReAct pattern:
|
|
144
|
+
agent -> tools -> agent (loop) -> END
|
|
145
|
+
"""
|
|
146
|
+
|
|
147
|
+
def __init__( # noqa: PLR0913
|
|
148
|
+
self,
|
|
149
|
+
name: str,
|
|
150
|
+
instruction: str = DEFAULT_INSTRUCTION,
|
|
151
|
+
model: BaseChatModel | str | Any | None = None,
|
|
152
|
+
tools: Sequence[BaseTool] | None = None,
|
|
153
|
+
agents: Sequence[Any] | None = None,
|
|
154
|
+
description: str | None = None,
|
|
155
|
+
thread_id_key: str = "thread_id",
|
|
156
|
+
event_emitter: EventEmitter | None = None,
|
|
157
|
+
tool_output_manager: ToolOutputManager | None = None,
|
|
158
|
+
planning: bool = False,
|
|
159
|
+
middlewares: Sequence[AgentMiddleware] | None = None,
|
|
160
|
+
step_limit_config: StepLimitConfig | None = None,
|
|
161
|
+
**kwargs: Any,
|
|
162
|
+
):
|
|
163
|
+
"""Initialize the LangGraph ReAct Agent.
|
|
164
|
+
|
|
165
|
+
Args:
|
|
166
|
+
name: The name of the agent.
|
|
167
|
+
instruction: The system instruction for the agent.
|
|
168
|
+
model: The model to use (lm_invoker, LangChain model, string, etc.).
|
|
169
|
+
tools: Sequence of LangChain tools available to the agent.
|
|
170
|
+
agents: Optional sequence of sub-agents for delegation (coordinator mode).
|
|
171
|
+
description: Human-readable description of the agent.
|
|
172
|
+
thread_id_key: Key for thread ID in configuration.
|
|
173
|
+
event_emitter: Optional event emitter for streaming updates.
|
|
174
|
+
tool_output_manager: Optional ToolOutputManager instance for tool output management.
|
|
175
|
+
When provided, enables tool output storage, reference resolution, and sharing capabilities.
|
|
176
|
+
This enables multi-agent workflows where agents can access each other's tool outputs.
|
|
177
|
+
If None, tool output management is disabled for this agent.
|
|
178
|
+
planning: Enable planning capabilities with TodoListMiddleware. Defaults to False.
|
|
179
|
+
middlewares: Optional sequence of custom middleware to COMPOSE (not override) with built-in middleware.
|
|
180
|
+
Execution order: [TodoListMiddleware (if planning=True),
|
|
181
|
+
...custom middlewares in order provided]
|
|
182
|
+
All middleware hooks execute - this extends capabilities, never replaces them.
|
|
183
|
+
enable_pii: Optional toggle to enable PII handling for tool inputs and outputs.
|
|
184
|
+
step_limit_config: Optional configuration for step limits and delegation depth.
|
|
185
|
+
**kwargs: Additional keyword arguments passed to BaseLangGraphAgent.
|
|
186
|
+
"""
|
|
187
|
+
# Use LangGraph's standard AgentState for ReAct
|
|
188
|
+
state_schema = kwargs.pop("state_schema", ReactAgentState)
|
|
189
|
+
enable_pii = kwargs.pop("enable_pii", None)
|
|
190
|
+
enable_pii = normalize_enable_pii(enable_pii)
|
|
191
|
+
|
|
192
|
+
super().__init__(
|
|
193
|
+
name=name,
|
|
194
|
+
instruction=instruction,
|
|
195
|
+
description=description,
|
|
196
|
+
model=model,
|
|
197
|
+
tools=tools,
|
|
198
|
+
state_schema=state_schema,
|
|
199
|
+
thread_id_key=thread_id_key,
|
|
200
|
+
event_emitter=event_emitter,
|
|
201
|
+
**kwargs,
|
|
202
|
+
)
|
|
203
|
+
|
|
204
|
+
# Handle tool output management
|
|
205
|
+
self.tool_output_manager = tool_output_manager
|
|
206
|
+
self._pii_handlers_by_thread: dict[str, ToolPIIHandler] = {}
|
|
207
|
+
self._enable_pii = enable_pii
|
|
208
|
+
|
|
209
|
+
# Initialize middleware tools list (populated by _setup_middleware)
|
|
210
|
+
self._middleware_tools: list[BaseTool] = []
|
|
211
|
+
|
|
212
|
+
# Setup middleware
|
|
213
|
+
self._middleware_manager = self._setup_middleware(
|
|
214
|
+
planning=planning,
|
|
215
|
+
custom_middlewares=middlewares,
|
|
216
|
+
)
|
|
217
|
+
|
|
218
|
+
# Handle delegation agents (coordinator mode) - following legacy pattern
|
|
219
|
+
if agents:
|
|
220
|
+
self.register_delegation_agents(list(agents))
|
|
221
|
+
|
|
222
|
+
self.step_limit_config = step_limit_config
|
|
223
|
+
|
|
224
|
+
def _setup_middleware(
|
|
225
|
+
self,
|
|
226
|
+
planning: bool,
|
|
227
|
+
custom_middlewares: Sequence[AgentMiddleware] | None,
|
|
228
|
+
) -> MiddlewareManager | None:
|
|
229
|
+
"""Setup middleware based on configuration.
|
|
230
|
+
|
|
231
|
+
Creates auto-configured middleware (planning) and composes
|
|
232
|
+
with custom middleware if provided.
|
|
233
|
+
|
|
234
|
+
Args:
|
|
235
|
+
planning: Whether to enable TodoListMiddleware.
|
|
236
|
+
custom_middlewares: Optional custom middlewares to append.
|
|
237
|
+
|
|
238
|
+
Returns:
|
|
239
|
+
MiddlewareManager if any middleware configured, None otherwise.
|
|
240
|
+
"""
|
|
241
|
+
middleware_list: list[AgentMiddleware] = []
|
|
242
|
+
|
|
243
|
+
# Auto-configure TodoListMiddleware if planning enabled
|
|
244
|
+
if planning:
|
|
245
|
+
middleware_list.append(TodoListMiddleware())
|
|
246
|
+
|
|
247
|
+
# Append custom middlewares
|
|
248
|
+
if custom_middlewares:
|
|
249
|
+
middleware_list.extend(custom_middlewares)
|
|
250
|
+
|
|
251
|
+
# Return manager if any middleware configured
|
|
252
|
+
if middleware_list:
|
|
253
|
+
manager = MiddlewareManager(middleware_list)
|
|
254
|
+
# Store middleware tools separately for proper rebuild support
|
|
255
|
+
middleware_tools = manager.get_all_tools()
|
|
256
|
+
if middleware_tools:
|
|
257
|
+
self._middleware_tools = list(middleware_tools)
|
|
258
|
+
# Add to resolved_tools for immediate use
|
|
259
|
+
self.resolved_tools = list(self.resolved_tools) + self._middleware_tools
|
|
260
|
+
# Enhance instruction with middleware prompt additions
|
|
261
|
+
self.instruction = manager.build_system_prompt(self.instruction)
|
|
262
|
+
return manager
|
|
263
|
+
|
|
264
|
+
return None
|
|
265
|
+
|
|
266
|
+
async def _get_effective_writer(self, writer: StreamWriter | None = None) -> StreamWriter | None:
|
|
267
|
+
"""Get the effective stream writer, falling back to ContextVar if needed.
|
|
268
|
+
|
|
269
|
+
Args:
|
|
270
|
+
writer: Optional stream writer to use.
|
|
271
|
+
|
|
272
|
+
Returns:
|
|
273
|
+
The effective stream writer or None if retrieval fails.
|
|
274
|
+
"""
|
|
275
|
+
try:
|
|
276
|
+
return writer or get_stream_writer()
|
|
277
|
+
except Exception:
|
|
278
|
+
return None
|
|
279
|
+
|
|
280
|
+
def _get_step_limit_manager(
|
|
281
|
+
self,
|
|
282
|
+
state: dict[str, Any],
|
|
283
|
+
node_type: str,
|
|
284
|
+
writer: StreamWriter | None = None,
|
|
285
|
+
count: int = 1,
|
|
286
|
+
manager: StepLimitManager | None = None,
|
|
287
|
+
) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
|
|
288
|
+
"""Return initialized StepLimitManager or early state update.
|
|
289
|
+
|
|
290
|
+
Args:
|
|
291
|
+
state: Current LangGraph state dictionary.
|
|
292
|
+
node_type: `"agent"` or `"tool"`; determines the fallback message format when limits are exceeded.
|
|
293
|
+
writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
|
|
294
|
+
count: Number of steps to check.
|
|
295
|
+
manager: Optional existing manager to reuse.
|
|
296
|
+
|
|
297
|
+
Returns:
|
|
298
|
+
Tuple where the first element is a state update dict when execution should stop, and the second element is the active `StepLimitManager` when limits allow the node to proceed.
|
|
299
|
+
"""
|
|
300
|
+
limit_error_update, manager = self._check_step_limits_helper(
|
|
301
|
+
state, node_type, writer=writer, count=count, manager=manager
|
|
302
|
+
)
|
|
303
|
+
if limit_error_update:
|
|
304
|
+
return limit_error_update, None
|
|
305
|
+
if manager is None:
|
|
306
|
+
return {}, None
|
|
307
|
+
manager.set_context()
|
|
308
|
+
return None, manager
|
|
309
|
+
|
|
310
|
+
def _emit_step_limit_event(
|
|
311
|
+
self,
|
|
312
|
+
event_type: A2AStreamEventType,
|
|
313
|
+
metadata: dict[str, Any],
|
|
314
|
+
writer: StreamWriter | None = None,
|
|
315
|
+
) -> None:
|
|
316
|
+
"""Emit a step limit event via LangGraph stream writer or EventEmitter.
|
|
317
|
+
|
|
318
|
+
Args:
|
|
319
|
+
event_type: The type of event to emit.
|
|
320
|
+
metadata: Metadata to include in the event.
|
|
321
|
+
writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
|
|
322
|
+
"""
|
|
323
|
+
enriched_metadata = dict(metadata)
|
|
324
|
+
enriched_metadata.setdefault("status", "error")
|
|
325
|
+
enriched_metadata.setdefault("kind", "agent_default")
|
|
326
|
+
|
|
327
|
+
event_payload = self._create_a2a_event(
|
|
328
|
+
event_type=event_type,
|
|
329
|
+
content=enriched_metadata.get("message", ""),
|
|
330
|
+
metadata=enriched_metadata,
|
|
331
|
+
)
|
|
332
|
+
|
|
333
|
+
try:
|
|
334
|
+
effective_writer = writer or get_stream_writer()
|
|
335
|
+
except Exception:
|
|
336
|
+
effective_writer = None
|
|
337
|
+
|
|
338
|
+
if effective_writer:
|
|
339
|
+
effective_writer(event_payload)
|
|
340
|
+
return
|
|
341
|
+
|
|
342
|
+
if self.event_emitter:
|
|
343
|
+
self.event_emitter.emit(event_payload["event_type"], event_payload["metadata"])
|
|
344
|
+
|
|
345
|
+
def _check_step_limits_helper(
|
|
346
|
+
self,
|
|
347
|
+
state: dict[str, Any],
|
|
348
|
+
node_type: str,
|
|
349
|
+
writer: StreamWriter | None = None,
|
|
350
|
+
count: int = 1,
|
|
351
|
+
manager: StepLimitManager | None = None,
|
|
352
|
+
) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
|
|
353
|
+
"""Check step limits and return state update if limit exceeded.
|
|
354
|
+
|
|
355
|
+
Centralized logic to avoid duplication between agent_node and tool_node.
|
|
356
|
+
|
|
357
|
+
Args:
|
|
358
|
+
state: Current agent state.
|
|
359
|
+
node_type: Either 'agent' or 'tool' to determine return message types.
|
|
360
|
+
writer: Optional stream writer for emitting custom events if event_emitter is missing.
|
|
361
|
+
count: Number of steps to check.
|
|
362
|
+
manager: Optional existing manager to reuse.
|
|
363
|
+
|
|
364
|
+
Returns:
|
|
365
|
+
Tuple of (state update dict if limit exceeded else None, active StepLimitManager instance).
|
|
366
|
+
"""
|
|
367
|
+
try:
|
|
368
|
+
if manager is None:
|
|
369
|
+
manager = StepLimitManager.from_state(state)
|
|
370
|
+
manager.check_step_limit(agent_name=self.name, count=count)
|
|
371
|
+
|
|
372
|
+
return None, manager
|
|
373
|
+
|
|
374
|
+
except MaxStepsExceededError as e:
|
|
375
|
+
logger.warning(f"Agent '{self.name}': {e.error_response.message}")
|
|
376
|
+
metadata = {
|
|
377
|
+
"message": e.error_response.message,
|
|
378
|
+
"agent_name": e.error_response.agent_name,
|
|
379
|
+
"current_value": e.error_response.current_value,
|
|
380
|
+
"configured_limit": e.error_response.configured_limit,
|
|
381
|
+
}
|
|
382
|
+
self._emit_step_limit_event(
|
|
383
|
+
A2AStreamEventType.STEP_LIMIT_EXCEEDED,
|
|
384
|
+
metadata,
|
|
385
|
+
writer,
|
|
386
|
+
)
|
|
387
|
+
if node_type == "tool":
|
|
388
|
+
return (
|
|
389
|
+
{
|
|
390
|
+
"messages": [ToolMessage(content=f"⚠️ {e.error_response.message}", tool_call_id="step_limit")],
|
|
391
|
+
},
|
|
392
|
+
None,
|
|
393
|
+
)
|
|
394
|
+
return (
|
|
395
|
+
{
|
|
396
|
+
"messages": [AIMessage(content=f"⚠️ {e.error_response.message}")],
|
|
397
|
+
},
|
|
398
|
+
None,
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
def _rebuild_resolved_tools(self) -> None:
|
|
402
|
+
"""Rebuild resolved tools including middleware tools.
|
|
403
|
+
|
|
404
|
+
Overrides base class to ensure middleware tools are preserved
|
|
405
|
+
when tools are rebuilt (e.g., after update_regular_tools).
|
|
406
|
+
"""
|
|
407
|
+
# Call base class to rebuild with regular, a2a, delegation, and mcp tools
|
|
408
|
+
super()._rebuild_resolved_tools()
|
|
409
|
+
|
|
410
|
+
# Add middleware tools if present
|
|
411
|
+
if hasattr(self, "_middleware_tools") and self._middleware_tools:
|
|
412
|
+
self.resolved_tools.extend(self._middleware_tools)
|
|
413
|
+
|
|
414
|
+
def _handle_tool_artifacts(
|
|
415
|
+
self, tool_output: Any, pending_artifacts: list[dict[str, Any]]
|
|
416
|
+
) -> tuple[str, list[dict[str, Any]]]:
|
|
417
|
+
"""Handle artifact extraction from tool output.
|
|
418
|
+
|
|
419
|
+
Args:
|
|
420
|
+
tool_output: The output from the tool execution.
|
|
421
|
+
pending_artifacts: Current list of pending artifacts.
|
|
422
|
+
|
|
423
|
+
Returns:
|
|
424
|
+
Tuple of (agent_result_text, updated_pending_artifacts).
|
|
425
|
+
"""
|
|
426
|
+
if isinstance(tool_output, dict) and "artifacts" in tool_output:
|
|
427
|
+
artifacts = tool_output["artifacts"]
|
|
428
|
+
if isinstance(artifacts, list):
|
|
429
|
+
pending_artifacts.extend(artifacts)
|
|
430
|
+
return tool_output.get("result", ""), pending_artifacts
|
|
431
|
+
else:
|
|
432
|
+
return str(tool_output), pending_artifacts
|
|
433
|
+
|
|
434
|
+
# ruff: noqa: PLR0915
|
|
435
|
+
def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
|
|
436
|
+
"""Define the ReAct agent graph structure.
|
|
437
|
+
|
|
438
|
+
Args:
|
|
439
|
+
graph_builder: The StateGraph builder to define the graph structure.
|
|
440
|
+
|
|
441
|
+
Returns:
|
|
442
|
+
Compiled LangGraph ready for execution.
|
|
443
|
+
"""
|
|
444
|
+
# Create node functions using helper methods
|
|
445
|
+
agent_node = self._create_agent_node()
|
|
446
|
+
tool_node_logic = self._create_tool_node_logic()
|
|
447
|
+
should_continue = self._create_should_continue_logic(END)
|
|
448
|
+
|
|
449
|
+
# Add memory node if memory is enabled
|
|
450
|
+
if self._memory_enabled():
|
|
451
|
+
memory_enhancer_agent = self._create_memory_enhancer_agent()
|
|
452
|
+
graph_builder.add_node("memory_enhancer", self._create_memory_node(memory_enhancer_agent))
|
|
453
|
+
graph_builder.set_entry_point("memory_enhancer")
|
|
454
|
+
graph_builder.add_edge("memory_enhancer", "agent")
|
|
455
|
+
else:
|
|
456
|
+
graph_builder.set_entry_point("agent")
|
|
457
|
+
|
|
458
|
+
graph_builder.add_node("agent", agent_node)
|
|
459
|
+
|
|
460
|
+
if self.resolved_tools:
|
|
461
|
+
graph_builder.add_node("tools", tool_node_logic)
|
|
462
|
+
graph_builder.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
|
|
463
|
+
graph_builder.add_edge("tools", "agent")
|
|
464
|
+
else:
|
|
465
|
+
graph_builder.add_edge("agent", END)
|
|
466
|
+
|
|
467
|
+
return graph_builder.compile(
|
|
468
|
+
checkpointer=self.checkpointer,
|
|
469
|
+
)
|
|
470
|
+
|
|
471
|
+
def _create_memory_enhancer_agent(self) -> Any:
|
|
472
|
+
"""Create dedicated LangGraphMemoryEnhancerAgent instance for memory enhancement.
|
|
473
|
+
|
|
474
|
+
Returns:
|
|
475
|
+
LangGraphMemoryEnhancerAgent: Configured mini-agent for automatic memory retrieval.
|
|
476
|
+
"""
|
|
477
|
+
# Lazy import to avoid circular dependency: LangGraphReactAgent imports
|
|
478
|
+
# LangGraphMemoryEnhancerAgent which inherits from LangGraphReactAgent.
|
|
479
|
+
from aip_agents.agent.langgraph_memory_enhancer_agent import ( # noqa: PLC0415
|
|
480
|
+
LangGraphMemoryEnhancerAgent,
|
|
481
|
+
)
|
|
482
|
+
|
|
483
|
+
model_id = getattr(self.lm_invoker, "model_id", None)
|
|
484
|
+
model = self.model or model_id
|
|
485
|
+
return LangGraphMemoryEnhancerAgent(
|
|
486
|
+
memory=self.memory,
|
|
487
|
+
model=model,
|
|
488
|
+
memory_agent_id=self.memory_agent_id,
|
|
489
|
+
memory_retrieval_limit=self.memory_retrieval_limit,
|
|
490
|
+
)
|
|
491
|
+
|
|
492
|
+
def _create_memory_node(self, memory_enhancer_agent: Any) -> Any:
|
|
493
|
+
"""Create memory enhancement node that delegates to LangGraphMemoryEnhancerAgent.
|
|
494
|
+
|
|
495
|
+
Args:
|
|
496
|
+
memory_enhancer_agent: The LangGraphMemoryEnhancerAgent instance to use for enhancement.
|
|
497
|
+
|
|
498
|
+
Returns:
|
|
499
|
+
Callable: Async function that enhances user query with memory context.
|
|
500
|
+
"""
|
|
501
|
+
|
|
502
|
+
async def memory_node(state: dict[str, Any], config: dict[str, Any] | None = None) -> dict[str, Any]:
|
|
503
|
+
"""Enhance user query with memory context via LangGraphMemoryEnhancerAgent.
|
|
504
|
+
|
|
505
|
+
Args:
|
|
506
|
+
state: LangGraph state containing conversation messages.
|
|
507
|
+
config: Optional LangGraph configuration.
|
|
508
|
+
|
|
509
|
+
Returns:
|
|
510
|
+
State update with potentially enhanced last message.
|
|
511
|
+
"""
|
|
512
|
+
user_query = self._extract_user_query_from_messages(state.get("messages", []))
|
|
513
|
+
if not user_query:
|
|
514
|
+
return {}
|
|
515
|
+
|
|
516
|
+
try:
|
|
517
|
+
metadata = state.get("metadata", {})
|
|
518
|
+
enhanced_result = await memory_enhancer_agent.arun(query=user_query, metadata=metadata)
|
|
519
|
+
enhanced_query = enhanced_result.get("output", user_query)
|
|
520
|
+
|
|
521
|
+
if enhanced_query == user_query:
|
|
522
|
+
logger.debug(f"Agent '{self.name}': No memory enhancement needed")
|
|
523
|
+
return {}
|
|
524
|
+
|
|
525
|
+
logger.info(f"Agent '{self.name}': Memory enhancement completed")
|
|
526
|
+
enhanced_message = HumanMessage(content=enhanced_query)
|
|
527
|
+
# Append enhanced message (with add_messages reducer, this creates: original + enhanced)
|
|
528
|
+
return {"messages": [enhanced_message]}
|
|
529
|
+
|
|
530
|
+
except Exception as e:
|
|
531
|
+
logger.warning(f"Agent '{self.name}': Memory enhancement failed: {e}")
|
|
532
|
+
return {}
|
|
533
|
+
|
|
534
|
+
return memory_node
|
|
535
|
+
|
|
536
|
+
def _extract_user_query_from_messages(self, messages: list[Any]) -> str | None:
|
|
537
|
+
"""Get latest user query string from a list of messages.
|
|
538
|
+
|
|
539
|
+
Args:
|
|
540
|
+
messages: List of LangChain messages to search through.
|
|
541
|
+
|
|
542
|
+
Returns:
|
|
543
|
+
The content string from the most recent HumanMessage if valid, None otherwise.
|
|
544
|
+
"""
|
|
545
|
+
if not messages:
|
|
546
|
+
return None
|
|
547
|
+
for i in range(len(messages) - 1, -1, -1):
|
|
548
|
+
msg = messages[i]
|
|
549
|
+
if isinstance(msg, HumanMessage) and hasattr(msg, "content"):
|
|
550
|
+
content = msg.content
|
|
551
|
+
if isinstance(content, str) and content.strip():
|
|
552
|
+
return content
|
|
553
|
+
return None
|
|
554
|
+
return None
|
|
555
|
+
    def _create_agent_node(self) -> Callable[..., Awaitable[dict[str, Any]]]:
        """Create the agent node function for the graph."""

        async def agent_node(
            state: dict[str, Any], config: dict[str, Any] | None = None, *, writer: StreamWriter = None
        ) -> dict[str, Any]:
            """Call the appropriate LLM and return new messages.

            Args:
                state: Current agent state containing messages and conversation context.
                config: Optional configuration containing thread_id and execution parameters.
                writer: Optional stream writer for emitting custom events.

            Returns:
                Updated state dictionary with new AI messages and token usage.
            """
            writer = await self._get_effective_writer(writer)
            limit_error_update, manager = self._get_step_limit_manager(state, "agent", writer=writer)
            if limit_error_update:
                return limit_error_update
            if manager is None:
                return {}

            current_messages = state["messages"]

            # Execute LLM call
            if self.lm_invoker:
                result = await self._handle_lm_invoker_call(current_messages, state, config)
            elif isinstance(self.model, BaseChatModel):
                result = await self._handle_langchain_model_call(current_messages, state, config)
            else:
                raise ValueError(
                    f"Agent '{self.name}': No valid LMInvoker or LangChain model configured for ReAct agent node."
                )

            # Increment step counter after successful execution
            manager.increment_step()
            # Update state with new step count
            result.update(manager.to_state_update())

            return result

        return agent_node
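
# Minimal sketch (assumption) of the LangChain branch above: the node hands
# the running transcript to the chat model and appends the AIMessage reply,
# whose token usage typically arrives on response.usage_metadata.
from typing import Any

from langchain_core.language_models.chat_models import BaseChatModel


async def demo_langchain_model_call(model: BaseChatModel, state: dict[str, Any]) -> dict[str, Any]:
    response = await model.ainvoke(state["messages"])  # returns an AIMessage
    return {"messages": [response]}
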
    def _extract_tool_calls_from_state(self, state: dict[str, Any]) -> tuple[AIMessage | None, int]:
        """Extract the last AI message and tool call count from state.

        Args:
            state: Current agent state.

        Returns:
            Tuple of (last AI message or None, count of tool calls).
        """
        messages = state.get("messages", [])
        last_message = messages[-1] if messages else None
        if not self.resolved_tools or not isinstance(last_message, AIMessage) or not last_message.tool_calls:
            return None, 0
        return last_message, len(last_message.tool_calls)

    def _check_tool_batch_limits(
        self,
        state: dict[str, Any],
        tool_call_count: int,
        manager: StepLimitManager,
        writer: StreamWriter | None,
    ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
        """Check if the tool batch exceeds step limits.

        Args:
            state: Current LangGraph state dictionary.
            tool_call_count: Number of tools in the current batch.
            manager: Initialized StepLimitManager.
            writer: Optional stream writer for events.

        Returns:
            Tuple of (limit update dict or None, manager instance).
        """
        if tool_call_count <= 1:
            return None, manager
        return self._get_step_limit_manager(state, "tool", writer=writer, count=tool_call_count, manager=manager)
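
# Illustrative stand-in (hypothetical; the real StepLimitManager ships with
# this package) for the contract the nodes above rely on: count steps per
# batch and serialize the counter back into graph state. The would_exceed
# name is invented for this sketch.
from dataclasses import dataclass
from typing import Any


@dataclass
class DemoStepLimiter:
    max_steps: int
    steps: int = 0

    def would_exceed(self, count: int = 1) -> bool:
        return self.steps + count > self.max_steps

    def increment_step(self, count: int = 1) -> None:
        self.steps += count

    def to_state_update(self) -> dict[str, Any]:
        return {"step_count": self.steps}
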
    def _create_tool_node_logic(self) -> Callable[..., Awaitable[dict[str, Any]]]:
        """Create the tool node logic function for the graph."""

        async def tool_node_logic(
            state: dict[str, Any],
            config: dict[str, Any] | None = None,
            *,
            writer: StreamWriter = None,
        ) -> dict[str, Any]:
            """Execute tools with artifact payload separation and reference collection.

            Args:
                state: Current agent state.
                config: Optional execution configuration.
                writer: Optional stream writer.

            Returns:
                Updated state dictionary with tool results.
            """
            writer = await self._get_effective_writer(writer)
            limit_error, manager = self._get_step_limit_manager(state, "tool", writer=writer)
            if limit_error or manager is None:
                return limit_error or {}

            last_message, tool_call_count = self._extract_tool_calls_from_state(state)
            if not last_message:
                return {}

            # Re-check step limits with the actual batch count (Spec-3)
            limit_error, manager = self._check_tool_batch_limits(state, tool_call_count, manager, writer)
            if limit_error or manager is None:
                return limit_error or {}

            result = await self._execute_tool_calls(last_message, state, config)

            # Increment step after tool execution
            manager.increment_step(count=tool_call_count)
            result.update(manager.to_state_update())

            return result

        return tool_node_logic

    async def _execute_tool_calls(
        self, last_message: AIMessage, state: dict[str, Any], config: dict[str, Any] | None
    ) -> dict[str, Any]:
        """Execute tool calls and aggregate results.

        Runs multiple tool calls concurrently for better parallelism.

        Args:
            last_message: The AI message containing tool calls to execute.
            state: Current agent state containing messages, artifacts, and metadata.
            config: Optional configuration containing thread_id and other execution context.

        Returns:
            Updated state dictionary with tool execution results including messages,
            artifacts, references, and metadata updates.
        """
        tool_messages: list[ToolMessage] = []
        pending_artifacts: list[dict[str, Any]] = state.get("artifacts") or []
        reference_updates: list[Chunk] = []
        tool_map = {tool.name: tool for tool in self.resolved_tools}
        pii_mapping: dict[str, str] = {}

        aggregated_metadata_delta: dict[str, Any] = {}
        total_tools_token_usage: list[UsageMetadata] = []

        async def run_tool(tool_call: dict[str, Any]) -> ToolCallResult:
            """Run a single tool call asynchronously.

            Args:
                tool_call: Tool call dictionary.

            Returns:
                Tool result from execution.
            """
            return await self._run_single_tool_call(
                tool_map=tool_map,
                tool_call=tool_call,
                context=ToolCallContext(
                    config=config,
                    state=state,
                    pending_artifacts=pending_artifacts,
                ),
            )

        tasks = [asyncio.create_task(run_tool(tc)) for tc in last_message.tool_calls]

        for coro in asyncio.as_completed(tasks):
            tool_result = await coro
            self._accumulate_tool_result(
                tool_result,
                tool_messages,
                pending_artifacts,
                aggregated_metadata_delta,
                reference_updates,
                total_tools_token_usage,
                pii_mapping,
            )

        return self._build_tool_state_updates(
            tool_messages,
            pending_artifacts,
            reference_updates,
            aggregated_metadata_delta,
            total_tools_token_usage,
            pii_mapping,
        )
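
# Concurrency sketch: the fan-out above in reduced form. create_task starts
# every tool immediately and asyncio.as_completed folds results in
# completion order, so one slow tool does not block accumulation of the rest.
import asyncio
from typing import Any, Awaitable, Callable


async def demo_fan_out(items: list[Any], run_one: Callable[[Any], Awaitable[Any]]) -> list[Any]:
    tasks = [asyncio.create_task(run_one(item)) for item in items]
    results = []
    for fut in asyncio.as_completed(tasks):
        results.append(await fut)  # completion order, not submission order
    return results
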
    def _accumulate_tool_result(  # noqa: PLR0913
        self,
        tool_result: Any,
        tool_messages: list[ToolMessage],
        pending_artifacts: list[dict[str, Any]],
        aggregated_metadata_delta: dict[str, Any],
        reference_updates: list[Chunk],
        total_tools_token_usage: list[UsageMetadata],
        pii_mapping: dict[str, str] | None,
    ) -> None:
        """Accumulate results from a single tool call.

        Args:
            tool_result: The result object from a single tool execution containing messages,
                artifacts, metadata_delta, references, usage information, and PII mapping.
            tool_messages: List to accumulate tool messages into.
            pending_artifacts: List to accumulate artifacts into.
            aggregated_metadata_delta: Dictionary to accumulate metadata updates into.
            reference_updates: List to accumulate reference chunks into.
            total_tools_token_usage: List to accumulate token usage metadata into.
            pii_mapping: Dictionary to accumulate PII mappings into (mutated in place).
        """
        if tool_result.messages:
            tool_messages.extend(tool_result.messages)
        if tool_result.artifacts:
            pending_artifacts.extend(tool_result.artifacts)
        if tool_result.metadata_delta:
            aggregated_metadata_delta.update(tool_result.metadata_delta)
        if tool_result.references:
            reference_updates.extend(tool_result.references)
        if tool_result.step_usage:
            total_tools_token_usage.append(tool_result.step_usage)
        if tool_result.pii_mapping:
            pii_mapping.update(tool_result.pii_mapping)

    def _build_tool_state_updates(
        self,
        tool_messages: list[ToolMessage],
        pending_artifacts: list[dict[str, Any]],
        reference_updates: list[Chunk],
        aggregated_metadata_delta: dict[str, Any],
        total_tools_token_usage: list[UsageMetadata],
        pii_mapping: dict[str, str] | None = None,
    ) -> dict[str, Any]:
        """Build state updates from accumulated tool results.

        Args:
            tool_messages: List of tool messages to include in state updates.
            pending_artifacts: List of artifacts to include in state updates.
            reference_updates: List of reference chunks to include in state updates.
            aggregated_metadata_delta: Metadata changes to include in state updates.
            total_tools_token_usage: List of token usage metadata from all tool executions.
            pii_mapping: Current PII mapping to include in state updates.

        Returns:
            Dictionary containing state updates with messages, artifacts, references,
            metadata, token usage, and PII mapping information.
        """
        state_updates: dict[str, Any] = {"messages": tool_messages, "artifacts": pending_artifacts}

        if reference_updates:
            state_updates["references"] = reference_updates

        # Clean metadata delta to avoid leaking linkage-only fields
        if "previous_step_ids" in aggregated_metadata_delta:
            aggregated_metadata_delta = {k: v for k, v in aggregated_metadata_delta.items() if k != "previous_step_ids"}

        if aggregated_metadata_delta:
            state_updates["metadata"] = aggregated_metadata_delta

        # Process accumulated tool usage
        total_tool_usage = self._process_tool_usage(total_tools_token_usage)
        if total_tool_usage:
            state_updates[TOTAL_USAGE_KEY] = total_tool_usage

        # Include PII mapping in state updates if present
        if pii_mapping:
            state_updates["pii_mapping"] = pii_mapping

        return state_updates

    def _create_should_continue_logic(self, end_node: str) -> Callable[[dict[str, Any]], str]:
        """Create the should_continue function for conditional edges.

        Args:
            end_node: The name of the end node to return when execution should stop.

        Returns:
            Function that determines the next node based on the current state.
        """

        def should_continue(state: dict[str, Any]) -> str:
            """Determine whether to continue to tools or end.

            Args:
                state: Current agent state containing messages and execution status.

            Returns:
                Either "tools" to continue tool execution or the end_node to stop execution.
            """
            messages = state.get("messages", [])
            if not messages:
                return end_node

            last_message = messages[-1]

            # Check if this is the last step
            if state.get("is_last_step", False):
                logger.debug(f"Agent '{self.name}': Reached last step, ending execution")
                return end_node

            if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
                return end_node

            return "tools"

        return should_continue
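
# Wiring sketch (editor assumption): how the conditional edge built above is
# usually attached, looping agent -> tools -> agent until the latest
# AIMessage carries no tool calls.
from typing import Annotated

from langchain_core.messages import AIMessage
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import TypedDict


class DemoLoopState(TypedDict):
    messages: Annotated[list, add_messages]


def demo_should_continue(state: DemoLoopState) -> str:
    last = state["messages"][-1] if state["messages"] else None
    if isinstance(last, AIMessage) and last.tool_calls:
        return "tools"
    return END


demo_loop = StateGraph(DemoLoopState)
demo_loop.add_node("agent", lambda state: {})
demo_loop.add_node("tools", lambda state: {})
demo_loop.add_edge(START, "agent")
demo_loop.add_conditional_edges("agent", demo_should_continue, {"tools": "tools", END: END})
demo_loop.add_edge("tools", "agent")
demo_loop_app = demo_loop.compile()
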
    def _add_usage_metadata_to_tool_message(
        self, messages: list[ToolMessage], usage_metadata: UsageMetadata | None
    ) -> None:
        """Add usage metadata to a tool message's response metadata.

        Args:
            messages: List of tool messages to potentially update.
            usage_metadata: The usage metadata to add to the first tool message, if any.

        Note:
            - Used for streaming purposes only, to surface per-tool token usage via ToolMessage response_metadata.
            - Tool messages coming from a Command with a single message, or from a dict output, contain exactly one message.
            - In those cases, usage_metadata is added to the response_metadata of the first message.
        """
        if len(messages) == 1 and isinstance(messages[0], ToolMessage) and usage_metadata is not None:
            messages[0].response_metadata[USAGE_METADATA_KEY] = usage_metadata

    def _process_tool_usage(self, total_tools_token_usage: list[UsageMetadata]) -> UsageMetadata | None:
        """Process accumulated tool usage metadata.

        Args:
            total_tools_token_usage: List of UsageMetadata objects to process.

        Returns:
            The accumulated token usage metadata, or None if no usage was recorded.
        """
        if not total_tools_token_usage:
            return None

        # Fold the per-tool usages into a single accumulated UsageMetadata
        return reduce(add_usage_metadata, total_tools_token_usage, None)
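
# Arithmetic sketch of the reduce above, with a hypothetical adder standing
# in for add_usage_metadata; UsageMetadata is a TypedDict of token counts.
from functools import reduce


def demo_add_usage(acc, usage):
    if acc is None:
        return usage
    return {
        "input_tokens": acc["input_tokens"] + usage["input_tokens"],
        "output_tokens": acc["output_tokens"] + usage["output_tokens"],
        "total_tokens": acc["total_tokens"] + usage["total_tokens"],
    }


_demo_usages = [
    {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    {"input_tokens": 3, "output_tokens": 2, "total_tokens": 5},
]
_demo_total = reduce(demo_add_usage, _demo_usages, None)
# -> {"input_tokens": 13, "output_tokens": 7, "total_tokens": 20}
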
    def _process_command_tool_output(
        self,
        tool_output: Command,
        tool_call: dict[str, Any],
        execution_time: float,
    ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
        """Convert a Command tool output into messages, artifacts, and metadata deltas.

        Args:
            tool_output: The Command returned by the tool.
            tool_call: The tool call info (id, name, args) for ToolMessage context.
            execution_time: Execution time to include in ToolMessage tool_calls.

        Returns:
            A tuple of (messages, artifacts, metadata_delta).
        """
        update: dict[str, Any] = getattr(tool_output, "update", {}) or {}

        out_messages: list[ToolMessage] = []
        out_artifacts: list[dict[str, Any]] = []
        metadata_delta: dict[str, Any] = {}

        # Artifacts
        artifacts_update = update.get("artifacts")
        if isinstance(artifacts_update, list):
            out_artifacts.extend(artifacts_update)

        # Metadata
        md_update = update.get("metadata")
        if isinstance(md_update, dict):
            metadata_delta.update(md_update)

        # Messages, or fall back to the 'result' key
        messages_update = update.get("messages")
        if isinstance(messages_update, list):
            out_messages.extend(messages_update)
        else:
            agent_result = str(update.get("result", ""))
            out_messages.append(
                ToolMessage(
                    content=agent_result,
                    tool_call_id=tool_call["id"],
                    tool_calls={
                        "name": tool_call["name"],
                        "args": tool_call["args"],
                        "output": agent_result,
                        "time": execution_time,
                    },
                )
            )

        # If metadata contains linkage info, attach to first ToolMessage response_metadata
        md = update.get("metadata")
        if isinstance(md, dict):
            prev_ids = md.get("previous_step_ids")
            if isinstance(prev_ids, list) and prev_ids and out_messages:
                try:
                    out_messages[0].response_metadata.setdefault("previous_step_ids", [])
                    existing = out_messages[0].response_metadata.get("previous_step_ids", [])
                    combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
                    out_messages[0].response_metadata["previous_step_ids"] = combined
                except Exception:
                    pass

        return out_messages, out_artifacts, metadata_delta
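
# Tool-side sketch (assumption: a langgraph version that lets tools return a
# Command; the "result"/"artifacts"/"metadata" keys follow this package's
# conventions, not a langgraph requirement).
from langchain_core.tools import tool
from langgraph.types import Command


@tool
def demo_lookup(query: str) -> Command:
    """Look something up and return a rich Command update."""
    return Command(
        update={
            "result": f"found: {query}",
            "artifacts": [{"type": "table", "rows": 3}],
            "metadata": {"source": "demo-index"},
        }
    )
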
    def _process_simple_tool_output(
        self,
        agent_result_text: str,
        tool_call: dict[str, Any],
        execution_time: float,
    ) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
        """Convert a simple string tool output into messages with no artifacts.

        Args:
            agent_result_text: The string result from tool execution.
            tool_call: The tool call information containing id, name, and args.
            execution_time: Time taken to execute the tool.

        Returns:
            Tuple of (tool_messages, artifacts) where artifacts is always an empty list.
        """
        messages = [
            ToolMessage(
                content=agent_result_text,
                tool_call_id=tool_call["id"],
                tool_calls={
                    "name": tool_call["name"],
                    "args": tool_call["args"],
                    "output": agent_result_text,
                    "time": execution_time,
                },
            )
        ]
        return messages, []

    @deprecated(version="0.5.0", reason="Use _process_command_tool_output instead")
    def _process_legacy_tool_output(
        self,
        tool_output: dict[str, Any],
        tool_call: dict[str, Any],
        execution_time: float,
        pending_artifacts: list[dict[str, Any]],
    ) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
        """Normalize legacy dict outputs into ToolMessages and artifacts.

        Supports legacy tools that return a mapping possibly containing 'artifacts'
        and 'result' keys.

        Args:
            tool_output: The legacy dict output from tool execution.
            tool_call: The tool call information containing id, name, and args.
            execution_time: Time taken to execute the tool.
            pending_artifacts: Current list of pending artifacts to extend with new ones.

        Returns:
            Tuple of (tool_messages, updated_pending_artifacts).
        """
        if isinstance(tool_output.get("artifacts"), list):
            pending_artifacts.extend(tool_output["artifacts"])

        agent_result = str(tool_output.get("result", tool_output))

        # Extract metadata from tool_output if present
        response_metadata = {}
        if isinstance(tool_output, dict) and isinstance(tool_output.get("metadata"), dict):
            response_metadata.update(tool_output["metadata"])

        messages = [
            ToolMessage(
                content=agent_result,
                tool_call_id=tool_call["id"],
                tool_calls={
                    "name": tool_call["name"],
                    "args": tool_call["args"],
                    "output": agent_result,
                    "time": execution_time,
                },
                response_metadata=response_metadata,
            )
        ]
        return messages, pending_artifacts

    async def _run_single_tool_call(
        self,
        tool_map: dict[str, BaseTool],
        tool_call: dict[str, Any],
        context: ToolCallContext,
    ) -> ToolCallResult:
        """Execute a single tool call with tool output management and reference resolution.

        This method handles the complete lifecycle of a tool call including:
        - Reference resolution for tool arguments
        - Tool execution with enhanced configuration
        - Automatic and manual tool output storage
        - Error handling for reference and execution failures

        Args:
            tool_map: Mapping of tool name to tool instance.
            tool_call: The tool call information from the AI message.
            context: Tool call context containing config, state, pending artifacts, and HITL decision.

        Returns:
            ToolCallResult containing messages, artifacts, metadata_delta, references, and usage_metadata.
        """
        tool = tool_map.get(tool_call["name"])  # type: ignore[index]
        tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")

        # Check for HITL approval if configured
        if context.hitl_decision is None:
            try:
                context.hitl_decision = await self._check_hitl_approval(
                    tool_call=tool_call, tool_name=tool_call["name"], state=context.state
                )

                if context.hitl_decision and context.hitl_decision.decision in TOOL_EXECUTION_BLOCKING_DECISIONS:
                    # Return sentinel result for pending/rejected/skipped tools
                    return self._create_hitl_blocking_result(tool_call, context.hitl_decision)
            except Exception as e:
                # Log HITL failure but continue with normal tool execution
                logger.warning(
                    "HITL approval check failed for tool '%s' (error: %s: %s). Proceeding with tool execution.",
                    tool_call["name"],
                    type(e).__name__,
                    e,
                )

        # Execute tool and handle errors
        tool_output, execution_time, references, updated_pii_mapping = await self._execute_tool_with_management(
            tool=tool,
            tool_call=tool_call,
            tool_call_id=tool_call_id,
            config=context.config,
            state=context.state,
        )

        # Process tool output into messages and artifacts
        messages, artifacts, metadata_delta = self._process_tool_output_result(
            tool_output=tool_output,
            tool_call=tool_call,
            execution_time=execution_time,
            pending_artifacts=context.pending_artifacts,
        )

        # Capture and merge new PII mapping from subagent
        updated_pii_mapping = self._merge_tool_pii_mapping(metadata_delta, updated_pii_mapping)

        # If HITL was required, annotate the first ToolMessage with HITL metadata
        try:
            if context.hitl_decision and messages:
                first_msg = messages[0]
                if isinstance(first_msg, ToolMessage):
                    response_metadata = getattr(first_msg, "response_metadata", None) or {}
                    response_metadata = dict(response_metadata)
                    hitl_model = HitlMetadata.from_decision(context.hitl_decision)
                    response_metadata["hitl"] = hitl_model.as_payload()
                    first_msg.response_metadata = response_metadata
        except Exception as e:
            # Non-fatal: continue even if metadata injection fails
            logger.warning(f"Failed to inject HITL metadata into tool message: {e}")

        # Extract and add usage metadata
        tool_usage_metadata = extract_token_usage_from_tool_output(tool_output)
        self._add_usage_metadata_to_tool_message(messages, tool_usage_metadata)

        return ToolCallResult(
            messages=messages,
            artifacts=artifacts,
            metadata_delta=metadata_delta,
            references=references,
            step_usage=tool_usage_metadata,
            pii_mapping=updated_pii_mapping,
        )

    def _merge_tool_pii_mapping(
        self,
        metadata_delta: dict[str, Any],
        updated_pii_mapping: dict[str, str] | None,
    ) -> dict[str, str] | None:
        """Merge PII mapping from metadata delta into existing mapping.

        Args:
            metadata_delta: Metadata delta returned from tool execution.
            updated_pii_mapping: PII mapping produced during tool execution, if any.

        Returns:
            New merged PII mapping or None if no PII information is present.
        """
        if "pii_mapping" not in metadata_delta:
            return updated_pii_mapping

        metadata_pii_mapping = metadata_delta.get("pii_mapping") or {}
        if not isinstance(metadata_pii_mapping, dict) or not metadata_pii_mapping:
            return updated_pii_mapping

        if updated_pii_mapping:
            return {**updated_pii_mapping, **metadata_pii_mapping}

        return metadata_pii_mapping
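
# Precedence sketch: {**existing, **incoming} gives the tool's fresh
# metadata_delta the last word when the same placeholder appears twice.
_existing_pii = {"<EMAIL_1>": "alice@example.com"}
_incoming_pii = {"<EMAIL_1>": "bob@example.com", "<PHONE_1>": "+1-555-0100"}
_merged_pii = {**_existing_pii, **_incoming_pii}
# _merged_pii["<EMAIL_1>"] == "bob@example.com"
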
    async def _execute_tool_with_management(
        self,
        tool: BaseTool | None,
        tool_call: dict[str, Any],
        tool_call_id: str,
        config: dict[str, Any] | None,
        state: dict[str, Any],
    ) -> tuple[Any, float, list[Chunk], dict[str, str] | None]:
        """Execute tool with output management, reference resolution, and error handling.

        Args:
            tool: The tool instance to execute, or None if not found.
            tool_call: The tool call information from the AI message.
            tool_call_id: Unique identifier for this tool call.
            config: Optional configuration passed down to the tool.
            state: Current agent state containing tool output manager.

        Returns:
            Tuple of (tool_output, execution_time, references, updated_pii_mapping).
        """
        execution_time = 0.0
        references: list[Chunk] = []
        updated_pii_mapping: dict[str, str] | None = None

        if not tool:
            return f"Error: Tool '{tool_call['name']}' not found.", execution_time, references, updated_pii_mapping

        start_time = time.time()
        try:
            # Resolve tool argument references
            resolved_args = self._resolve_tool_arguments(tool_call, state, config)
            predefined_pii_mapping = self._get_predefined_pii_mapping(state, config)

            enable_pii = self._enable_pii
            if enable_pii is False:
                pii_handler = ToolPIIHandler.create_mapping_only(predefined_pii_mapping)
            else:
                pii_handler = self._create_pii_handler(predefined_pii_mapping, config)

            # Deanonymize tool arguments if PII handler is enabled
            resolved_args = self._deanonymize_tool_args(pii_handler, resolved_args)

            # Create enhanced tool configuration with output management
            tool_config = self._create_enhanced_tool_config(config, state, tool_call["name"], tool_call_id)

            arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)

            if arun_streaming_method and callable(arun_streaming_method):
                tool_output = await self._execute_tool_with_streaming(tool, tool_call, tool_config)
            else:
                tool_output = await tool.ainvoke(resolved_args, tool_config)

            references = extract_references_from_tool(tool, tool_output)

            # Anonymize tool output if PII handler is enabled
            tool_output, updated_pii_mapping = self._anonymize_tool_output(pii_handler, tool_output)

            # Handle automatic storage if enabled
            self._handle_automatic_tool_storage(
                ToolStorageParams(
                    tool=tool,
                    tool_output=tool_output,
                    tool_call=tool_call,
                    tool_call_id=tool_call_id,
                    resolved_args=resolved_args,
                    state=state,
                ),
                config=config,
            )

            return tool_output, time.time() - start_time, references, updated_pii_mapping

        except ToolReferenceError as ref_error:
            tool_output = f"Reference error in tool '{tool_call['name']}': {str(ref_error)}"
            logger.error(f"Tool reference error: {ref_error}", exc_info=True)
            return tool_output, time.time() - start_time, references, updated_pii_mapping
        except Exception as e:  # noqa: BLE001
            tool_output = f"Error executing tool '{tool_call['name']}': {str(e)}"
            logger.error(f"Tool execution error: {e}", exc_info=True)
            return tool_output, time.time() - start_time, references, updated_pii_mapping

    def _get_predefined_pii_mapping(
        self,
        state: dict[str, Any],
        config: dict[str, Any] | None,
    ) -> dict[str, str] | None:
        """Get predefined PII mapping from state or configuration.

        This helper centralizes the logic for resolving an existing PII mapping,
        first checking the agent state metadata, then falling back to the config
        metadata if available.

        Args:
            state: Current LangGraph agent state.
            config: Optional LangGraph configuration dictionary.

        Returns:
            The resolved PII mapping dictionary if found, otherwise None.
        """
        metadata_from_state = state.get("metadata") or {}
        mapping_from_state = metadata_from_state.get("pii_mapping")
        if isinstance(mapping_from_state, dict) and mapping_from_state:
            return mapping_from_state  # type: ignore[return-value]

        if not config:
            return None

        metadata_from_config = config.get("metadata") or {}
        mapping_from_config = metadata_from_config.get("pii_mapping")
        if isinstance(mapping_from_config, dict) and mapping_from_config:
            return mapping_from_config  # type: ignore[return-value]

        return None

    def _create_pii_handler(
        self, predefined_pii_mapping: dict[str, str] | None, config: dict[str, Any] | None
    ) -> ToolPIIHandler | None:
        """Create (or reuse) a PII handler scoped to the current thread.

        Thin wrapper around ToolPIIHandler.create_if_enabled to keep
        _execute_tool_with_management focused on orchestration. The handler can
        operate in mapping-only mode when no NER credentials are configured.

        Args:
            predefined_pii_mapping: Existing PII mapping to seed the handler with.
            config: LangGraph configuration needed to scope handlers per thread.

        Returns:
            A ToolPIIHandler instance when mapping/NER config is available, otherwise None.
        """
        thread_id: str | None = None
        if config:
            try:
                thread_id = self._extract_thread_id_from_config(config)
            except Exception:
                thread_id = None
        if thread_id:
            handler = self._pii_handlers_by_thread.get(thread_id)
            if handler:
                return handler
        handler = ToolPIIHandler.create_if_enabled(predefined_pii_mapping)
        if handler and thread_id:
            self._pii_handlers_by_thread[thread_id] = handler

        return handler

    def _deanonymize_tool_args(
        self,
        pii_handler: ToolPIIHandler | None,
        resolved_args: dict[str, Any],
    ) -> dict[str, Any]:
        """Deanonymize tool arguments using the provided PII handler.

        Args:
            pii_handler: Optional ToolPIIHandler instance.
            resolved_args: Tool arguments after reference resolution.

        Returns:
            Tool arguments with PII tags replaced by real values when a handler
            is available, otherwise the original arguments.
        """
        if not pii_handler:
            return resolved_args
        return pii_handler.deanonymize_tool_args(resolved_args)

    def _anonymize_tool_output(
        self,
        pii_handler: ToolPIIHandler | None,
        tool_output: Any,
    ) -> tuple[Any, dict[str, str] | None]:
        """Anonymize tool output and return updated PII mapping when enabled.

        Args:
            pii_handler: Optional ToolPIIHandler instance.
            tool_output: Raw output returned by the tool.

        Returns:
            Tuple of (possibly anonymized tool_output, updated PII mapping or None).
        """
        if not pii_handler:
            return tool_output, None

        anonymized_output, updated_mapping = pii_handler.anonymize_tool_output(tool_output)
        return anonymized_output, updated_mapping

    def _resolve_tool_arguments(
        self, tool_call: dict[str, Any], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Resolve tool argument references using the tool output manager.

        Args:
            tool_call: The tool call information containing arguments.
            state: Current agent state containing tool output manager.
            config: Optional configuration containing thread_id information.

        Returns:
            Resolved arguments dictionary.

        Raises:
            ToolReferenceError: If reference resolution fails.
        """
        manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
        resolved_args = tool_call["args"]

        if manager and self.tool_output_manager:
            thread_id = self._extract_thread_id_from_config(config)

            if manager.has_outputs(thread_id):
                resolver = ToolReferenceResolver(self.tool_output_manager.config)
                resolved_args = resolver.resolve_references(resolved_args, manager, thread_id)
                logger.debug(
                    f"Resolved references for tool '{tool_call['name']}' in thread '{thread_id}', "
                    f"resolved args: {resolved_args}"
                )

        return resolved_args

    def _create_enhanced_tool_config(
        self, config: dict[str, Any] | None, state: dict[str, Any], tool_name: str, tool_call_id: str
    ) -> dict[str, Any]:
        """Create enhanced tool configuration with output management capabilities.

        Args:
            config: Base configuration passed down to the tool.
            state: Current agent state containing tool output manager.
            tool_name: Name of the tool being executed.
            tool_call_id: Unique identifier for this tool call.

        Returns:
            Enhanced tool configuration dictionary.
        """
        tool_config = self._create_tool_config(config, state, tool_name=tool_name)

        # Add tool output management capabilities
        manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
        if manager and self.tool_output_manager:
            tool_config[TOOL_OUTPUT_MANAGER_KEY] = manager
            tool_config[CALL_ID_KEY] = tool_call_id

        # Attach coordinator parent step id so delegated sub-agents can link their start step properly
        try:
            thread_id = self._extract_thread_id_from_config(config)
            parent_map = self._tool_parent_map_by_thread.get(thread_id, {})
            parent_step_id = parent_map.get(str(tool_call_id))
            if parent_step_id:
                tool_config["parent_step_id"] = parent_step_id
                cfg = tool_config.get("configurable")
                if not isinstance(cfg, dict):
                    cfg = {}
                cfg["parent_step_id"] = parent_step_id
                tool_config["configurable"] = cfg
        except Exception:
            pass

        return tool_config

    def _extract_thread_id_from_config(self, config: dict[str, Any] | None) -> str:
        """Extract thread_id from LangGraph configuration.

        Since BaseLangGraphAgent._create_graph_config() guarantees a thread ID is always present,
        this method should always find a valid thread ID. If config is somehow None (which
        should never happen), creates a new UUID.

        Args:
            config: LangGraph configuration dictionary.

        Returns:
            Thread ID string from the configuration.
        """
        # This should never happen since _create_graph_config always creates config
        if not config:
            thread_id = str(uuid.uuid4())
            logger.warning(f"Agent '{self.name}': No config provided, generated new thread_id: {thread_id}")
            return thread_id

        configurable = config["configurable"]
        thread_key = self.thread_id_key or "thread_id"
        return str(configurable[thread_key])
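
# Shape sketch of the config the methods above read; "thread_id" is the
# default key when thread_id_key is unset.
_demo_config = {"configurable": {"thread_id": "thread-123"}}
_demo_thread_id = str(_demo_config["configurable"]["thread_id"])  # "thread-123"
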
    def _handle_automatic_tool_storage(
        self,
        params: ToolStorageParams,
        config: dict[str, Any] | None = None,
    ) -> None:
        """Handle automatic storage for tools with store_final_output enabled.

        Args:
            params: ToolStorageParams containing all necessary parameters.
            config: Optional configuration containing thread_id information.
        """
        manager = params.state.get(TOOL_OUTPUT_MANAGER_KEY)

        if (
            manager
            and self.tool_output_manager
            and params.tool_output is not None
            and getattr(params.tool, "store_final_output", False)
        ):
            # Extract thread_id from config
            thread_id = self._extract_thread_id_from_config(config)

            storable_data = self._extract_storable_data(params.tool_output)
            store_params = StoreOutputParams(
                call_id=params.tool_call_id,
                tool_name=params.tool_call["name"],
                data=storable_data,
                tool_args=params.resolved_args,
                thread_id=thread_id,
                description=None,  # No automatic description
                tags=None,
                agent_name=self.name,
            )
            manager.store_output(store_params)
            logger.debug(
                f"Auto-stored output for tool '{params.tool_call['name']}' with call_id: {params.tool_call_id} "
                f"in thread: {thread_id}"
            )

    def _process_tool_output_result(
        self,
        tool_output: Any,
        tool_call: dict[str, Any],
        execution_time: float,
        pending_artifacts: list[dict[str, Any]],
    ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
        """Process tool output into messages, artifacts, and metadata.

        Args:
            tool_output: The output returned by the tool.
            tool_call: The tool call information from the AI message.
            execution_time: Time taken to execute the tool.
            pending_artifacts: List of artifacts to be updated with new artifacts from this tool call.

        Returns:
            Tuple of (messages, artifacts, metadata_delta).
        """
        metadata_delta: dict[str, Any] = {}

        # Handle Command outputs
        if isinstance(tool_output, Command):
            return self._handle_command_output(tool_output, tool_call, execution_time, metadata_delta)

        if isinstance(tool_output, dict):
            return self._handle_legacy_output(tool_output, tool_call, execution_time, pending_artifacts, metadata_delta)

        # Handle string outputs, coercing other simple types
        if not isinstance(tool_output, str):
            tool_output = str(tool_output)
        return self._handle_string_output(tool_output, tool_call, execution_time)

    def _handle_command_output(
        self, tool_output: Command, tool_call: dict[str, Any], execution_time: float, metadata_delta: dict[str, Any]
    ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
        """Handle Command type tool outputs.

        Args:
            tool_output: The Command object returned by the tool.
            tool_call: The tool call information containing id, name, and args.
            execution_time: Time taken to execute the tool.
            metadata_delta: Dictionary to accumulate metadata updates into.

        Returns:
            Tuple of (messages, artifacts, updated_metadata_delta).
        """
        messages, artifacts, md_delta = self._process_command_tool_output(
            tool_output=tool_output,
            tool_call=tool_call,
            execution_time=execution_time,
        )
        if md_delta:
            metadata_delta.update(md_delta)

        update: dict[str, Any] = getattr(tool_output, "update", {}) or {}
        pii_mapping = update.get("pii_mapping")
        if isinstance(pii_mapping, dict) and pii_mapping:
            metadata_delta["pii_mapping"] = pii_mapping

        return messages, artifacts, metadata_delta

    def _handle_string_output(
        self, tool_output: str, tool_call: dict[str, Any], execution_time: float
    ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
        """Handle string type tool outputs.

        Args:
            tool_output: The string output from tool execution.
            tool_call: The tool call information containing id, name, and args.
            execution_time: Time taken to execute the tool.

        Returns:
            Tuple of (messages, artifacts, metadata_delta) where artifacts and
            metadata_delta are both empty.
        """
        messages, artifacts = self._process_simple_tool_output(
            agent_result_text=tool_output,
            tool_call=tool_call,
            execution_time=execution_time,
        )
        return messages, artifacts, {}

    def _handle_legacy_output(
        self,
        tool_output: Any,
        tool_call: dict[str, Any],
        execution_time: float,
        pending_artifacts: list[dict[str, Any]],
        metadata_delta: dict[str, Any],
    ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
        """Handle legacy dict and other tool outputs.

        Args:
            tool_output: The output from tool execution (typically a dict).
            tool_call: The tool call information containing id, name, and args.
            execution_time: Time taken to execute the tool.
            pending_artifacts: Current list of pending artifacts to extend with new ones.
            metadata_delta: Dictionary to accumulate metadata updates into.

        Returns:
            Tuple of (messages, updated_pending_artifacts, updated_metadata_delta).
        """
        messages, artifacts = self._process_legacy_tool_output(
            tool_output=tool_output,  # type: ignore[arg-type]
            tool_call=tool_call,
            execution_time=execution_time,
            pending_artifacts=pending_artifacts,
        )

        # Process metadata from legacy dict outputs
        if isinstance(tool_output, dict):
            self._process_legacy_metadata(tool_output, messages, metadata_delta)

        return messages, artifacts, metadata_delta

    def _process_legacy_metadata(
        self, tool_output: dict[str, Any], messages: list[BaseMessage], metadata_delta: dict[str, Any]
    ) -> None:
        """Process metadata from legacy dict tool outputs.

        Args:
            tool_output: The dict tool output containing metadata.
            messages: List of messages to potentially update with metadata.
            metadata_delta: Metadata delta to update.
        """
        md = tool_output.get("metadata")
        if not isinstance(md, dict):
            return

        prev_ids = md.get("previous_step_ids")
        if isinstance(prev_ids, list):
            metadata_delta["previous_step_ids"] = list(prev_ids)
            self._attach_previous_step_ids_to_message(messages, prev_ids)

    def _attach_previous_step_ids_to_message(self, messages: list[BaseMessage], prev_ids: list[Any]) -> None:
        """Attach previous step IDs to the first ToolMessage's response metadata.

        Args:
            messages: List of messages to update.
            prev_ids: Previous step IDs to attach.
        """
        if not messages or not isinstance(messages[0], ToolMessage):
            return

        try:
            tool_message = messages[0]
            tool_message.response_metadata.setdefault("previous_step_ids", [])
            existing = tool_message.response_metadata.get("previous_step_ids", [])
            combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
            tool_message.response_metadata["previous_step_ids"] = combined
        except Exception:
            pass
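
# The dict.fromkeys idiom used above: order-preserving de-duplication when
# merging linkage IDs.
_existing_ids = ["step-1", "step-2"]
_incoming_ids = ["step-2", "step-3"]
_combined_ids = list(dict.fromkeys(_existing_ids + _incoming_ids))
# -> ["step-1", "step-2", "step-3"]
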
    async def _execute_tool_with_streaming(
        self,
        tool: BaseTool,
        tool_call: dict[str, Any],
        tool_config: dict[str, Any] | None = None,
    ) -> str:
        """Execute a tool with streaming support and emit streaming chunks.

        This method dynamically passes all tool arguments to the streaming method
        using **kwargs, making it flexible for tools with different parameter structures.

        Args:
            tool: The tool instance to execute.
            tool_call: The tool call information from the AI message.
            tool_config: Optional configuration passed down to the tool.

        Returns:
            The final output from the tool execution.
        """
        writer: StreamWriter = get_stream_writer()
        final_output: Any = None
        saw_tool_result = False
        start_time = time.time()

        tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")
        tool_name = tool_call.get("name", "")
        tool_args = self._normalize_tool_args(tool_call.get("args"))

        logger.info("Streaming tool start detected: agent=%s tool=%s call_id=%s", self.name, tool_name, tool_call_id)

        try:
            self._emit_default_tool_call_event(writer, tool_name, tool_call_id, tool_args)

            streaming_kwargs = self._build_streaming_kwargs(tool_args, tool_config)

            async for chunk in tool.arun_streaming(**streaming_kwargs):
                final_output, saw_tool_result = self._handle_streaming_chunk(
                    chunk=chunk,
                    writer=writer,
                    tool_name=tool_call["name"],
                    current_output=final_output,
                    saw_tool_result=saw_tool_result,
                )

            final_output = self._finalize_streaming_tool(
                writer=writer,
                tool_name=tool_name,
                tool_call_id=tool_call_id,
                tool_args=tool_args,
                final_output=final_output,
                saw_tool_result=saw_tool_result,
                start_time=start_time,
            )
            logger.info(
                "Streaming tool completed: agent=%s tool=%s call_id=%s",
                self.name,
                tool_name,
                tool_call_id,
            )

        except Exception as e:
            final_output = f"Error during streaming execution of tool '{tool_call['name']}': {str(e)}"
            logger.error(f"Tool streaming error: {final_output}", exc_info=True)
            self._emit_tool_error_event(writer, tool_call["name"], final_output)

        return final_output
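
# Protocol sketch (assumption): a tool that takes the streaming path above
# exposes an async generator -- TOOL_RUN_STREAMING_METHOD, assumed to resolve
# to "arun_streaming" here -- yielding dict chunks; the event_type string
# values below are illustrative.
from typing import Any, AsyncIterator


class DemoStreamingTool:
    name = "demo_search"

    async def arun_streaming(
        self, query: str, config: dict[str, Any] | None = None
    ) -> AsyncIterator[dict[str, Any]]:
        yield {"event_type": "status_update", "content": f"searching for {query}"}
        yield {
            "event_type": "tool_result",
            "tool_info": {"name": self.name, "args": {"query": query}, "output": "3 hits"},
        }
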
|
|
1695
|
+
@staticmethod
|
|
1696
|
+
def _normalize_tool_args(raw_tool_args: Any) -> dict[str, Any]:
|
|
1697
|
+
"""Normalize raw tool arguments into a dictionary.
|
|
1698
|
+
|
|
1699
|
+
Args:
|
|
1700
|
+
raw_tool_args: The raw tool arguments to normalize.
|
|
1701
|
+
|
|
1702
|
+
Returns:
|
|
1703
|
+
A dictionary containing the normalized tool arguments.
|
|
1704
|
+
"""
|
|
1705
|
+
if isinstance(raw_tool_args, dict):
|
|
1706
|
+
return raw_tool_args
|
|
1707
|
+
if raw_tool_args is None:
|
|
1708
|
+
return {}
|
|
1709
|
+
return {"value": raw_tool_args}
|
|
1710
|
+
|
|
1711
|
+
@staticmethod
|
|
1712
|
+
def _build_streaming_kwargs(tool_args: dict[str, Any], tool_config: dict[str, Any] | None) -> dict[str, Any]:
|
|
1713
|
+
"""Create kwargs payload for streaming execution.
|
|
1714
|
+
|
|
1715
|
+
Args:
|
|
1716
|
+
tool_args: The tool arguments to include in the streaming kwargs.
|
|
1717
|
+
tool_config: Optional tool configuration to include.
|
|
1718
|
+
|
|
1719
|
+
Returns:
|
|
1720
|
+
A dictionary containing the streaming kwargs.
|
|
1721
|
+
"""
|
|
1722
|
+
streaming_kwargs = tool_args.copy()
|
|
1723
|
+
if tool_config:
|
|
1724
|
+
streaming_kwargs["config"] = tool_config
|
|
1725
|
+
return streaming_kwargs
|
|
1726
|
+
|
|
1727
|
+
def _handle_streaming_chunk(
|
|
1728
|
+
self,
|
|
1729
|
+
*,
|
|
1730
|
+
chunk: Any,
|
|
1731
|
+
writer: StreamWriter,
|
|
1732
|
+
tool_name: str,
|
|
1733
|
+
current_output: Any,
|
|
1734
|
+
saw_tool_result: bool,
|
|
1735
|
+
) -> tuple[Any, bool]:
|
|
1736
|
+
"""Process a single streaming chunk and update output/result flag.
|
|
1737
|
+
|
|
1738
|
+
Args:
|
|
1739
|
+
chunk: The streaming chunk to process.
|
|
1740
|
+
writer: The stream writer for output.
|
|
1741
|
+
tool_name: The name of the tool being executed.
|
|
1742
|
+
current_output: The current accumulated output.
|
|
1743
|
+
saw_tool_result: Whether a tool result has been seen.
|
|
1744
|
+
|
|
1745
|
+
Returns:
|
|
1746
|
+
A tuple of (updated_output, saw_tool_result).
|
|
1747
|
+
"""
|
|
1748
|
+
if not isinstance(chunk, dict):
|
|
1749
|
+
return current_output, saw_tool_result
|
|
1750
|
+
|
|
1751
|
+
event_type_raw = chunk.get("event_type")
|
|
1752
|
+
event_type = self._resolve_tool_event_type(event_type_raw)
|
|
1753
|
+
if event_type == A2AStreamEventType.TOOL_CALL or (
|
|
1754
|
+
event_type is None
|
|
1755
|
+
and isinstance(event_type_raw, str)
|
|
1756
|
+
and event_type_raw.lower() == A2AStreamEventType.TOOL_CALL.value
|
|
1757
|
+
):
|
|
1758
|
+
return current_output, saw_tool_result
|
|
1759
|
+
|
|
1760
|
+
self._create_tool_streaming_event(chunk, writer, tool_name)
|
|
1761
|
+
new_output = self._extract_output_from_chunk(chunk, current_output)
|
|
1762
|
+
if event_type == A2AStreamEventType.STATUS_UPDATE:
|
|
1763
|
+
metadata = chunk.get("metadata")
|
|
1764
|
+
kind = None
|
|
1765
|
+
if isinstance(metadata, dict):
|
|
1766
|
+
kind = metadata.get(MetadataFieldKeys.KIND)
|
|
1767
|
+
if getattr(kind, "value", kind) == Kind.FINAL_THINKING_STEP.value:
|
|
1768
|
+
return new_output, True
|
|
1769
|
+
if event_type == A2AStreamEventType.TOOL_RESULT:
|
|
1770
|
+
return new_output, True
|
|
1771
|
+
return new_output, saw_tool_result
|
|
1772
|
+
|
|
1773
|
+
def _emit_default_tool_call_event(
|
|
1774
|
+
self,
|
|
1775
|
+
writer: StreamWriter,
|
|
1776
|
+
tool_name: str,
|
|
1777
|
+
tool_call_id: str,
|
|
1778
|
+
tool_args: dict[str, Any],
|
|
1779
|
+
) -> None:
|
|
1780
|
+
"""Emit a standardized TOOL_CALL event for streaming tools.
|
|
1781
|
+
|
|
1782
|
+
Args:
|
|
1783
|
+
writer: The stream writer to emit events to.
|
|
1784
|
+
tool_name: Name of the tool being called.
|
|
1785
|
+
tool_call_id: Unique identifier for the tool call.
|
|
1786
|
+
tool_args: Arguments passed to the tool.
|
|
1787
|
+
"""
|
|
1788
|
+
thread_id = _THREAD_ID_CVAR.get()
|
|
1789
|
+
if thread_id:
|
|
1790
|
+
emitted = self._emitted_tool_calls_by_thread.get(thread_id, set())
|
|
1791
|
+
if tool_call_id in emitted:
|
|
1792
|
+
logger.info(
|
|
1793
|
+
"Skipping fallback tool call event: agent=%s tool=%s call_id=%s",
|
|
1794
|
+
self.name,
|
|
1795
|
+
tool_name,
|
|
1796
|
+
tool_call_id,
|
|
1797
|
+
)
|
|
1798
|
+
return
|
|
1799
|
+
|
|
1800
|
+
tool_call_info = {
|
|
1801
|
+
"tool_calls": [
|
|
1802
|
+
{
|
|
1803
|
+
"id": tool_call_id,
|
|
1804
|
+
"name": tool_name,
|
|
1805
|
+
"args": tool_args,
|
|
1806
|
+
}
|
|
1807
|
+
],
|
|
1808
|
+
"status": "running",
|
|
1809
|
+
}
|
|
1810
|
+
metadata = {
|
|
1811
|
+
MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
|
|
1812
|
+
MetadataFieldKeys.STATUS: Status.RUNNING,
|
|
1813
|
+
MetadataFieldKeys.TOOL_INFO: tool_call_info,
|
|
1814
|
+
}
|
|
1815
|
+
activity_info = create_tool_activity_info({"tool_info": tool_call_info})
|
|
1816
|
+
event = {
|
|
1817
|
+
"event_type": A2AStreamEventType.TOOL_CALL,
|
|
1818
|
+
"content": f"Processing with tools: {tool_name}",
|
|
1819
|
+
"metadata": metadata,
|
|
1820
|
+
"tool_info": tool_call_info,
|
|
1821
|
+
"thinking_and_activity_info": activity_info,
|
|
1822
|
+
}
|
|
1823
|
+
self._create_tool_streaming_event(event, writer, tool_name)
|
|
1824
|
+
|
|
1825
|
+
@staticmethod
|
|
1826
|
+
def _extract_output_from_chunk(chunk: dict[str, Any], current_output: Any) -> Any:
|
|
1827
|
+
"""Return most recent tool output derived from streaming chunk.
|
|
1828
|
+
|
|
1829
|
+
Args:
|
|
1830
|
+
chunk: The streaming chunk containing tool information.
|
|
1831
|
+
current_output: The current output value to fall back to.
|
|
1832
|
+
|
|
1833
|
+
Returns:
|
|
1834
|
+
The extracted output from the chunk or the current_output if not found.
|
|
1835
|
+
"""
|
|
1836
|
+
tool_info = chunk.get("tool_info")
|
|
1837
|
+
if isinstance(tool_info, dict):
|
|
1838
|
+
return tool_info.get("output", current_output)
|
|
1839
|
+
return current_output

    def _finalize_streaming_tool(
        self,
        *,
        writer: StreamWriter,
        tool_name: str,
        tool_call_id: str,
        tool_args: dict[str, Any],
        final_output: Any,
        saw_tool_result: bool,
        start_time: float,
    ) -> str:
        """Emit final tool event when needed and return final output as string.

        Args:
            writer: The stream writer to emit events to.
            tool_name: Name of the tool being called.
            tool_call_id: Unique identifier for the tool call.
            tool_args: Arguments passed to the tool.
            final_output: The final output from the tool execution.
            saw_tool_result: Whether a TOOL_RESULT event was observed during streaming.
            start_time: Timestamp when the tool execution started.

        Returns:
            The final output as a string.
        """
        output_text = final_output
        if output_text is None:
            output_text = f"Tool '{tool_name}' completed successfully"
        if not isinstance(output_text, str):
            output_text = str(output_text)

        logger.debug(
            "Streaming tool finalize check: agent=%s tool=%s call_id=%s saw_tool_result=%s",
            self.name,
            tool_name,
            tool_call_id,
            saw_tool_result,
        )
        if not saw_tool_result:
            logger.debug(
                "Streaming tool finalize emitting default result: agent=%s tool=%s call_id=%s",
                self.name,
                tool_name,
                tool_call_id,
            )
            self._emit_default_tool_result_event(
                writer=writer,
                tool_name=tool_name,
                tool_call_id=tool_call_id,
                tool_args=tool_args,
                output_text=output_text,
                start_time=start_time,
            )

        return output_text
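    # Editor's note (illustrative only): the finalizer above guarantees a
    # string result even when a tool produced nothing. Assuming a streaming
    # run where no TOOL_RESULT was observed (saw_tool_result=False) and
    # final_output is None, the returned text falls back to the synthesized
    # message, e.g. "Tool 'web_search' completed successfully" for the
    # hypothetical tool name "web_search", and a default TOOL_RESULT event
    # is emitted via _emit_default_tool_result_event.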

    def _emit_default_tool_result_event(
        self,
        *,
        writer: StreamWriter,
        tool_name: str,
        tool_call_id: str,
        tool_args: dict[str, Any],
        output_text: str,
        start_time: float,
    ) -> None:
        """Emit a standardized TOOL_RESULT event for streaming tools.

        Args:
            writer: The stream writer to emit events to.
            tool_name: Name of the tool that was executed.
            tool_call_id: Unique identifier for the tool call.
            tool_args: Arguments passed to the tool.
            output_text: The output text from the tool execution.
            start_time: Timestamp when the tool execution started.
        """
        execution_time = time.time() - start_time
        tool_result_info = {
            "name": tool_name,
            "args": tool_args,
            "output": output_text,
            "execution_time": execution_time,
        }
        metadata = {
            MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
            MetadataFieldKeys.STATUS: Status.FINISHED,
            MetadataFieldKeys.TOOL_INFO: tool_result_info,
        }
        activity_info = create_tool_activity_info({"tool_info": tool_result_info})
        event = {
            "event_type": A2AStreamEventType.TOOL_RESULT,
            "content": output_text,
            "metadata": metadata,
            "tool_info": tool_result_info,
            "thinking_and_activity_info": activity_info,
        }
        self._create_tool_streaming_event(event, writer, tool_name)
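    # Editor's sketch (illustrative only): shape of the event the method above
    # hands to _create_tool_streaming_event. Key names mirror the code; the
    # concrete values are hypothetical.
    #
    #     {
    #         "event_type": A2AStreamEventType.TOOL_RESULT,
    #         "content": "sunny, 21C",
    #         "metadata": {KIND: AGENT_THINKING_STEP, STATUS: FINISHED, TOOL_INFO: ...},
    #         "tool_info": {"name": "get_weather", "args": {"city": "Oslo"},
    #                       "output": "sunny, 21C", "execution_time": 0.42},
    #         "thinking_and_activity_info": ...,  # from create_tool_activity_info
    #     }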

    def _emit_tool_error_event(self, writer: StreamWriter, tool_name: str, error_msg: str) -> None:
        """Emit a tool error event to the stream.

        Args:
            writer: Stream writer to emit events.
            tool_name: Name of the tool that encountered an error.
            error_msg: The error message.
        """
        a2a_event = self._create_a2a_event(
            event_type=A2AStreamEventType.ERROR,
            content=f"Error in {tool_name}: {error_msg}",
            tool_info={
                "name": tool_name,
                "error": error_msg,
            },
        )
        writer(a2a_event)

    def _execute_before_model_hook(self, state: dict[str, Any]) -> None:
        """Execute before_model middleware hook and update state.

        Args:
            state: Current agent state to potentially update.
        """
        if self._middleware_manager:
            try:
                before_updates = self._middleware_manager.before_model(state)
                if before_updates:
                    state.update(before_updates)
            except Exception as e:
                logger.error(f"Agent '{self.name}': Middleware before_model hook failed: {e}")

    def _execute_modify_model_request_hook(
        self, messages: list[Any], enhanced_instruction: str, state: dict[str, Any]
    ) -> tuple[list[Any], str]:
        """Execute modify_model_request middleware hook.

        Args:
            messages: Current messages to potentially modify.
            enhanced_instruction: Current system prompt to potentially modify.
            state: Current agent state for context.

        Returns:
            Tuple of (potentially modified messages, potentially modified system prompt).
        """
        if not self._middleware_manager:
            return messages, enhanced_instruction

        try:
            model_request: ModelRequest = {
                "messages": messages,
                "tools": self.resolved_tools or [],
                "system_prompt": enhanced_instruction,
            }
            model_request = self._middleware_manager.modify_model_request(model_request, state)

            modified_messages = model_request.get("messages", messages)
            modified_prompt = model_request.get("system_prompt", enhanced_instruction)

            return modified_messages, modified_prompt
        except Exception as e:
            logger.error(f"Agent '{self.name}': Middleware modify_model_request hook failed: {e}")
            return messages, enhanced_instruction

    def _execute_after_model_hook(self, state_updates: dict[str, Any], state: dict[str, Any]) -> None:
        """Execute after_model middleware hook and update state_updates.

        Args:
            state_updates: Dictionary to update with middleware changes.
            state: Current agent state for context.
        """
        if self._middleware_manager:
            try:
                after_updates = self._middleware_manager.after_model(state)
                if after_updates:
                    state_updates.update(after_updates)
            except Exception as e:
                logger.error(f"Agent '{self.name}': Middleware after_model hook failed: {e}")

    async def _handle_lm_invoker_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LMInvoker model calls with bridge conversion and tool output context.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.
        """
        # Execute before_model middleware hook
        self._execute_before_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        # Execute modify_model_request middleware hook
        _, enhanced_instruction = self._execute_modify_model_request_hook(
            list(current_messages), enhanced_instruction, state
        )

        messages = convert_langchain_messages_to_gllm_messages(list(current_messages), enhanced_instruction)

        effective_event_emitter = state.get("event_emitter") or self.event_emitter

        if self.resolved_tools:
            self.lm_invoker.set_tools(self.resolved_tools)

        # Debug timing for LLM invocation
        _t0 = time.perf_counter()
        logger.info(f"Agent '{self.name}': LLM invoke start (tools={len(self.resolved_tools)})")
        lm_output = await self.lm_invoker.invoke(messages=messages, event_emitter=effective_event_emitter)
        _dt = time.perf_counter() - _t0
        logger.info(f"Agent '{self.name}': LLM invoke finished in {_dt:.3f}s")

        ai_message = convert_lm_output_to_langchain_message(lm_output)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        self._execute_after_model_hook(state_updates, state)

        return state_updates
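    # Editor's note: both model-call paths share one hook ordering:
    # before_model -> build tool-output-aware instruction ->
    # modify_model_request -> model invocation -> token-usage extraction ->
    # after_model. In this LMInvoker path the middleware's modified message
    # list is deliberately discarded (only the prompt survives), apparently
    # because convert_langchain_messages_to_gllm_messages above consumes the
    # original LangChain messages.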

    async def _handle_langchain_model_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LangChain BaseChatModel calls with tool output context.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.
        """
        # Execute before_model middleware hook
        self._execute_before_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        langchain_prompt: list[BaseMessage] = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        # Execute modify_model_request middleware hook
        langchain_prompt, enhanced_instruction = self._execute_modify_model_request_hook(
            langchain_prompt, enhanced_instruction, state
        )

        # Rebuild prompt if needed (invalid structure or system prompt was modified)
        if (
            not langchain_prompt
            or not isinstance(langchain_prompt[0], SystemMessage)
            or langchain_prompt[0].content != enhanced_instruction
        ):
            langchain_prompt = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        model_with_tools = self.model.bind_tools(self.resolved_tools) if self.resolved_tools else self.model

        ai_message = await model_with_tools.ainvoke(langchain_prompt, config)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        self._execute_after_model_hook(state_updates, state)

        return state_updates

    def _add_user_id_memory_tool_config(self, metadata: dict[str, Any], memory_user_id: str) -> None:
        """Add user ID to memory tool config.

        Args:
            metadata: The metadata to add the user ID to.
            memory_user_id: The user ID to add.
        """
        try:
            tool_cfgs = metadata.get(TOOL_CONFIGS_KEY, {})
            per_tool_config = tool_cfgs.get(MEMORY_SEARCH_TOOL_NAME)
            if not isinstance(per_tool_config, dict):
                per_tool_config = {}
            per_tool_config["user_id"] = memory_user_id
            tool_cfgs[MEMORY_SEARCH_TOOL_NAME] = per_tool_config
            metadata[TOOL_CONFIGS_KEY] = tool_cfgs
        except Exception as e:
            # Non-fatal; metadata injection is best-effort
            logger.warning("Failed to add user ID to memory tool config: %s", e)

    def _prepare_graph_input(self, input_data: str | dict[str, Any], **kwargs: Any) -> dict[str, Any]:
        """Convert user input to graph state format.

        Extracts mixed metadata schema supporting per-tool configuration.
        Delegation tools are isolated and do not receive parent per-tool metadata.
        Initializes tool output management for efficient tool result sharing.

        Args:
            input_data: The user's input (typically a query string).
            **kwargs: Additional keyword arguments including optional metadata.
                - thread_id: Thread identifier passed from _create_graph_config.

        Returns:
            Dictionary representing the initial graph state with messages, metadata, artifacts,
            and tool output management components.
        """
        if isinstance(input_data, str):
            query = input_data
        elif isinstance(input_data, dict) and "query" in input_data:
            query = input_data["query"]
        else:
            raise TypeError(f"Unsupported input type for LangGraphReactAgent: {type(input_data)}")

        existing_messages = kwargs.get("messages", []) or []
        messages: list[BaseMessage] = existing_messages + [HumanMessage(content=query)]

        # Extract metadata for tools and agent context
        metadata = self._extract_metadata_from_kwargs(**kwargs)

        # If caller specified memory_user_id, inject it as per-tool config for the Mem0 tool
        memory_user_id: str | None = kwargs.get("memory_user_id")
        if memory_user_id and self._memory_enabled():
            self._add_user_id_memory_tool_config(metadata, memory_user_id)

        # thread_id is passed explicitly from the caller after _create_graph_config
        thread_id = kwargs.get("thread_id")

        # Use the agent's tool output manager (shared or private)
        step_limit_config = kwargs.get("step_limit_config") or self.step_limit_config

        # Step limit context inheritance (Spec-2)
        try:
            inherited_depth = _DELEGATION_DEPTH_CVAR.get()
        except LookupError:
            inherited_depth = 0

        try:
            inherited_chain = list(_DELEGATION_CHAIN_CVAR.get())
        except LookupError:
            inherited_chain = []

        try:
            inherited_budget = _REMAINING_STEP_BUDGET_CVAR.get()
        except LookupError:
            inherited_budget = None

        # Set step_limit_config in ContextVar so delegation tools can access it
        if step_limit_config:
            _STEP_LIMIT_CONFIG_CVAR.set(step_limit_config)

        graph_input = {
            "messages": messages,
            "event_emitter": kwargs.get("event_emitter"),
            "artifacts": [],
            "metadata": metadata,
            "tool_output_manager": self.tool_output_manager,
            "thread_id": thread_id,
            # Step limit state initialization
            "current_step": 0,  # Start at step 0
            "delegation_depth": inherited_depth,
            "delegation_chain": inherited_chain,
            "step_limit_config": asdict(step_limit_config) if step_limit_config else None,
            "remaining_step_budget": inherited_budget,
        }

        return graph_input
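    # Editor's sketch (illustrative only): a typical call shape and the keys
    # of the state it returns; argument values are hypothetical, and the
    # exact metadata handling depends on _extract_metadata_from_kwargs.
    #
    #     state = agent._prepare_graph_input(
    #         "Summarize the Q3 report",
    #         thread_id="thread-1",
    #         metadata={"tool_configs": {"search_tool": {"max_results": 5}}},
    #     )
    #     # state keys: messages, event_emitter, artifacts, metadata,
    #     # tool_output_manager, thread_id, current_step, delegation_depth,
    #     # delegation_chain, step_limit_config, remaining_step_budget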

    def _resolve_tool_metadata(self, tool_name: str, metadata: dict[str, Any] | None) -> dict[str, Any]:
        """Resolve effective metadata for a specific tool given the mixed schema.

        Metadata Resolution Hierarchy (lowest to highest precedence):

        1. Agent-level flat defaults: Apply to all tools from self.tool_configs
           - Skips 'tool_configs' key and dict values (per-tool configs)

        2. Agent-level per-tool defaults: From self.tool_configs[tool_name] or
           self.tool_configs['tool_configs'][tool_name]

        3. Request-level global metadata: From metadata kwargs, excluding 'tool_configs' key

        4. Request-level per-tool metadata: From metadata['tool_configs'][tool_name]
           - Highest precedence, overrides all previous layers

        Tool names are sanitized for consistent lookup across all layers.

        Args:
            tool_name: Sanitized runtime tool name (e.g., 'delegate_to_report_generator')
            metadata: Raw metadata from kwargs (flat dict or mixed schema)

        Returns:
            Merged metadata for this tool with proper precedence hierarchy applied.
        """
        effective_metadata: dict[str, Any] = {}

        # Layer 1: Agent-level defaults (lowest precedence)
        self._apply_agent_defaults(effective_metadata, tool_name)

        # Layer 2: Request-level global metadata (middle precedence)
        self._apply_global_metadata(effective_metadata, metadata)

        # Layer 3: Request-level per-tool metadata (highest precedence)
        self._apply_per_tool_metadata(effective_metadata, tool_name, metadata)

        return effective_metadata
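    # Editor's sketch (illustrative only): how the layers merge for a tool
    # named "search_tool", with hypothetical values and assuming the name
    # sanitizer leaves "search_tool" unchanged. Later layers win on key
    # collisions.
    #
    #     self.tool_configs = {"api_timeout": 30,                   # flat default
    #                          "search_tool": {"max_results": 10}}  # agent per-tool
    #     metadata = {"locale": "en",                               # request global
    #                 "tool_configs": {"search_tool": {"max_results": 5}}}  # request per-tool
    #     self._resolve_tool_metadata("search_tool", metadata)
    #     # -> {"api_timeout": 30, "locale": "en", "max_results": 5}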

    def _apply_agent_defaults(self, effective_metadata: dict[str, Any], tool_name: str) -> None:
        """Apply agent-level default configurations to effective metadata.

        This method implements a 3-layer agent configuration hierarchy:

        1. Flat agent defaults: Apply to ALL tools from self.tool_configs
           - Processes top-level key-value pairs (excluding TOOL_CONFIGS_KEY)
           - Skips dictionary values as they are per-tool configurations
           - Example: {"api_timeout": 30, "retry_count": 3}

        2. Agent per-tool defaults (direct key mapping): From self.tool_configs[tool_name]
           - Direct tool name as key in agent configuration
           - Example: self.tool_configs["search_tool"] = {"max_results": 10}

        3. Agent per-tool defaults (nested structure): From self.tool_configs[TOOL_CONFIGS_KEY][tool_name]
           - Tool configurations nested under TOOL_CONFIGS_KEY
           - Tool names are sanitized for consistent lookup
           - Example: self.tool_configs["tool_configs"]["search_tool"] = {"max_results": 10}

        Configuration Precedence (later layers override earlier ones):
        Flat defaults < Direct per-tool < Nested per-tool

        Args:
            effective_metadata: The metadata dict to update with agent defaults
            tool_name: The sanitized tool name to apply configurations for
        """
        if not isinstance(self.tool_configs, dict):
            return

        # Flat agent defaults (apply to all tools)
        for k, v in self.tool_configs.items():
            if k != TOOL_CONFIGS_KEY and not isinstance(v, dict):
                effective_metadata[k] = v

        # Agent per-tool defaults (direct key mapping)
        agent_direct = self.tool_configs.get(tool_name)
        if isinstance(agent_direct, dict):
            effective_metadata.update(agent_direct)

        # Agent per-tool defaults (nested under 'tool_configs')
        agent_nested_map = self.tool_configs.get(TOOL_CONFIGS_KEY)
        if isinstance(agent_nested_map, dict):
            sanitized_map = self._sanitize_tool_names_map(agent_nested_map)
            agent_nested = sanitized_map.get(tool_name)
            if isinstance(agent_nested, dict):
                effective_metadata.update(agent_nested)

    def _apply_global_metadata(self, effective_metadata: dict[str, Any], metadata: dict[str, Any] | None) -> None:
        """Apply request-level global metadata to effective metadata.

        Args:
            effective_metadata: The metadata dict to update
            metadata: Raw metadata from request
        """
        if not (metadata and isinstance(metadata, dict)):
            return

        # Extract global metadata (excluding per-tool section)
        global_metadata = {k: v for k, v in metadata.items() if k != TOOL_CONFIGS_KEY}
        effective_metadata.update(global_metadata)

    def _apply_per_tool_metadata(
        self, effective_metadata: dict[str, Any], tool_name: str, metadata: dict[str, Any] | None
    ) -> None:
        """Apply request-level per-tool metadata to effective metadata.

        Args:
            effective_metadata: The metadata dict to update
            tool_name: The sanitized tool name
            metadata: Raw metadata from request
        """
        if metadata and isinstance(metadata, dict):
            tools_metadata = metadata.get(TOOL_CONFIGS_KEY, {})
            if isinstance(tools_metadata, dict):
                sanitized_tools_map = self._sanitize_tool_names_map(tools_metadata)
                tool_specific = sanitized_tools_map.get(tool_name, {})
                if isinstance(tool_specific, dict):
                    effective_metadata.update(tool_specific)

    def _sanitize_tool_names_map(self, tools_map: dict[str, Any]) -> dict[str, Any]:
        """Sanitize tool names in a mapping for consistent lookup.

        Args:
            tools_map: Dictionary with potentially unsanitized tool names as keys

        Returns:
            Dictionary with sanitized tool names as keys
        """
        sanitized_map = {}
        for user_key, tool_meta in tools_map.items():
            sanitized_key = self.name_preprocessor.sanitize_tool_name(user_key)
            sanitized_map[sanitized_key] = tool_meta
        return sanitized_map

    def _create_tool_config(
        self, base_config: dict[str, Any] | None, state: dict[str, Any], tool_name: str | None = None
    ) -> dict[str, Any]:
        """Create enriched tool configuration with metadata and context.

        Args:
            base_config: The base configuration passed to the tool node.
            state: The current agent state containing metadata and other context.
            tool_name: Optional tool name for per-tool metadata resolution.

        Returns:
            dict[str, Any]: Enriched configuration for tool execution.
        """
        tool_config = base_config.copy() if base_config else {}

        state_metadata = state.get("metadata")
        if tool_name:
            effective_metadata = self._resolve_tool_metadata(tool_name, state_metadata)
        else:
            effective_metadata = state_metadata if isinstance(state_metadata, dict) else {}

        if effective_metadata:
            if "metadata" not in tool_config:
                tool_config["metadata"] = effective_metadata
            else:
                tool_config["metadata"].update(effective_metadata)
            logger.debug(f"Agent '{self.name}': Passing metadata to tool '{tool_name}': {effective_metadata}")

        return tool_config
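    # Editor's sketch (illustrative only): merging behaviour of the method
    # above with hypothetical inputs. An existing "metadata" entry in
    # base_config is updated in place rather than replaced.
    #
    #     cfg = self._create_tool_config(
    #         {"metadata": {"trace_id": "t-9"}},
    #         state={"metadata": {"locale": "en"}},
    #         tool_name="search_tool",
    #     )
    #     # cfg["metadata"] now holds "trace_id" plus "locale" and any
    #     # agent-level defaults resolved for "search_tool".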

    def _extract_storable_data(self, tool_output: Any) -> Any:
        """Extract storable data from tool output for the tool output management system.

        This method determines what part of a tool's output should be stored for later
        reference by other tools. It handles different output formats and extracts the
        most relevant data for storage.

        The extraction logic varies by type:
        - Command objects: Extracts the 'result' field from the update dict, or the entire update dict
        - String objects: Returns the string as-is
        - Dict objects: Returns the 'result' key if present, otherwise the entire dict
        - Other types: Converts to string representation

        This method is used in the tool output management system to automatically store
        outputs from tools that have `store_final_output=True` set. The extracted data can
        then be referenced by other tools using the `$tool_output.<call_id>` syntax.

        Example:
            For a Command object with update = {"result": "success", "data": [1, 2, 3]},
            this method would return "success".

            For a dict = {"result": "completed", "status": "ok"},
            this method would return "completed".

            For a dict = {"status": "ok", "data": [1, 2, 3]} (no "result" key),
            this method would return the entire dict.

        Args:
            tool_output: The raw output from a tool execution. Can be any type including
                Command, str, dict, or other objects.

        Returns:
            The data that should be stored in the tool output management system.
            The return type depends on the input type:
            - Command -> dict or the value of update.get("result")
            - str -> str (unchanged)
            - dict -> dict (either the value of .get("result") or the original dict)
            - other -> str (string representation of the object)
        """
        if isinstance(tool_output, Command):
            update = getattr(tool_output, "update", {}) or {}
            return update.get("result", update)
        elif isinstance(tool_output, str):
            return tool_output
        elif isinstance(tool_output, dict):
            return tool_output.get("result", tool_output)
        else:
            return str(tool_output)

    def _build_tool_output_aware_instruction(
        self, base_instruction: str, state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> str:
        """Build LLM instruction that includes context about available tool outputs.

        This method enhances the base instruction with information about previously
        stored tool outputs, allowing the LLM to make informed decisions about
        which outputs to reference in subsequent tool calls.

        Args:
            base_instruction: The original system instruction for the agent.
            state: Current agent state containing the tool output manager.
            config: Optional configuration containing thread_id information.

        Returns:
            Enhanced instruction string that includes tool output context.
        """
        manager = state.get(TOOL_OUTPUT_MANAGER_KEY)

        if not manager or not self.tool_output_manager:
            return base_instruction

        thread_id = self._extract_thread_id_from_config(config)

        if not manager.has_outputs(thread_id):
            return base_instruction
        outputs_summary = manager.generate_summary(max_entries=10, thread_id=thread_id)

        # Build enhanced instruction
        prompt = dedent(f"""
            {base_instruction}

            <TOOL_OUTPUT_REFERENCES>

            # Goal
            - Use the most relevant stored tool output via "$tool_output.<call_id>" to avoid copying large data.

            # Usage
            - Syntax: "$tool_output.<call_id>" in any tool argument; returns the full stored output.
            - IDs: Use only those listed below; do not invent or modify.
            - Selection: Pick the most relevant (usually most recent).
            - Don’ts: Don’t paste raw output or expand references.
            - Errors: Invalid/missing IDs fail—ask for the correct call_id or run the prerequisite tool.

            # Example
            - tool_name.run(tool_argument="$tool_output.abc123")

            # User Output Schema
            - "reference": "$tool_output.<call_id>", "tool": "<tool_name>", "agent": "<agent_name>", "data_preview": "<truncated preview>"

            Available Outputs
            {outputs_summary}
            </TOOL_OUTPUT_REFERENCES>
        """)  # noqa: E501
        return prompt
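    # Editor's note: the method above relies on a small manager protocol that
    # can be inferred from its call sites (signatures inferred, not quoted
    # from the manager's definition):
    #
    #     has_outputs(thread_id) -> bool
    #     generate_summary(max_entries: int, thread_id) -> str
    #
    # The summary string is spliced verbatim into <TOOL_OUTPUT_REFERENCES>,
    # so it is presumably pre-formatted to match the schema the prompt
    # advertises.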

    def _cleanup_thread_context(self, current_thread_id: str | None, token: Any) -> None:
        """Extend base cleanup to dispose cached PII handlers.

        Args:
            current_thread_id: ID of the thread whose context is being cleaned up.
            token: Cancellation or execution token passed from the caller.

        Returns:
            None. This method performs cleanup side effects only.
        """
        super()._cleanup_thread_context(current_thread_id, token)
        if current_thread_id:
            self._pii_handlers_by_thread.pop(current_thread_id, None)

    def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
        """Convert final graph state to user-friendly output.

        Args:
            final_state_result: The final state from graph execution.

        Returns:
            Formatted output dictionary.
        """
        return self._extract_output_from_final_state(final_state_result)


class LangGraphAgent(LangGraphReactAgent):
    """Alias for LangGraphReactAgent."""


class LangChainAgent(LangGraphReactAgent):
    """Alias for LangGraphReactAgent."""