aip-agents-binary 0.5.25b1__py3-none-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/__init__.pyi +19 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/__init__.pyi +3 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/__init__.pyi +4 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/base_executor.pyi +73 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/google_adk_executor.pyi +51 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langflow_executor.pyi +43 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/server/langgraph_executor.pyi +47 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/a2a/types.pyi +132 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/__init__.pyi +9 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_agent.pyi +221 -0
- aip_agents/agent/base_langgraph_agent.py +2948 -0
- aip_agents/agent/base_langgraph_agent.pyi +232 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_agent.pyi +141 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/google_adk_constants.pyi +3 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/__init__.pyi +6 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/config.pyi +15 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/manager.pyi +200 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/models.pyi +3 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/base.pyi +24 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/hitl/registry.pyi +101 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interface.pyi +81 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/interfaces.pyi +44 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langflow_agent.pyi +133 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
- aip_agents/agent/langgraph_react_agent.py +2596 -0
- aip_agents/agent/langgraph_react_agent.pyi +131 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/agent/system_instruction_context.pyi +13 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/__init__.pyi +4 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.pyi +4 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/client.pyi +140 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/clients/langflow/types.pyi +7 -0
- aip_agents/constants.py +23 -0
- aip_agents/constants.pyi +7 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/__init__.pyi +0 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_client.pyi +48 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/compare_streaming_server.pyi +18 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/demo_memory_recall.pyi +58 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain.pyi +5 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.py +44 -0
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_pii_logger.pyi +5 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_sentry.pyi +21 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_step_limits.pyi +17 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/hitl_demo.pyi +67 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/__init__.pyi +9 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_generator_tool.pyi +15 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/random_chart_tool.pyi +20 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/serper_tool.pyi +16 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/stock_tools.pyi +36 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/table_generator_tool.pyi +22 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/time_tool.pyi +15 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/guardrails/__init__.py +83 -0
- aip_agents/guardrails/__init__.pyi +6 -0
- aip_agents/guardrails/engines/__init__.py +69 -0
- aip_agents/guardrails/engines/__init__.pyi +4 -0
- aip_agents/guardrails/engines/base.py +90 -0
- aip_agents/guardrails/engines/base.pyi +61 -0
- aip_agents/guardrails/engines/nemo.py +101 -0
- aip_agents/guardrails/engines/nemo.pyi +46 -0
- aip_agents/guardrails/engines/phrase_matcher.py +113 -0
- aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
- aip_agents/guardrails/exceptions.py +39 -0
- aip_agents/guardrails/exceptions.pyi +23 -0
- aip_agents/guardrails/manager.py +163 -0
- aip_agents/guardrails/manager.pyi +42 -0
- aip_agents/guardrails/middleware.py +199 -0
- aip_agents/guardrails/middleware.pyi +87 -0
- aip_agents/guardrails/schemas.py +63 -0
- aip_agents/guardrails/schemas.pyi +43 -0
- aip_agents/guardrails/utils.py +45 -0
- aip_agents/guardrails/utils.pyi +19 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/__init__.pyi +0 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/__init__.pyi +5 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/base_mcp_client.pyi +148 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/connection_manager.pyi +48 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/google_adk/client.pyi +75 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/__init__.pyi +3 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/langchain/client.pyi +48 -0
- aip_agents/mcp/client/persistent_session.py +362 -0
- aip_agents/mcp/client/persistent_session.pyi +113 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/session_pool.pyi +101 -0
- aip_agents/mcp/client/transports.py +228 -0
- aip_agents/mcp/client/transports.pyi +123 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/__init__.pyi +0 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/mcp/utils/config_validator.pyi +82 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/__init__.pyi +5 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/__init__.pyi +4 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/base_adapter.pyi +150 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/adapters/mem0.pyi +22 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/base.pyi +60 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/constants.pyi +25 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/factory.pyi +24 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/guidance.pyi +3 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/memory/simple_memory.pyi +23 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/__init__.pyi +5 -0
- aip_agents/middleware/base.py +96 -0
- aip_agents/middleware/base.pyi +75 -0
- aip_agents/middleware/manager.py +150 -0
- aip_agents/middleware/manager.pyi +84 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/middleware/todolist.pyi +125 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/__init__.pyi +9 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/a2a.pyi +40 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/agent.pyi +65 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/hitl.pyi +89 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/langgraph.pyi +28 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/model_id.pyi +54 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/step_limit.pyi +63 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/schema/storage.pyi +21 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/__init__.pyi +3 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/sentry/sentry.pyi +48 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/__init__.pyi +8 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/base.pyi +58 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/__init__.pyi +3 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/clients/minio_client.pyi +137 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/config.pyi +29 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/__init__.pyi +5 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/base.pyi +88 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/memory.pyi +79 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/storage/providers/object_storage.pyi +98 -0
- aip_agents/tools/__init__.py +53 -0
- aip_agents/tools/__init__.pyi +9 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/__init__.pyi +14 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/action_parser.pyi +18 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/llm_config.pyi +52 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/minio_storage.pyi +109 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/schemas.pyi +32 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session.pyi +4 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/session_errors.pyi +53 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/streaming.pyi +81 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/browser_use/types.pyi +45 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/__init__.pyi +3 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/constant.pyi +4 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +306 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
- aip_agents/tools/constants.py +177 -0
- aip_agents/tools/constants.pyi +138 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/__init__.pyi +7 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/base_reader.pyi +75 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/__init__.pyi +3 -0
- aip_agents/tools/gl_connector/tool.py +383 -0
- aip_agents/tools/gl_connector/tool.pyi +74 -0
- aip_agents/tools/gl_connector_tools.py +119 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/__init__.pyi +5 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/base.pyi +69 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/mem0.pyi +19 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search/schema.pyi +15 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/memory_search_tool.pyi +3 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/time_tool.pyi +16 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/tool_config_injector.pyi +26 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/__init__.pyi +3 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/tools/web_search/serper_tool.pyi +19 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/__init__.pyi +36 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/types/a2a_events.pyi +3 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/__init__.pyi +11 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/a2a_connector.pyi +146 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/artifact_helpers.pyi +203 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/constants.pyi +10 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/__init__.pyi +4 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/normalization.pyi +95 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/datetime/timezone.pyi +48 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/env_loader.pyi +10 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/event_handler_registry.pyi +23 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/file_prompt_utils.pyi +21 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/final_response_builder.pyi +34 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/formatter_llm_client.pyi +71 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/__init__.pyi +3 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/converter.pyi +49 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +292 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/logger.pyi +60 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/__init__.pyi +5 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/metadata_helper.pyi +117 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/__init__.pyi +5 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_handler.pyi +96 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/pii_helper.pyi +78 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/reference_helper.pyi +81 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/sse_chunk_transformer.pyi +166 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/step_limit_manager.pyi +112 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents/utils/token_usage_helper.pyi +60 -0
- aip_agents_binary-0.5.25b1.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.25b1.dist-info/RECORD +566 -0
- aip_agents_binary-0.5.25b1.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.25b1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2948 @@
|
|
|
1
|
+
"""Base class for LangGraph-based agent implementations.
|
|
2
|
+
|
|
3
|
+
This class provides the core LangGraph machinery including graph compilation,
|
|
4
|
+
state handling, and I/O mapping for LangGraph agents.
|
|
5
|
+
|
|
6
|
+
Authors:
|
|
7
|
+
Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
|
|
8
|
+
Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
|
|
9
|
+
Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
|
|
10
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
import copy
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
import uuid
|
|
18
|
+
from abc import abstractmethod
|
|
19
|
+
from collections.abc import AsyncGenerator, Sequence
|
|
20
|
+
from concurrent.futures import Future
|
|
21
|
+
from contextlib import suppress
|
|
22
|
+
from contextvars import ContextVar
|
|
23
|
+
from dataclasses import dataclass
|
|
24
|
+
from typing import Annotated, Any
|
|
25
|
+
|
|
26
|
+
from a2a.types import AgentCard
|
|
27
|
+
from aiostream import stream as astream
|
|
28
|
+
from gllm_core.event import EventEmitter
|
|
29
|
+
from gllm_core.event.handler import StreamEventHandler
|
|
30
|
+
from gllm_core.schema import Chunk
|
|
31
|
+
from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
|
|
32
|
+
from langchain_core.tools import BaseTool
|
|
33
|
+
from langgraph.graph import StateGraph
|
|
34
|
+
from langgraph.graph.message import add_messages
|
|
35
|
+
from langgraph.graph.state import CompiledStateGraph
|
|
36
|
+
from langgraph.types import Checkpointer, StreamWriter
|
|
37
|
+
from pydantic import ValidationError
|
|
38
|
+
from typing_extensions import TypedDict
|
|
39
|
+
|
|
40
|
+
from aip_agents.agent.base_agent import BaseAgent
|
|
41
|
+
from aip_agents.agent.system_instruction_context import get_current_date_context
|
|
42
|
+
from aip_agents.constants import TEXT_PREVIEW_LENGTH
|
|
43
|
+
from aip_agents.mcp.client import LangchainMCPClient
|
|
44
|
+
from aip_agents.memory import BaseMemory, MemoryFactory, MemoryMethod
|
|
45
|
+
from aip_agents.memory.constants import MemoryDefaults
|
|
46
|
+
from aip_agents.schema.agent import StreamMode
|
|
47
|
+
from aip_agents.schema.hitl import HitlMetadata
|
|
48
|
+
from aip_agents.tools.tool_config_injector import (
|
|
49
|
+
CONFIG_SCHEMA_ATTR,
|
|
50
|
+
TOOL_CONFIG_SCHEMA_ATTR,
|
|
51
|
+
inject_config_methods_into_tool,
|
|
52
|
+
)
|
|
53
|
+
from aip_agents.types import A2AEvent, A2AStreamEventType
|
|
54
|
+
from aip_agents.utils import augment_query_with_file_paths, validate_references
|
|
55
|
+
from aip_agents.utils.langgraph.tool_managers.a2a_tool_manager import A2AToolManager
|
|
56
|
+
from aip_agents.utils.langgraph.tool_managers.delegation_tool_manager import (
|
|
57
|
+
DelegationToolManager,
|
|
58
|
+
)
|
|
59
|
+
from aip_agents.utils.logger import get_logger
|
|
60
|
+
from aip_agents.utils.metadata.activity_metadata_helper import create_tool_activity_info
|
|
61
|
+
from aip_agents.utils.metadata_helper import (
|
|
62
|
+
DefaultStepMessages,
|
|
63
|
+
Kind,
|
|
64
|
+
MetadataFieldKeys,
|
|
65
|
+
Status,
|
|
66
|
+
end_step_counter_scope,
|
|
67
|
+
get_next_step_number,
|
|
68
|
+
start_step_counter_scope,
|
|
69
|
+
)
|
|
70
|
+
from aip_agents.utils.pii import deanonymize_final_response_content
|
|
71
|
+
from aip_agents.utils.sse_chunk_transformer import SSEChunkTransformer
|
|
72
|
+
from aip_agents.utils.step_limit_manager import _STEP_LIMIT_CONFIG_CVAR
|
|
73
|
+
from aip_agents.utils.token_usage_helper import (
|
|
74
|
+
STEP_USAGE_KEY,
|
|
75
|
+
TOTAL_USAGE_KEY,
|
|
76
|
+
USAGE_METADATA_KEY,
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
logger = get_logger(__name__)
|
|
80
|
+
|
|
81
|
+
# Context variable to access current thread_id during streaming callbacks
|
|
82
|
+
_THREAD_ID_CVAR: ContextVar[str | None] = ContextVar("aip_agents_thread_id", default=None)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
# Context variable to track operation mode for dependency tracking
|
|
86
|
+
# "parallel" = include all completed steps (default for backward compatibility)
|
|
87
|
+
# "sequential" = include only the most recent completed step
|
|
88
|
+
_OPERATION_MODE_CVAR: ContextVar[str] = ContextVar("aip_agents_operation_mode", default="parallel")
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass
|
|
92
|
+
class _StreamingContext:
|
|
93
|
+
"""Context object for managing streaming state and configuration."""
|
|
94
|
+
|
|
95
|
+
original_query: str
|
|
96
|
+
graph_input: dict[str, Any]
|
|
97
|
+
config: dict[str, Any]
|
|
98
|
+
memory_user_id: str | None
|
|
99
|
+
current_thread_id: str
|
|
100
|
+
token: Any
|
|
101
|
+
enable_token_streaming: bool
|
|
102
|
+
|
|
103
|
+
# Streaming state
|
|
104
|
+
final_event_yielded: bool = False
|
|
105
|
+
pending_artifacts: list | None = None
|
|
106
|
+
seen_artifact_hashes: set | None = None
|
|
107
|
+
processed_message_count: int = 0
|
|
108
|
+
final_state: dict[str, Any] | None = None
|
|
109
|
+
last_final_content: str | None = None
|
|
110
|
+
saved_memory: bool = False
|
|
111
|
+
is_token_streaming: bool = False
|
|
112
|
+
|
|
113
|
+
def __post_init__(self):
|
|
114
|
+
"""Initialize mutable defaults."""
|
|
115
|
+
if self.pending_artifacts is None:
|
|
116
|
+
self.pending_artifacts = []
|
|
117
|
+
if self.seen_artifact_hashes is None:
|
|
118
|
+
self.seen_artifact_hashes = set()
|
|
119
|
+
if self.final_state is None:
|
|
120
|
+
self.final_state = {}
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class BaseLangGraphAgent(BaseAgent):
|
|
124
|
+
"""Base class for LangGraph-based agents with unified tool approach.
|
|
125
|
+
|
|
126
|
+
Provides core LangGraph functionality including:
|
|
127
|
+
- Graph compilation and execution
|
|
128
|
+
- State schema management
|
|
129
|
+
- I/O mapping between user inputs and graph states
|
|
130
|
+
- Event emission support
|
|
131
|
+
- Tool resolution and handling
|
|
132
|
+
- A2A communication capabilities via tools
|
|
133
|
+
- Agent delegation capabilities via tools
|
|
134
|
+
- MCP server integration via tools
|
|
135
|
+
- Enhanced output extraction from various state formats
|
|
136
|
+
|
|
137
|
+
Tool Management:
|
|
138
|
+
- regular_tools: Standard LangChain tools provided during initialization
|
|
139
|
+
- mcp_tools: Tools retrieved from MCP servers
|
|
140
|
+
- resolved_tools: Combined collection of all tools for graph execution
|
|
141
|
+
|
|
142
|
+
Subclasses must implement:
|
|
143
|
+
- define_graph(): Define the specific graph structure
|
|
144
|
+
- _prepare_graph_input(): Convert user input to graph state
|
|
145
|
+
- _format_graph_output(): Convert final graph state to user output
|
|
146
|
+
"""
|
|
147
|
+
|
|
148
|
+
def __init__( # noqa: PLR0913
|
|
149
|
+
self,
|
|
150
|
+
name: str,
|
|
151
|
+
instruction: str,
|
|
152
|
+
description: str | None = None,
|
|
153
|
+
model: Any | None = None,
|
|
154
|
+
tools: Sequence[BaseTool] | None = None,
|
|
155
|
+
state_schema: type | None = None,
|
|
156
|
+
thread_id_key: str = "thread_id",
|
|
157
|
+
event_emitter: EventEmitter | None = None,
|
|
158
|
+
checkpointer: Checkpointer | None = None,
|
|
159
|
+
enable_a2a_token_streaming: bool = False,
|
|
160
|
+
**kwargs: Any,
|
|
161
|
+
):
|
|
162
|
+
"""Initialize the BaseLangGraphAgent.
|
|
163
|
+
|
|
164
|
+
Args:
|
|
165
|
+
name: The name of the agent.
|
|
166
|
+
instruction: The system instruction for the agent.
|
|
167
|
+
description: Human-readable description of the agent.
|
|
168
|
+
model: The model to use (lm_invoker, LangChain model, string, etc.).
|
|
169
|
+
tools: Sequence of regular LangChain tools (not A2A or delegation tools).
|
|
170
|
+
state_schema: The state schema for the LangGraph. Defaults to basic message state.
|
|
171
|
+
thread_id_key: Key for thread ID in configuration.
|
|
172
|
+
event_emitter: Optional event emitter for streaming updates.
|
|
173
|
+
checkpointer: Optional checkpointer for conversation persistence.
|
|
174
|
+
enable_a2a_token_streaming: Enable token-level streaming for A2A responses.
|
|
175
|
+
- False (default): Stream message-level events only
|
|
176
|
+
- True: Stream individual tokens plus message-level events
|
|
177
|
+
**kwargs: Additional keyword arguments passed to BaseAgent (including tool_configs and memory settings).
|
|
178
|
+
Memory settings include:
|
|
179
|
+
- memory_backend: Memory backend (e.g., "mem0")
|
|
180
|
+
- agent_id: Agent identifier for memory scoping
|
|
181
|
+
- memory_namespace: Memory namespace
|
|
182
|
+
- save_interaction_to_memory: Whether to save interactions (default True)
|
|
183
|
+
"""
|
|
184
|
+
super().__init__(
|
|
185
|
+
name=name,
|
|
186
|
+
instruction=instruction,
|
|
187
|
+
description=description,
|
|
188
|
+
model=model,
|
|
189
|
+
tools=list(tools) if tools else [],
|
|
190
|
+
**kwargs,
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
self._add_system_date_context()
|
|
194
|
+
|
|
195
|
+
self.state_schema = state_schema
|
|
196
|
+
self.thread_id_key = thread_id_key
|
|
197
|
+
self.enable_a2a_token_streaming = enable_a2a_token_streaming
|
|
198
|
+
self.event_emitter = event_emitter
|
|
199
|
+
self.checkpointer = checkpointer
|
|
200
|
+
|
|
201
|
+
self._mem0_client: Any | None = None
|
|
202
|
+
self.memory: BaseMemory | None = None
|
|
203
|
+
self._initialize_memory_from_kwargs(name, kwargs)
|
|
204
|
+
|
|
205
|
+
self.a2a_tool_manager = A2AToolManager()
|
|
206
|
+
self.delegation_tool_manager = DelegationToolManager(parent_agent=self)
|
|
207
|
+
|
|
208
|
+
self.regular_tools: list[BaseTool] = self._resolve_and_validate_tools()
|
|
209
|
+
self.mcp_tools: list[BaseTool] = []
|
|
210
|
+
self.resolved_tools: list[BaseTool] = self.regular_tools.copy()
|
|
211
|
+
|
|
212
|
+
self._compiled_graph = self._build_and_compile_graph()
|
|
213
|
+
|
|
214
|
+
self._tool_parent_map_by_thread: dict[str, dict[str, str]] = {}
|
|
215
|
+
self._completed_tool_steps_by_thread: dict[str, list[str]] = {}
|
|
216
|
+
self._last_status_step_id_by_thread: dict[str, str] = {}
|
|
217
|
+
self._coordinator_completed_tool_steps_by_thread: dict[str, list[str]] = {}
|
|
218
|
+
self._emitted_tool_calls_by_thread: dict[str, set[str]] = {}
|
|
219
|
+
|
|
220
|
+
def _create_default_event_emitter(self) -> EventEmitter:
|
|
221
|
+
"""Create default event emitter for token streaming.
|
|
222
|
+
|
|
223
|
+
Returns:
|
|
224
|
+
EventEmitter with StreamEventHandler configured for token streaming.
|
|
225
|
+
"""
|
|
226
|
+
stream_handler = StreamEventHandler(name=f"{self.name}_A2AStreamHandler")
|
|
227
|
+
logger.info(f"Agent '{self.name}': Auto-created event emitter for token streaming")
|
|
228
|
+
return EventEmitter(handlers=[stream_handler])
|
|
229
|
+
|
|
230
|
+
def _log_streaming_event_debug(self, source: str, event: dict[str, Any]) -> None:
|
|
231
|
+
"""Log the raw streaming event for debugging purposes.
|
|
232
|
+
|
|
233
|
+
Args:
|
|
234
|
+
source: A short label describing where the event originated.
|
|
235
|
+
event: The event payload emitted by the streaming pipeline.
|
|
236
|
+
"""
|
|
237
|
+
try:
|
|
238
|
+
logger.info("Streaming event (%s): %s", source, event)
|
|
239
|
+
except Exception as exc: # noqa: BLE001
|
|
240
|
+
logger.debug("Failed to log streaming event: %s", exc, exc_info=True)
|
|
241
|
+
|
|
242
|
+
def _record_emitted_tool_calls(self, tool_calls_details: list[dict[str, Any]]) -> None:
|
|
243
|
+
"""Track tool call IDs that have already been emitted to avoid duplicates.
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
tool_calls_details: Tool call metadata emitted by the tool_call event.
|
|
247
|
+
"""
|
|
248
|
+
thread_id = _THREAD_ID_CVAR.get()
|
|
249
|
+
if not thread_id or not tool_calls_details:
|
|
250
|
+
return
|
|
251
|
+
|
|
252
|
+
emitted = self._emitted_tool_calls_by_thread.setdefault(thread_id, set())
|
|
253
|
+
for details in tool_calls_details:
|
|
254
|
+
call_id = details.get("id")
|
|
255
|
+
if isinstance(call_id, str) and call_id:
|
|
256
|
+
emitted.add(call_id)
|
|
257
|
+
logger.info(
|
|
258
|
+
"Registered tool call event: agent=%s thread=%s call_id=%s payload=%s",
|
|
259
|
+
self.name,
|
|
260
|
+
thread_id,
|
|
261
|
+
call_id,
|
|
262
|
+
details,
|
|
263
|
+
)
|
|
264
|
+
|
|
265
|
+
def _discard_emitted_tool_call(self, tool_call_id: str | None) -> None:
|
|
266
|
+
"""Remove a tool call ID from the emitted tracker.
|
|
267
|
+
|
|
268
|
+
Args:
|
|
269
|
+
tool_call_id: Identifier of the tool call to remove from cache.
|
|
270
|
+
"""
|
|
271
|
+
if not tool_call_id:
|
|
272
|
+
return
|
|
273
|
+
thread_id = _THREAD_ID_CVAR.get()
|
|
274
|
+
if not thread_id:
|
|
275
|
+
return
|
|
276
|
+
emitted = self._emitted_tool_calls_by_thread.get(thread_id)
|
|
277
|
+
if emitted:
|
|
278
|
+
emitted.discard(tool_call_id)
|
|
279
|
+
logger.info(
|
|
280
|
+
"Cleared recorded tool call: agent=%s thread=%s call_id=%s",
|
|
281
|
+
self.name,
|
|
282
|
+
thread_id,
|
|
283
|
+
tool_call_id,
|
|
284
|
+
)
|
|
285
|
+
|
|
286
|
+
def _get_stream_handler(self) -> StreamEventHandler | None:
|
|
287
|
+
"""Get StreamEventHandler from event_emitter if available.
|
|
288
|
+
|
|
289
|
+
Returns:
|
|
290
|
+
StreamEventHandler instance if found, None otherwise.
|
|
291
|
+
"""
|
|
292
|
+
if not self.event_emitter or not self.event_emitter.handlers:
|
|
293
|
+
return None
|
|
294
|
+
|
|
295
|
+
for handler in self.event_emitter.handlers:
|
|
296
|
+
if isinstance(handler, StreamEventHandler):
|
|
297
|
+
return handler
|
|
298
|
+
return None
|
|
299
|
+
|
|
300
|
+
def _add_system_date_context(self):
|
|
301
|
+
"""Prepend the current date context to the agent's system instruction.
|
|
302
|
+
|
|
303
|
+
The `get_current_date_context()` helper returns a short natural-language phrase
|
|
304
|
+
describing "today" (e.g., "Today is DD MMM YYYY"). By prepending this
|
|
305
|
+
snippet the agent gains up-to-date temporal grounding for each run,
|
|
306
|
+
which is especially important for prompts that reason about recency or compute
|
|
307
|
+
relative dates.
|
|
308
|
+
"""
|
|
309
|
+
date_context = get_current_date_context()
|
|
310
|
+
self.instruction = date_context + "\n\n" + self.instruction
|
|
311
|
+
logger.info(f"Agent '{self.name}': Prepended current date context to system instruction")
|
|
312
|
+
|
|
313
|
+
def set_operation_mode(self, mode: str) -> None:
|
|
314
|
+
"""Set the operation mode for dependency tracking.
|
|
315
|
+
|
|
316
|
+
Args:
|
|
317
|
+
mode: Operation mode - "parallel" (default) or "sequential"
|
|
318
|
+
"""
|
|
319
|
+
if mode not in ["parallel", "sequential"]:
|
|
320
|
+
raise ValueError(f"Invalid operation mode: {mode}. Must be 'parallel' or 'sequential'")
|
|
321
|
+
_OPERATION_MODE_CVAR.set(mode)
|
|
322
|
+
|
|
323
|
+
def _default_memory_agent_id(self, name: str) -> str:
|
|
324
|
+
"""Create a stable identifier for memory scoping.
|
|
325
|
+
|
|
326
|
+
Args:
|
|
327
|
+
name: The agent's human-readable name.
|
|
328
|
+
|
|
329
|
+
Returns:
|
|
330
|
+
str: A deterministic ID derived from the class and name, suitable for scoping memory per agent.
|
|
331
|
+
"""
|
|
332
|
+
base = f"{self.__class__.__name__}:{name}"
|
|
333
|
+
return f"{MemoryDefaults.AGENT_ID_PREFIX}{hashlib.sha256(base.encode()).hexdigest()}"
|
|
334
|
+
|
|
335
|
+
@staticmethod
|
|
336
|
+
def _parse_bool_value(value: Any) -> bool:
|
|
337
|
+
"""Parse a value to boolean with string handling for "true"/"false".
|
|
338
|
+
|
|
339
|
+
Treats string "false" as False, "true" as True.
|
|
340
|
+
For other values, uses standard bool() conversion.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
value: The value to parse.
|
|
344
|
+
|
|
345
|
+
Returns:
|
|
346
|
+
bool: The parsed boolean value.
|
|
347
|
+
"""
|
|
348
|
+
if isinstance(value, str):
|
|
349
|
+
lower_value = value.lower().strip()
|
|
350
|
+
if lower_value == "false":
|
|
351
|
+
return False
|
|
352
|
+
elif lower_value == "true":
|
|
353
|
+
return True
|
|
354
|
+
return bool(value)
|
|
355
|
+
|
|
356
|
+
def _memory_enabled(self) -> bool:
|
|
357
|
+
"""Check whether memory is enabled for this agent.
|
|
358
|
+
|
|
359
|
+
Returns:
|
|
360
|
+
bool: True when a memory adapter is set.
|
|
361
|
+
"""
|
|
362
|
+
return self.memory is not None
|
|
363
|
+
|
|
364
|
+
def _has_lm_invoker(self) -> bool:
|
|
365
|
+
"""Check whether lm_invoker is available for this agent.
|
|
366
|
+
|
|
367
|
+
Returns:
|
|
368
|
+
bool: True when lm_invoker attribute exists and is not None.
|
|
369
|
+
"""
|
|
370
|
+
return self.lm_invoker is not None
|
|
371
|
+
|
|
372
|
+
def _memory_search(self, query: str, override_user_id: str | None = None) -> list[dict[str, Any]]:
|
|
373
|
+
"""Search for relevant memories using the configured adapter.
|
|
374
|
+
|
|
375
|
+
Args:
|
|
376
|
+
query: The user query to retrieve relevant memories for.
|
|
377
|
+
override_user_id: Optional per-call override for the memory scope.
|
|
378
|
+
|
|
379
|
+
Returns:
|
|
380
|
+
list[dict[str, Any]]: Memory hits; empty list on failure or when disabled.
|
|
381
|
+
"""
|
|
382
|
+
if not (self._memory_enabled() and isinstance(query, str)):
|
|
383
|
+
return []
|
|
384
|
+
try:
|
|
385
|
+
user_id = override_user_id or self.memory_agent_id
|
|
386
|
+
if hasattr(self.memory, MemoryMethod.SEARCH):
|
|
387
|
+
results = self.memory.search(query=query, user_id=user_id, limit=self.memory_retrieval_limit) # type: ignore[attr-defined]
|
|
388
|
+
return results if isinstance(results, list) else []
|
|
389
|
+
else:
|
|
390
|
+
return []
|
|
391
|
+
except Exception as e: # noqa: BLE001
|
|
392
|
+
logger.debug(f"Memory: search ignored error: {e}")
|
|
393
|
+
return []
|
|
394
|
+
|
|
395
|
+
def _memory_save_interaction(self, user_text: str, ai_text: str, memory_user_id: str | None = None) -> None:
|
|
396
|
+
"""Persist the user/assistant pair using the configured adapter (best-effort).
|
|
397
|
+
|
|
398
|
+
Args:
|
|
399
|
+
user_text: The user input text.
|
|
400
|
+
ai_text: The assistant output text.
|
|
401
|
+
memory_user_id: Optional per-call memory scope override.
|
|
402
|
+
"""
|
|
403
|
+
if not (self.save_interaction_to_memory and self._memory_enabled() and user_text and ai_text):
|
|
404
|
+
logger.debug("Memory: Skipping save_interaction - saving disabled, memory disabled, or empty text")
|
|
405
|
+
return
|
|
406
|
+
try:
|
|
407
|
+
user_id = memory_user_id or self.memory_agent_id
|
|
408
|
+
logger.info(
|
|
409
|
+
f"Memory: Saving interaction for user_id='{user_id}' - "
|
|
410
|
+
f"User: '{user_text[:TEXT_PREVIEW_LENGTH]}{'...' if len(user_text) > TEXT_PREVIEW_LENGTH else ''}' "
|
|
411
|
+
f"AI: '{ai_text[:TEXT_PREVIEW_LENGTH]}{'...' if len(ai_text) > TEXT_PREVIEW_LENGTH else ''}'"
|
|
412
|
+
)
|
|
413
|
+
save_async = getattr(self.memory, "save_interaction_async", None)
|
|
414
|
+
if callable(save_async):
|
|
415
|
+
future = save_async(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)
|
|
416
|
+
self._watch_memory_future(future, user_id)
|
|
417
|
+
elif hasattr(self.memory, MemoryMethod.SAVE_INTERACTION):
|
|
418
|
+
self.memory.save_interaction(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id) # type: ignore[attr-defined]
|
|
419
|
+
else:
|
|
420
|
+
logger.warning(
|
|
421
|
+
"Memory: save_interaction method NOT available on memory adapter "
|
|
422
|
+
f"(type: {type(self.memory).__name__})"
|
|
423
|
+
)
|
|
424
|
+
except Exception as e: # noqa: BLE001
|
|
425
|
+
logger.debug(f"Memory: save_interaction ignored error: {e}")
|
|
426
|
+
|
|
427
|
+
@staticmethod
|
|
428
|
+
def _watch_memory_future(future: Any, user_id: str) -> None:
|
|
429
|
+
"""Attach logging to asynchronous memory writes.
|
|
430
|
+
|
|
431
|
+
Args:
|
|
432
|
+
future: The Future object to monitor for completion.
|
|
433
|
+
user_id: User identifier for logging context.
|
|
434
|
+
"""
|
|
435
|
+
if not isinstance(future, Future):
|
|
436
|
+
return
|
|
437
|
+
|
|
438
|
+
def _log_completion(done: Future) -> None:
|
|
439
|
+
"""Log memory save completion or failure.
|
|
440
|
+
|
|
441
|
+
Args:
|
|
442
|
+
done: Future object that has completed.
|
|
443
|
+
"""
|
|
444
|
+
exc = done.exception()
|
|
445
|
+
if exc:
|
|
446
|
+
logger.warning("Memory: async save failed for user_id='%s': %s", user_id, exc)
|
|
447
|
+
|
|
448
|
+
future.add_done_callback(_log_completion)
|
|
449
|
+
|
|
450
|
+
def _resolve_and_validate_tools(self) -> list[BaseTool]:
|
|
451
|
+
"""Resolve and validate regular tools for LangGraph usage.
|
|
452
|
+
|
|
453
|
+
Also configures tools with injected configuration capabilities
|
|
454
|
+
from agent-level tool_configs.
|
|
455
|
+
|
|
456
|
+
Returns:
|
|
457
|
+
List of resolved LangChain BaseTool instances.
|
|
458
|
+
"""
|
|
459
|
+
resolved = []
|
|
460
|
+
for tool in self.tools:
|
|
461
|
+
if isinstance(tool, BaseTool):
|
|
462
|
+
self._configure_injected_tool(tool)
|
|
463
|
+
resolved.append(tool)
|
|
464
|
+
else:
|
|
465
|
+
logger.warning(f"Agent '{self.name}': Tool {tool} is not a LangChain BaseTool, skipping")
|
|
466
|
+
|
|
467
|
+
logger.info(f"Agent '{self.name}': Resolved {len(resolved)} regular tools for LangGraph")
|
|
468
|
+
return resolved
|
|
469
|
+
|
|
470
|
+
def _initialize_memory_from_kwargs(self, agent_name: str, kwargs: dict[str, Any]) -> None:
|
|
471
|
+
"""Initialize memory-related settings and adapter.
|
|
472
|
+
|
|
473
|
+
Extracts known memory kwargs, sets defaults, and initializes the adapter when enabled.
|
|
474
|
+
Keeps ``__init__`` concise and improves DX.
|
|
475
|
+
|
|
476
|
+
Args:
|
|
477
|
+
agent_name: The name of the agent, used to derive a default memory id.
|
|
478
|
+
kwargs: Keyword arguments from the agent constructor; consumed keys are removed.
|
|
479
|
+
Supported memory kwargs:
|
|
480
|
+
- memory_backend: str - Memory backend to use (e.g., "mem0")
|
|
481
|
+
- agent_id: str - Agent identifier for memory scoping
|
|
482
|
+
- memory_namespace: str - Memory namespace
|
|
483
|
+
- memory_retrieval_limit: int - Max memories to retrieve
|
|
484
|
+
- memory_max_chars: int - Max characters per memory
|
|
485
|
+
- save_interaction_to_memory: bool (default True) - Whether to save interactions to memory
|
|
486
|
+
"""
|
|
487
|
+
# Initialize memory configuration settings
|
|
488
|
+
self.memory_backend: str | None = kwargs.pop("memory_backend", None)
|
|
489
|
+
self.memory_agent_id: str = str(kwargs.pop("agent_id", self._default_memory_agent_id(agent_name)))
|
|
490
|
+
self.memory_namespace: str | None = kwargs.pop("memory_namespace", None)
|
|
491
|
+
self.memory_retrieval_limit: int = int(kwargs.pop("memory_retrieval_limit", MemoryDefaults.RETRIEVAL_LIMIT))
|
|
492
|
+
self.memory_max_chars: int = int(kwargs.pop("memory_max_chars", MemoryDefaults.MAX_CHARS))
|
|
493
|
+
|
|
494
|
+
# Initialize memory interaction saving flag with proper bool conversion
|
|
495
|
+
save_raw = kwargs.pop("save_interaction_to_memory", True)
|
|
496
|
+
self.save_interaction_to_memory: bool = self._parse_bool_value(save_raw)
|
|
497
|
+
|
|
498
|
+
if self.memory_backend:
|
|
499
|
+
memory_kwargs = {
|
|
500
|
+
"limit": self.memory_retrieval_limit,
|
|
501
|
+
"max_chars": self.memory_max_chars,
|
|
502
|
+
"agent_id": self.memory_agent_id,
|
|
503
|
+
}
|
|
504
|
+
if self.memory_namespace:
|
|
505
|
+
memory_kwargs["namespace"] = self.memory_namespace
|
|
506
|
+
|
|
507
|
+
self._mem0_client = MemoryFactory.create(
|
|
508
|
+
self.memory_backend,
|
|
509
|
+
**memory_kwargs,
|
|
510
|
+
)
|
|
511
|
+
self.memory = self._mem0_client
|
|
512
|
+
|
|
513
|
+
def _configure_injected_tool(self, tool: BaseTool) -> None:
    """Configure a tool with automatic configuration injection using agent-level defaults.

    Args:
        tool: The tool instance to configure.
    """
    if self._should_auto_inject_config(tool):
        self._auto_inject_config_capabilities(tool)
    self._apply_agent_config_to_tool(tool)

def _should_auto_inject_config(self, tool: BaseTool) -> bool:
    """Check if tool needs auto-injection of configuration capabilities.

    Args:
        tool: The tool instance to check.

    Returns:
        True if tool needs auto-injection of configuration capabilities, False otherwise.
    """
    return (
        hasattr(tool, TOOL_CONFIG_SCHEMA_ATTR)
        and getattr(tool, TOOL_CONFIG_SCHEMA_ATTR) is not None
        and not hasattr(tool, CONFIG_SCHEMA_ATTR)
    )

def _auto_inject_config_capabilities(self, tool: BaseTool) -> None:
    """Inject configuration capabilities into a tool with tool_config_schema.

    Args:
        tool: The tool instance to configure.
    """
    try:
        tool_config_schema = getattr(tool, TOOL_CONFIG_SCHEMA_ATTR)
        inject_config_methods_into_tool(tool, tool_config_schema)
        logger.info(f"Agent '{self.name}': Auto-injected config capabilities into tool '{tool.name}'")
    except Exception as e:
        logger.warning(f"Agent '{self.name}': Failed to auto-inject config into tool '{tool.name}': {e}")

def _apply_agent_config_to_tool(self, tool: BaseTool) -> None:
    """Apply agent-level configuration to a tool.

    Args:
        tool: The tool instance to configure.
    """
    tool_config_data = self._get_agent_config_for_tool(tool.name)

    if tool_config_data is None:
        return

    try:
        tool.set_tool_config(tool_config_data)
        logger.info(f"Agent '{self.name}': Configured tool '{tool.name}' with agent defaults: {tool_config_data}")
    except Exception as e:
        logger.warning(f"Agent '{self.name}': Failed to configure tool '{tool.name}': {e}")

def _get_agent_config_for_tool(self, tool_name: str) -> dict[str, Any] | None:
    """Get agent-level configuration data for a specific tool.

    This method intentionally returns only per-tool configuration and does NOT include
    global agent configuration. Global configuration merging is handled separately
    in the metadata resolution process during tool execution.

    Args:
        tool_name: The name of the tool to get configuration for.

    Returns:
        The configuration data for the tool, or None if no configuration is found.
    """
    if not isinstance(self.tool_configs, dict):
        return None

    return self.tool_configs.get(tool_name)

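# --- Illustrative sketch (not package code) --------------------------------
# The per-tool mapping that _get_agent_config_for_tool reads; "web_search"
# and its keys are hypothetical. Unknown tools fall through to None.
_example_tool_configs = {"web_search": {"max_results": 5}}
assert _example_tool_configs.get("web_search") == {"max_results": 5}
assert _example_tool_configs.get("unknown_tool") is None
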
def _sanitize_tool_names(self):
    """Sanitize the names of ``resolved_tools`` so they comply with the model provider's naming rules."""
    for tool in self.resolved_tools:
        sanitized_name = self.name_preprocessor.sanitize_tool_name(tool.name)
        tool.name = sanitized_name

def _build_and_compile_graph(self) -> CompiledStateGraph:
    """Build and compile the LangGraph while ensuring tool names are valid.

    Returns:
        Compiled LangGraph ready for execution.
    """
    self._sanitize_tool_names()
    try:
        if self.state_schema:
            graph_builder = StateGraph(self.state_schema)
        else:

            class DefaultAgentState(TypedDict):
                messages: Annotated[list[BaseMessage], add_messages]

            graph_builder = StateGraph(DefaultAgentState)

        compiled_graph = self.define_graph(graph_builder)
        logger.info(
            f"Agent '{self.name}': Successfully compiled LangGraph with {len(self.resolved_tools)} total tools"
        )
        return compiled_graph

    except Exception as e:
        logger.error(f"Agent '{self.name}': Failed to build LangGraph: {e}")
        raise RuntimeError(f"Failed to build LangGraph for agent '{self.name}': {e}") from e

@abstractmethod
def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
    """Define the specific graph structure for this agent type.

    Subclasses must implement this method to:
    1. Add nodes to the graph_builder
    2. Add edges and conditional edges
    3. Set entry points
    4. Return the compiled graph

    Args:
        graph_builder: The StateGraph builder to define nodes and edges on.

    Returns:
        The compiled graph ready for execution.
    """
    raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement define_graph method")

@abstractmethod
def _prepare_graph_input(self, input_data: Any, **kwargs: Any) -> dict[str, Any]:
    """Convert user input to graph state format.

    Args:
        input_data: The user's input (query string, structured data, etc.).
        **kwargs: Additional keyword arguments from the user.

    Returns:
        Dictionary representing the initial graph state.
    """
    raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement _prepare_graph_input method")

@abstractmethod
def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
    """Convert final graph state to user-friendly output.

    Args:
        final_state_result: The final state from graph execution.

    Returns:
        Formatted output for the user.
    """
    raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement _format_graph_output method")

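# --- Illustrative sketch (not package code) --------------------------------
# The shape of a minimal define_graph override; the "chat" node name and the
# self._chat_node callable are hypothetical, while add_node / set_entry_point
# / add_edge / compile and END are standard LangGraph StateGraph API.
#
#     from langgraph.graph import END
#
#     def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
#         graph_builder.add_node("chat", self._chat_node)  # 1. add nodes
#         graph_builder.set_entry_point("chat")            # 3. set entry point
#         graph_builder.add_edge("chat", END)              # 2. add edges
#         return graph_builder.compile()                   # 4. compile
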
def _extract_metadata_from_kwargs(self, **kwargs: Any) -> dict[str, Any]:
    """Extract metadata from kwargs for agent implementations.

    Supports both flat and mixed metadata schemas:
    - Flat dict (legacy): all keys applied to all tools and model calls
    - Mixed dict (new): top-level keys applied to all tools, 'tool_configs' section per-tool

    Args:
        **kwargs: Keyword arguments that may contain metadata.

    Returns:
        dict[str, Any]: The metadata dictionary, or an empty dict if no metadata was provided.
    """
    return kwargs.get("metadata", {})

def _extract_output_from_dict_state(self, dict_state: dict[str, Any]) -> str | None:
    """Extract output from a dictionary state (migrated from BaseLangChainAgent).

    Args:
        dict_state: A dictionary containing agent state information.

    Returns:
        The extracted output string or None if no valid output found.
    """
    output_content: str | None = None
    messages = dict_state.get("messages")
    if isinstance(messages, list) and messages:
        last_message = messages[-1]
        if isinstance(last_message, AIMessage):
            candidate_content = getattr(last_message, "content", None)
            if not candidate_content:
                output_content = ""
            else:
                output_content = candidate_content
        elif hasattr(last_message, "content"):
            output_content = getattr(last_message, "content", None)
    if output_content is None:
        candidate_output_from_key = dict_state.get("output")
        if isinstance(candidate_output_from_key, str):
            output_content = candidate_output_from_key
    return output_content

def _extract_output_from_list_state(self, list_state: list[Any]) -> str | None:
    """Extract output from a list state (migrated from BaseLangChainAgent).

    Args:
        list_state: A list containing agent state information.

    Returns:
        The extracted output string or None if no valid output found.
    """
    output_content: str | None = None
    if not list_state:
        return None
    last_item = list_state[-1]
    if isinstance(last_item, AIMessage) and getattr(last_item, "content", None) is not None:
        output_content = last_item.content
    elif isinstance(last_item, str):
        output_content = last_item
    return output_content

def _extract_output_from_final_state(self, final_state_result: Any) -> str:
    """Enhanced output extraction from final state (migrated from BaseLangChainAgent).

    Args:
        final_state_result: The final state from graph execution.

    Returns:
        Extracted text content.
    """
    output_content: str | None = None
    if isinstance(final_state_result, dict):
        output_content = self._extract_output_from_dict_state(final_state_result)
    elif isinstance(final_state_result, str):
        output_content = final_state_result
    elif isinstance(final_state_result, list):
        output_content = self._extract_output_from_list_state(final_state_result)

    if output_content is None:
        return "Error: Could not extract output from agent's final state."
    return output_content

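# --- Illustrative calls (not package code) ---------------------------------
# How the extraction order above resolves, assuming an `agent` instance:
# a trailing AIMessage wins, then the "output" key, then the error string.
#
#     agent._extract_output_from_final_state(
#         {"messages": [AIMessage(content="hello")], "output": "ignored"}
#     )                                                     # -> "hello"
#     agent._extract_output_from_final_state({"output": "fallback"})  # -> "fallback"
#     agent._extract_output_from_final_state(object())
#     # -> "Error: Could not extract output from agent's final state."
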
def register_a2a_agents(self, agent_cards: list[AgentCard]) -> None:
    """Register A2A communication capabilities using the A2A tool manager.

    Args:
        agent_cards (list[AgentCard]): List of AgentCard instances for external communication.
    """
    if not agent_cards:
        logger.info(f"Agent '{self.name}': No A2A agents to register")
        return

    a2a_tools = self.a2a_tool_manager.register_resources(agent_cards)
    self.resolved_tools.extend(a2a_tools)

    logger.info(f"Agent '{self.name}': Registered {len(agent_cards)} A2A agents as tools")
    self._rebuild_graph()

def register_delegation_agents(self, agents: list[BaseAgent]) -> None:
    """Register internal agent delegation capabilities using the delegation tool manager.

    Args:
        agents: List of BaseAgent instances for internal task delegation.
    """
    if not agents:
        logger.info(f"Agent '{self.name}': No delegation agents to register")
        return

    delegation_tools = self.delegation_tool_manager.register_resources(agents)
    self.resolved_tools.extend(delegation_tools)
    logger.info(f"Agent '{self.name}': Registered {len(agents)} delegation agents as streaming tools")

    self._rebuild_graph()

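# --- Illustrative sketch (not package code) --------------------------------
# Wiring sub-agents onto a coordinator; `coordinator`, `researcher`, `writer`,
# and `remote_card` are hypothetical objects built elsewhere. Both calls
# extend resolved_tools and trigger a graph rebuild, as implemented above.
#
#     coordinator.register_delegation_agents([researcher, writer])  # in-process
#     coordinator.register_a2a_agents([remote_card])                # remote A2A
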
def update_regular_tools(self, new_tools: list[BaseTool], rebuild_graph: bool | None = None) -> None:
    """Update regular tools (not capabilities).

    Args:
        new_tools: New list of regular tools to use.
        rebuild_graph: Whether to rebuild the graph. If None, the graph is rebuilt.
    """
    logger.info(f"Agent '{self.name}': Updating regular tools from {len(self.tools)} to {len(new_tools)}")

    self.tools = list(new_tools)
    old_resolved_count = len(self.regular_tools)
    self.regular_tools = self._resolve_and_validate_tools()

    logger.info(
        f"Agent '{self.name}': Regular tools changed from {old_resolved_count} to {len(self.regular_tools)}"
    )

    self._rebuild_resolved_tools()

    should_rebuild = rebuild_graph if rebuild_graph is not None else True
    if should_rebuild:
        try:
            logger.info(f"Agent '{self.name}': Rebuilding graph with updated tools")
            self._compiled_graph = self._build_and_compile_graph()
        except Exception as e:
            logger.error(f"Agent '{self.name}': Failed to rebuild graph after tool update: {e}")
            raise

def _rebuild_resolved_tools(self) -> None:
    """Rebuild resolved tools combining regular tools with capability tools."""
    self.resolved_tools = self.regular_tools.copy()

    if self.a2a_tool_manager:
        a2a_tools = self.a2a_tool_manager.get_tools()
        self.resolved_tools.extend(a2a_tools)
        logger.info(f"Agent '{self.name}': Added {len(a2a_tools)} A2A tools")

    if self.delegation_tool_manager:
        delegation_tools = self.delegation_tool_manager.get_tools()
        self.resolved_tools.extend(delegation_tools)
        logger.info(f"Agent '{self.name}': Added {len(delegation_tools)} delegation tools")

    if self.mcp_tools:
        self.resolved_tools.extend(self.mcp_tools)
        logger.info(f"Agent '{self.name}': Added {len(self.mcp_tools)} MCP tools")

    logger.info(f"Agent '{self.name}': Rebuilt resolved tools: {len(self.resolved_tools)} total tools")

def _rebuild_graph(self) -> None:
    """Rebuild and recompile the graph using the current set of tools.

    Raises:
        RuntimeError: If the graph rebuilding or compilation process fails.
    """
    try:
        self._rebuild_resolved_tools()
        self._compiled_graph = self._build_and_compile_graph()
        logger.info(f"Agent '{self.name}': Successfully rebuilt graph")
    except Exception as e:
        logger.error(f"Agent '{self.name}': Failed to rebuild graph: {e}")
        raise RuntimeError(f"Failed to rebuild graph for agent '{self.name}': {e}") from e

def run(self, query: str, **kwargs: Any) -> dict[str, Any]:
    """Synchronously run the LangGraph agent.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Returns:
        Dictionary containing the agent's response.
    """
    try:
        return asyncio.run(self.arun(query, **kwargs))
    except RuntimeError as e:
        raise RuntimeError(f"Agent '{self.name}': Error in sync 'run'. Original: {e}") from e

async def arun(self, query: str, **kwargs: Any) -> dict[str, Any]:
    """Asynchronously run the LangGraph agent with lazy MCP initialization.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments including configurable for LangGraph.

    Returns:
        Dictionary containing the agent's response and full final state.
    """
    await self._ensure_mcp_tools_initialized()
    return await self._arun(query, **kwargs)

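# --- Illustrative usage (not package code) ---------------------------------
# `run` wraps `arun` via asyncio.run, so it must be called from synchronous
# code (no running event loop); inside async code, await `arun` directly.
# The result keys mirror the dict returned by _arun below.
#
#     result = agent.run("What is the weather in Jakarta?")
#     print(result["output"])             # formatted answer
#     state = result["full_final_state"]  # raw final graph state
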
async def _arun(self, query: str, **kwargs: Any) -> dict[str, Any]:
    """Internal implementation of arun without MCP handling.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments including configurable for LangGraph.

    Returns:
        Dictionary containing the agent's response and full final state.
    """
    memory_user_id: str | None = kwargs.get("memory_user_id")

    # Create config first to ensure thread_id is generated
    config = self._create_graph_config(**kwargs)
    thread_id = self._get_thread_id_from_config(config)

    graph_input = self._prepare_graph_input(query, thread_id=thread_id, **kwargs)

    try:
        final_state_result = await self._compiled_graph.ainvoke(graph_input, config=config)
        formatted_output = self._format_graph_output(final_state_result)

        try:
            self._memory_save_interaction(user_text=query, ai_text=formatted_output, memory_user_id=memory_user_id)
        except Exception:
            # Memory persistence is best-effort; never fail the run because a save failed.
            pass

        return {"output": formatted_output, "full_final_state": final_state_result}

    except Exception as e:
        logger.error(f"Agent '{self.name}': Error during graph execution: {e}")
        raise RuntimeError(f"Agent '{self.name}': Graph execution failed: {e}") from e

async def _stream_with_lm_invoker(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
    """Handle streaming for LM Invoker using StreamEventHandler.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Yields:
        Chunks of output (strings or dicts) from the streaming response.
    """
    stream_handler = StreamEventHandler(name=f"{self.name}_StreamHandler")
    event_emitter = EventEmitter(handlers=[stream_handler])

    async def run_and_cleanup():
        """Runs the agent and ensures event emitter cleanup."""
        try:
            await self.arun(
                query=query,
                event_emitter=event_emitter,
                **kwargs,
            )
        finally:
            await event_emitter.close()

    execution_task = asyncio.create_task(run_and_cleanup())

    try:
        async for event in stream_handler.stream():
            chunk_data = json.loads(event)
            chunk_value = chunk_data.get("value", "")
            if not chunk_value:
                continue
            if isinstance(chunk_value, str | dict):
                yield chunk_value

        await execution_task

    except asyncio.CancelledError:
        execution_task.cancel()
        await event_emitter.close()
        with suppress(asyncio.CancelledError):
            await execution_task
        raise
    except Exception as e:
        execution_task.cancel()
        await event_emitter.close()
        with suppress(asyncio.CancelledError):
            await execution_task
        logger.error(f"Agent '{self.name}': Error during LM Invoker streaming: {e}")
        yield {"error": f"Streaming failed: {e}"}

def _create_graph_config(self, **kwargs: Any) -> dict[str, Any]:
    """Create standardized graph configuration with thread ID handling.

    Guarantees a thread identifier is present in the returned config. The key used
    is `self.thread_id_key` when set, otherwise the default key `"thread_id"`.

    Args:
        **kwargs: Additional keyword arguments including configurable, metadata, and pii_mapping.

    Returns:
        Dictionary containing the graph configuration with a guaranteed thread ID
        and metadata (including pii_mapping) if provided.
    """
    configurable = kwargs.get("configurable", {}).copy()

    key = self.thread_id_key or "thread_id"
    if key not in configurable:
        configurable[key] = str(uuid.uuid4())
        logger.info(f"Agent '{self.name}': Generated new thread ID: {configurable[key]}")

    config: dict[str, Any] = {"configurable": configurable}

    # Include metadata in config to preserve pii_mapping and other metadata
    # This ensures parity between direct SSE streaming and A2A executor paths
    metadata = kwargs.get("metadata")
    pii_mapping = kwargs.get("pii_mapping")

    if metadata or pii_mapping:
        config_metadata: dict[str, Any] = dict(metadata) if metadata else {}
        if pii_mapping and "pii_mapping" not in config_metadata:
            config_metadata["pii_mapping"] = pii_mapping
        config["metadata"] = config_metadata

    return config

def _get_thread_id_from_config(self, config: dict[str, Any]) -> str | None:
    """Extract thread_id from graph configuration.

    Args:
        config: Graph configuration dict with 'configurable' key.

    Returns:
        The thread_id value or None if not found.
    """
    configurable = config.get("configurable", {})
    key = self.thread_id_key or "thread_id"
    return configurable.get(key)

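# --- Illustrative sketch (not package code) --------------------------------
# The config shape produced above for a hypothetical call, assuming
# self.thread_id_key is unset (so "thread_id" is used):
#
#     config = agent._create_graph_config(
#         metadata={"trace": "abc"}, pii_mapping={"<p1>": "Alice"}
#     )
#     # config == {
#     #     "configurable": {"thread_id": "<generated-uuid4>"},
#     #     "metadata": {"trace": "abc", "pii_mapping": {"<p1>": "Alice"}},
#     # }
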
def _process_langgraph_event(self, event: dict[str, Any]) -> str | dict[str, Any] | None:
    """Process a single LangGraph streaming event.

    Args:
        event: Event from LangGraph's astream_events.

    Returns:
        Processed output or None if event should be skipped.
    """
    event_type = event.get("event")
    event_data = event.get("data")

    if event_type == "on_chat_model_stream" and event_data:
        chunk = event_data.get("chunk")
        if chunk and hasattr(chunk, "content") and chunk.content:
            return chunk.content

    elif event_type == "on_tool_end" and event_data:
        output = event_data.get("output")
        if output:
            return {"tool_output": str(output)}

    return None

def _should_yield_a2a_event(self, event_data: A2AEvent) -> bool:
    """Check if A2A event should be yielded based on event type.

    Args:
        event_data: A2AEvent with semantic type information.

    Returns:
        True if event should be yielded, False otherwise.
    """
    event_type = event_data.get("event_type")

    if event_type in {
        A2AStreamEventType.TOOL_CALL,
        A2AStreamEventType.TOOL_RESULT,
        A2AStreamEventType.CONTENT_CHUNK,
        A2AStreamEventType.FINAL_RESPONSE,
        A2AStreamEventType.ERROR,
    }:
        return True

    if event_type == A2AStreamEventType.STATUS_UPDATE:
        content = event_data.get("content", "")
        return bool(content.strip())

    return True

async def _stream_with_langgraph(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
    """Handle streaming for LangChain models using LangGraph's native streaming.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Yields:
        Chunks of output (strings or dicts) from the streaming response.
    """
    # Create config first to ensure thread_id is generated
    config = self._create_graph_config(**kwargs)
    thread_id = self._get_thread_id_from_config(config)

    graph_input = self._prepare_graph_input(query, thread_id=thread_id, **kwargs)

    try:
        async for event in self._compiled_graph.astream_events(graph_input, config=config, version="v2"):
            processed_output = self._process_langgraph_event(event)
            if processed_output is not None:
                yield processed_output

    except Exception as e:
        logger.error(f"Agent '{self.name}': Error during graph streaming: {e}")
        yield {"error": f"Streaming failed: {e}"}

async def arun_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
    """Asynchronously stream the LangGraph agent's response.

    If MCP configuration exists, connects to the MCP server and registers tools before streaming.
    This method properly handles both LM Invoker and LangChain model streaming:
    - For LM Invoker: Uses StreamEventHandler to capture streaming events
    - For LangChain models: Uses LangGraph's native streaming implementation

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Yields:
        Chunks of output (strings or dicts) from the streaming response.
    """
    await self._ensure_mcp_tools_initialized()
    async for chunk in self._arun_stream(query, **kwargs):
        yield chunk

async def _arun_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
    """Internal implementation of arun_stream without MCP handling.

    This method properly handles both LM Invoker and LangChain model streaming:
    - For LM Invoker: Uses StreamEventHandler to capture streaming events
    - For LangChain models: Uses LangGraph's native streaming implementation

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Yields:
        Chunks of output (strings or dicts) from the streaming response.
    """
    if self._has_lm_invoker():
        async for chunk in self._stream_with_lm_invoker(query, **kwargs):
            yield chunk
    else:
        async for chunk in self._stream_with_langgraph(query, **kwargs):
            yield chunk

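# --- Illustrative consumption sketch (not package code) --------------------
# Chunks may be plain text or dicts (e.g. {"tool_output": ...} or
# {"error": ...}), per the handlers above; `handle_structured_chunk` is a
# hypothetical helper.
#
#     async for chunk in agent.arun_stream("Summarize the report"):
#         if isinstance(chunk, str):
#             print(chunk, end="", flush=True)
#         else:
#             handle_structured_chunk(chunk)
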
def _initialize_mcp_client(self) -> None:
    """Initialize or recreate the MCP client with the current config, safely disposing of any previous client.

    This method creates a new LangchainMCPClient if MCP configuration exists,
    and safely disposes of any existing client before setting the new one.
    """
    new_client = LangchainMCPClient(self.mcp_config) if self.mcp_config else None
    self._set_mcp_client_safely(new_client)

async def _register_mcp_tools(self) -> None:
    """Initialize MCP tools once during agent setup using persistent sessions.

    This method connects to MCP servers, retrieves available tools, and integrates
    them into the agent's tool collection. It includes timeout handling to prevent
    hanging operations.

    Raises:
        RuntimeError: If MCP initialization times out after 30 seconds.
        Exception: If MCP tool initialization fails for other reasons.
    """
    try:
        logger.info(f"Agent '{self.name}': Initializing MCP tools with persistent sessions.")

        # Add timeout for initialization to prevent hanging
        await asyncio.wait_for(self.mcp_client.initialize(), timeout=30.0)

        mcp_tools = await self.mcp_client.get_tools()

        if not mcp_tools:
            logger.warning(f"Agent '{self.name}': No MCP tools retrieved from configured servers.")
            return

        self.mcp_tools.extend(mcp_tools)
        logger.info(f"Agent '{self.name}': Added {len(mcp_tools)} persistent MCP tools to graph.")
        self._rebuild_graph()

    except TimeoutError as err:
        logger.error(f"Agent '{self.name}': MCP initialization timed out")
        raise RuntimeError(f"Agent '{self.name}': MCP initialization timed out after 30 seconds") from err
    except Exception as e:
        logger.error(f"Agent '{self.name}': Failed to initialize persistent MCP tools: {e}", exc_info=True)
        raise

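# --- Illustrative, self-contained sketch (not package code) ----------------
# The timeout contract relied on above: asyncio.wait_for cancels the awaited
# coroutine and raises TimeoutError once the budget is exhausted.
#
#     import asyncio
#
#     async def demo() -> None:
#         try:
#             await asyncio.wait_for(asyncio.sleep(60), timeout=0.1)
#         except TimeoutError:  # asyncio.TimeoutError on Python < 3.11
#             print("timed out, task cancelled")
#
#     asyncio.run(demo())
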
async def cleanup(self) -> None:
    """Cleanup MCP resources including persistent sessions.

    This method performs best-effort cleanup of MCP client resources.
    Errors during cleanup are logged but do not raise exceptions to ensure
    the cleanup process completes gracefully.
    """
    if hasattr(self, "mcp_client") and self.mcp_client:
        try:
            await self.mcp_client.cleanup()
            logger.debug(f"Agent '{self.name}': MCP client cleanup completed")
        except Exception as e:
            logger.warning(f"Agent '{self.name}': Error during MCP client cleanup: {e}")
            # Don't re-raise - cleanup should be best-effort

async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
    """Asynchronously streams the agent's response in A2A format.

    Args:
        query: The input query for the agent.
        **kwargs: Additional keyword arguments.

    Yields:
        Dictionaries with "status" and "content" keys.
        Possible statuses: "working", "completed", "failed", "canceled".
    """
    await self._ensure_mcp_tools_initialized()
    async for chunk in self._arun_a2a_stream(query, **kwargs):
        yield chunk

async def arun_sse_stream(
    self,
    query: str,
    task_id: str | None = None,
    context_id: str | None = None,
    **kwargs: Any,
) -> AsyncGenerator[dict[str, Any], None]:
    """Stream agent response as SSE-compatible chunks.

    This method wraps arun_a2a_stream and transforms output to the normalized
    dict format matching A2AConnector.astream_to_agent output, enabling direct
    streaming without A2A server overhead.

    Args:
        query: The input query for the agent.
        task_id: Optional task identifier for the stream.
        context_id: Optional context identifier for the stream.
        **kwargs: Additional arguments passed to arun_a2a_stream.

    Yields:
        SSEChunk dicts with normalized structure:
        - status: "success" | "error"
        - task_state: "working" | "completed" | "failed" | "canceled"
        - content: Text content or None
        - event_type: Always string (never enum)
        - final: True for terminal events
        - metadata: Normalized metadata dict
        - artifacts: Only present when non-empty
    """
    if task_id is None:
        task_id = str(uuid.uuid4())
    if context_id is None:
        context_id = str(uuid.uuid4())

    # Extract pii_mapping from kwargs to pass to transformer (matching A2A executor behavior)
    pii_mapping = kwargs.get("pii_mapping")
    transformer = SSEChunkTransformer(task_id=task_id, context_id=context_id, pii_mapping=pii_mapping)
    try:
        async for chunk in transformer.transform_stream(self.arun_a2a_stream(query, **kwargs)):
            yield chunk
    except Exception as e:
        # Lazy import to support optional guardrails dependency
        from aip_agents.guardrails.exceptions import GuardrailViolationError

        if isinstance(e, GuardrailViolationError):
            # Re-raise guardrail violations without modification
            raise
        logger.error(f"Agent '{self.name}': Error in arun_sse_stream: {e}", exc_info=True)
        yield SSEChunkTransformer._create_error_chunk(f"Error during streaming: {e}")

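# --- Illustrative sketch (not package code) --------------------------------
# One normalized SSE chunk matching the contract documented above; all field
# values here are made up.
#
#     {
#         "status": "success",
#         "task_state": "working",
#         "content": "Looking up flights...",
#         "event_type": "status_update",
#         "final": False,
#         "metadata": {"step_id": "step-1"},
#     }
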
def _get_tool_processing_content(self, tool_names: list[str]) -> str:
    """Generate appropriate content prefix for tool processing messages.

    Args:
        tool_names: List of tool names to process.

    Returns:
        Formatted content string with appropriate prefix.
    """
    unique_tool_names = list(dict.fromkeys(tool_names))
    has_delegation_tools = any(name.startswith("delegate_to") for name in unique_tool_names)
    content_prefix = "Processing with sub-agents:" if has_delegation_tools else "Processing with tools:"
    return f"{content_prefix} {', '.join(unique_tool_names)}"

def _get_tool_completion_content(self, tool_names: list[str]) -> str:
    """Generate completion message for finished tool executions.

    Args:
        tool_names: List of tool names to summarize.

    Returns:
        Content string indicating completion.
    """
    unique_tool_names = list(dict.fromkeys(tool_names))
    has_delegation_tools = any(name.startswith("delegate_to") for name in unique_tool_names)
    content_prefix = "Completed sub-agents:" if has_delegation_tools else "Completed tools:"
    return f"{content_prefix} {', '.join(unique_tool_names)}"

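# --- Self-contained illustration (not package code) ------------------------
# The order-preserving dedup used in both methods above: dict.fromkeys keeps
# the first occurrence of each name.
_dedup_demo = list(dict.fromkeys(["search", "search", "delegate_to_writer"]))
assert _dedup_demo == ["search", "delegate_to_writer"]
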
def _parse_a2a_stream_message(
    self, message: BaseMessage, state: dict[str, Any] | None = None
) -> tuple[A2AEvent | None, bool]:
    """Parse LangChain messages into semantically meaningful A2A events.

    This method converts LangChain message types (AIMessage, ToolMessage) into
    structured A2AEvent objects that preserve semantic information and eliminate
    the need for string parsing downstream.

    Args:
        message: The LangChain message to parse (AIMessage, ToolMessage, etc.).
        state: Optional state dictionary containing pii_mapping and other data.

    Returns:
        A tuple containing:
        - A2AEvent | None: The parsed A2A event, or None if message should be skipped.
        - bool: True if this is a final event that should terminate the stream.
    """
    if isinstance(message, AIMessage) and message.tool_calls:
        return self._create_tool_call_event(message), False

    elif isinstance(message, ToolMessage):
        return self._create_tool_result_event(message), False

    elif isinstance(message, AIMessage) and message.content:
        return self._create_ai_message_event(message, state)

    return None, False

def _link_tool_call_to_previous_status(self, event: A2AEvent) -> None:
    """Link the tool call event to completed tool steps or the most recent status step.

    Supports both parallel and sequential operation modes:
    - "parallel": Links to ALL completed tool steps (default for backward compatibility)
    - "sequential": Links to only the most recent completed tool step

    Args:
        event: The A2AEvent to link to previous step.
    """
    try:
        thread_id = _THREAD_ID_CVAR.get()
        if thread_id:
            metadata = event.get("metadata", {})
            existing_step_ids = metadata.get("previous_step_ids") or []

            if existing_step_ids:
                return

            operation_mode = _OPERATION_MODE_CVAR.get() or "parallel"

            coord_completed_steps = self._coordinator_completed_tool_steps_by_thread.get(thread_id, [])
            completed_steps = coord_completed_steps or self._completed_tool_steps_by_thread.get(thread_id, [])
            if completed_steps:
                if operation_mode == "sequential":
                    metadata["previous_step_ids"] = [completed_steps[-1]]
                else:
                    metadata["previous_step_ids"] = completed_steps
                event["metadata"] = metadata
                return

            last_status_id = self._last_status_step_id_by_thread.get(thread_id)
            if last_status_id:
                metadata["previous_step_ids"] = [last_status_id]
                event["metadata"] = metadata
    except Exception as e:
        logger.warning("Failed linking tool call to previous step: %s", e, exc_info=True)

def _register_tool_call_parent_steps(self, event: A2AEvent, tool_calls_details: list[dict]) -> None:
    """Register parent step IDs for each tool call ID.

    Args:
        event: The A2AEvent containing the parent step.
        tool_calls_details: List of tool call details.
    """
    try:
        thread_id = _THREAD_ID_CVAR.get()
        if thread_id:
            parent_step_id = event["metadata"].get("step_id")
            if parent_step_id:
                parent_map = self._tool_parent_map_by_thread.setdefault(thread_id, {})
                for tool_call in tool_calls_details:
                    tool_call_id = tool_call.get("id")
                    if tool_call_id:
                        parent_map[str(tool_call_id)] = str(parent_step_id)
    except Exception as e:
        logger.warning("Registering tool call parent steps failed: %s", e, exc_info=True)

def _create_tool_call_event(self, message: AIMessage) -> A2AEvent:
    """Create an A2AEvent for tool invocation from AIMessage.

    Args:
        message: AIMessage containing tool calls.

    Returns:
        A2AEvent with TOOL_CALL event type and structured tool information.
    """
    tool_calls_details = [
        {
            "id": tool_call.get("id"),
            "name": tool_call["name"],
            "args": tool_call["args"],
        }
        for tool_call in message.tool_calls
    ]
    tool_names = [details["name"] for details in tool_calls_details]

    event = self._create_a2a_event(
        event_type=A2AStreamEventType.TOOL_CALL,
        content=self._get_tool_processing_content(tool_names),
        tool_info={"tool_calls": tool_calls_details, "status": "running"},
        metadata={"status": Status.RUNNING},
        is_final=False,
        step_usage=message.usage_metadata,
    )

    self._record_emitted_tool_calls(tool_calls_details)

    self._link_tool_call_to_previous_status(event)
    self._register_tool_call_parent_steps(event, tool_calls_details)

    return event

def _get_sub_agent_previous_steps(self, message: ToolMessage) -> list[str] | None:
    """Extract previous step IDs from sub-agent response metadata.

    Args:
        message: ToolMessage containing response metadata.

    Returns:
        List of previous step IDs or None if not available.
    """
    try:
        if not hasattr(message, "response_metadata") or not isinstance(message.response_metadata, dict):
            return None

        sub_prev = message.response_metadata.get("previous_step_ids")
        if isinstance(sub_prev, list) and sub_prev:
            return [str(x) for x in sub_prev if isinstance(x, str | int)]
        return None
    except Exception as e:
        logger.warning("Failed extracting sub-agent previous steps: %s", e, exc_info=True)
        return None

def _determine_previous_step_ids(self, message: ToolMessage, sub_prev: list[str] | None) -> list[str]:
    """Determine which previous step IDs to use for the event.

    Args:
        message: ToolMessage for the tool call.
        sub_prev: Previous step IDs from sub-agent, if available.

    Returns:
        List of previous step IDs to use.
    """
    if sub_prev:
        return sub_prev

    try:
        thread_id = _THREAD_ID_CVAR.get()
        if thread_id:
            parent_map = self._tool_parent_map_by_thread.get(thread_id, {})
            parent_step = parent_map.get(str(message.tool_call_id))
            if parent_step:
                return [parent_step]
    except Exception as e:
        logger.warning("Determining previous step IDs failed: %s", e, exc_info=True)

    return []

def _record_tool_completion(self, message: ToolMessage, event: A2AEvent) -> None:
    """Record tool completion for final event dependency tracking.

    Args:
        message: ToolMessage for the completed tool.
        event: The A2AEvent for the tool result.
    """
    try:
        thread_id = _THREAD_ID_CVAR.get()
        if not thread_id:
            return

        completed_list = self._completed_tool_steps_by_thread.setdefault(thread_id, [])
        coord_completed_list = self._coordinator_completed_tool_steps_by_thread.setdefault(thread_id, [])

        event_sid = (event.get("metadata") or {}).get("step_id")
        if isinstance(event_sid, str) and event_sid:
            completed_list.append(event_sid)
            coord_completed_list.append(event_sid)

        sub_prev = self._get_sub_agent_previous_steps(message) or []
        completed_list.extend(step_id for step_id in sub_prev if isinstance(step_id, str) and step_id)

        self._completed_tool_steps_by_thread[thread_id] = list(dict.fromkeys(completed_list))
        self._coordinator_completed_tool_steps_by_thread[thread_id] = list(dict.fromkeys(coord_completed_list))
    except Exception as e:
        logger.warning("Recording tool completion failed: %s", e, exc_info=True)

def _create_tool_result_event(self, message: ToolMessage) -> A2AEvent:
    """Create an A2AEvent for tool completion from ToolMessage.

    Args:
        message: ToolMessage containing tool execution results.

    Returns:
        A2AEvent with TOOL_RESULT event type and execution details.
    """
    tool_info = self._extract_tool_info_from_message(message)
    previous_ids = self._determine_previous_step_ids(
        message,
        self._get_sub_agent_previous_steps(message),
    )

    event = self._create_a2a_event(
        event_type=A2AStreamEventType.TOOL_RESULT,
        content=self._build_tool_event_content(tool_info["name"], tool_info["output"], message),
        tool_info={
            "id": message.tool_call_id,
            "name": tool_info["name"],
            "args": tool_info["args"],
            "output": tool_info["output"],
            "execution_time": tool_info["execution_time"],
        },
        metadata=self._build_tool_event_metadata(tool_info["execution_time"], previous_ids),
        is_final=False,
        step_usage=message.response_metadata.get(USAGE_METADATA_KEY),
    )

    self._propagate_hitl_metadata(message, event)
    self._record_tool_completion(message, event)
    self._discard_emitted_tool_call(getattr(message, "tool_call_id", None))

    return event

def _extract_tool_info_from_message(self, message: ToolMessage) -> dict[str, Any]:
    """Extract tool details from a ToolMessage.

    Args:
        message: The ToolMessage to extract information from.

    Returns:
        Dictionary containing tool name, args, output, and execution time.
    """
    tool_call_info = getattr(message, "tool_calls", {})
    tool_name = getattr(message, "name", None) or tool_call_info.get("name", "unknown")
    return {
        "name": tool_name,
        "args": tool_call_info.get("args", {}),
        "output": tool_call_info.get("output", message.content),
        "execution_time": tool_call_info.get("time"),
    }

def _build_tool_event_content(self, tool_name: str, tool_output: Any, message: ToolMessage) -> str:
    """Determine event content for a tool result.

    Args:
        tool_name: Name of the tool that was executed.
        tool_output: The output returned by the tool.
        message: The ToolMessage containing response metadata and tool call information.

    Returns:
        String content for the tool result event.
    """
    response_metadata = getattr(message, "response_metadata", None) or {}
    hitl_meta = response_metadata.get(MetadataFieldKeys.HITL) if isinstance(response_metadata, dict) else None

    if hitl_meta and hitl_meta.get("required"):
        return str(tool_output) if tool_output else self._get_tool_processing_content([tool_name])

    return self._get_tool_completion_content([tool_name])

def _build_tool_event_metadata(
    self,
    execution_time: Any,
    previous_ids: list[str] | None,
) -> dict[str, Any]:
    """Build metadata payload for tool result events.

    Args:
        execution_time: Time taken to execute the tool.
        previous_ids: Optional list of previous step IDs this tool depends on.

    Returns:
        Dictionary containing status, execution time, and previous step IDs.
    """
    return {
        "status": Status.FINISHED,
        "time": execution_time,
        "previous_step_ids": previous_ids,
    }

def _propagate_hitl_metadata(self, message: ToolMessage, event: A2AEvent) -> None:
    """Copy HITL metadata from ToolMessage into the event if available.

    Args:
        message: The ToolMessage containing response metadata with HITL information.
        event: The A2AEvent to update with HITL metadata if present.
    """
    response_metadata = getattr(message, "response_metadata", None)
    if not isinstance(response_metadata, dict):
        return

    hitl_meta = response_metadata.get(MetadataFieldKeys.HITL)
    if hitl_meta is None:
        return

    try:
        hitl_model = HitlMetadata.model_validate(hitl_meta)
    except ValidationError as exc:
        raise ValueError("Invalid HITL metadata payload encountered") from exc

    metadata = event.get("metadata")
    if isinstance(metadata, dict):
        try:
            metadata[MetadataFieldKeys.HITL] = hitl_model.as_payload()
        except Exception as exc:  # noqa: BLE001
            logger.warning("Failed to propagate HITL metadata to event: %s", exc)

def _create_ai_message_event(
    self, message: AIMessage, state: dict[str, Any] | None = None
) -> tuple[A2AEvent, bool]:
    """Create an A2AEvent for AI-generated content from AIMessage.

    Args:
        message: AIMessage containing AI-generated content.
        state: Optional state dictionary containing pii_mapping and other data.

    Returns:
        A tuple containing:
        - A2AEvent: Either CONTENT_CHUNK or FINAL_RESPONSE event.
        - bool: True if this is a final response, False for streaming content.
    """
    is_final_response = self._is_final_response(message)
    metadata = self._build_metadata_for_final_response(is_final_response, state)
    content = deanonymize_final_response_content(
        content=message.content,
        is_final_response=is_final_response,
        metadata=metadata,
    )
    event = self._create_a2a_event(
        event_type=A2AStreamEventType.FINAL_RESPONSE if is_final_response else A2AStreamEventType.CONTENT_CHUNK,
        content=content,
        tool_info=None,
        metadata=metadata,
        is_final=is_final_response,
        step_usage=message.usage_metadata,
    )
    return event, is_final_response

def _is_final_response(self, message: AIMessage) -> bool:
    """Check if the message represents a final response.

    Args:
        message: AIMessage to check.

    Returns:
        True if this is a final response, False otherwise.
    """
    return bool(message.response_metadata) and message.response_metadata.get("finish_reason") == "stop"

def _build_metadata_for_final_response(
    self, is_final_response: bool, state: dict[str, Any] | None = None
) -> dict[str, Any]:
    """Build metadata for final response including previous_step_ids and pii_mapping.

    Args:
        is_final_response: Whether this is a final response.
        state: Optional state dictionary containing pii_mapping and other data.

    Returns:
        Metadata dictionary with previous_step_ids and pii_mapping if applicable.
    """
    metadata: dict[str, Any] = {}

    if not is_final_response:
        return metadata

    try:
        previous_step_ids = self._get_previous_step_ids()
        if previous_step_ids:
            metadata["previous_step_ids"] = previous_step_ids
    except Exception as e:
        logger.warning("Attaching previous_step_ids to final response failed: %s", e, exc_info=True)

    # Add PII mapping if present in state or nested metadata
    if state:
        pii_mapping = state.get("pii_mapping") or state.get("metadata", {}).get("pii_mapping")
        if pii_mapping:
            metadata[MetadataFieldKeys.PII_MAPPING] = pii_mapping

    return metadata

def _get_previous_step_ids(self) -> list[str] | None:
    """Get the list of previous step IDs based on thread context and operation mode.

    Returns:
        List of step IDs or None if no thread context or steps found.
    """
    thread_id = _THREAD_ID_CVAR.get()
    if not thread_id:
        return None

    operation_mode = _OPERATION_MODE_CVAR.get() or "parallel"

    coord_ids = self._coordinator_completed_tool_steps_by_thread.get(thread_id, [])
    if coord_ids:
        return self._filter_step_ids_by_mode(coord_ids, operation_mode)

    completed_ids = self._completed_tool_steps_by_thread.get(thread_id, [])
    if completed_ids:
        return self._filter_step_ids_by_mode(completed_ids, operation_mode)

    return None

def _filter_step_ids_by_mode(self, step_ids: list[str], operation_mode: str) -> list[str]:
    """Filter step IDs based on operation mode.

    Args:
        step_ids: List of step IDs to filter.
        operation_mode: Either "sequential" or "parallel".

    Returns:
        Filtered list of step IDs.
    """
    if operation_mode == "sequential":
        return [step_ids[-1]] if step_ids else []
    else:
        return step_ids

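# --- Illustrative calls (not package code) ---------------------------------
# "sequential" keeps only the most recent step; "parallel" keeps the full
# history, assuming an `agent` instance:
#
#     agent._filter_step_ids_by_mode(["s1", "s2", "s3"], "sequential")  # -> ["s3"]
#     agent._filter_step_ids_by_mode(["s1", "s2", "s3"], "parallel")    # -> ["s1", "s2", "s3"]
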
def _process_artifacts(
    self,
    item: dict[str, Any],
    pending_artifacts: list,
    seen_artifact_hashes: set,
) -> None:
    """Process artifacts from a graph stream item.

    Args:
        item: The event item from the graph stream.
        pending_artifacts: A list of artifacts waiting to be attached to a message.
        seen_artifact_hashes: A set of hashes of artifacts already processed.
    """
    if "artifacts" not in item or not item["artifacts"]:
        return

    logger.info(f"Agent '{self.name}': Artifacts: {len(item['artifacts'])}")
    for artifact in item["artifacts"]:
        artifact_data = str(artifact.get("data", ""))
        artifact_name = artifact.get("name", "")
        artifact_hash = hashlib.sha256(f"{artifact_data}:{artifact_name}".encode()).hexdigest()

        if artifact_hash not in seen_artifact_hashes:
            pending_artifacts.append(artifact)
            seen_artifact_hashes.add(artifact_hash)

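# --- Self-contained illustration (not package code) ------------------------
# The dedup key above: identical "data:name" pairs hash to the same digest,
# so repeated artifacts are skipped. Relies on hashlib already being
# imported by this module.
_h1 = hashlib.sha256("data:report.pdf".encode()).hexdigest()
_h2 = hashlib.sha256("data:report.pdf".encode()).hexdigest()
assert _h1 == _h2
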
def _process_a2a_stream_item(
    self,
    item: dict[str, Any],
    pending_artifacts: list,
    seen_artifact_hashes: set,
    processed_message_count: int,
) -> tuple[list[A2AEvent], bool, int]:
    """Process a single item from the graph stream, handling artifacts and messages.

    This method processes LangGraph stream items and converts them into A2AEvent objects
    with proper metadata merging, artifact attachment, and reference handling.

    Args:
        item: The event item from the graph stream containing messages and metadata.
        pending_artifacts: List of artifacts waiting to be attached to messages.
        seen_artifact_hashes: Set of hashes of artifacts already processed.
        processed_message_count: Number of messages already processed from the stream.

    Returns:
        A tuple containing:
        - list[A2AEvent]: List of A2A events to yield to the executor.
        - bool: True if a final event was encountered.
        - int: Updated count of processed messages.
    """
    events_to_yield = []
    is_final_event = False

    self._process_artifacts(item, pending_artifacts, seen_artifact_hashes)
    references = item.get("references", [])

    if "messages" not in item or not item["messages"]:
        return [], False, processed_message_count

    new_messages = item["messages"][processed_message_count:]
    updated_message_count = len(item["messages"])
    for message in new_messages:
        event_data, is_final = self._parse_a2a_stream_message(message, item)

        if event_data and self._should_yield_a2a_event(event_data):
            self._enhance_event_with_context(event_data, item, pending_artifacts, references, is_final)
            events_to_yield.append(event_data)

        if is_final:
            is_final_event = True

    return events_to_yield, is_final_event, updated_message_count

def _enhance_event_with_context(
    self,
    event_data: A2AEvent,
    stream_item: dict[str, Any],
    pending_artifacts: list,
    references: list[Any],
    is_final: bool,
) -> None:
    """Enhance A2AEvent with context from the stream item.

    This method adds metadata, artifacts, and references to the A2AEvent
    based on the current stream item context.

    Args:
        event_data: The A2AEvent to enhance.
        stream_item: The stream item containing context information.
        pending_artifacts: List of artifacts to attach to the event.
        references: List of references to attach to final events.
        is_final: Whether this is a final event.
    """
    self._merge_event_metadata(event_data, stream_item)
    self._attach_pending_artifacts(event_data, pending_artifacts)

    if is_final and references:
        self._attach_references_to_final_event(event_data, references)

    if is_final and stream_item.get(TOTAL_USAGE_KEY):
        event_data[TOTAL_USAGE_KEY] = stream_item[TOTAL_USAGE_KEY]

    def _merge_previous_step_ids(
        self,
        state_prev: list[str | int | None] | None,
        event_prev: list[str | int | None] | None,
    ) -> list[str | int] | None:
        """Merge previous_step_ids from state and event metadata.

        Args:
            state_prev: Previous step IDs from state metadata.
            event_prev: Previous step IDs from event metadata.

        Returns:
            Combined list of previous step IDs, or None if no lists to merge.
        """
        if (state_prev is None and event_prev is None) or (
            not isinstance(state_prev, list)
            and state_prev is not None
            and not isinstance(event_prev, list)
            and event_prev is not None
        ):
            return None

        state_list = state_prev if isinstance(state_prev, list) else []
        event_list = event_prev if isinstance(event_prev, list) else []

        combined: list[Any] = []

        for step_id in event_list:
            if step_id is not None and step_id not in combined:
                combined.append(step_id)

        for step_id in state_list:
            if step_id is not None and step_id not in combined:
                combined.append(step_id)

        return combined

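    # Illustrative sketch (hypothetical inputs, not from the original source):
    # event ids are kept first and duplicates are dropped, so
    #   self._merge_previous_step_ids(["s1", "s2"], ["s2", "s3"])
    # returns ["s2", "s3", "s1"], while (None, None) returns None.
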
    def _merge_event_metadata(self, event_data: A2AEvent, stream_item: dict[str, Any]) -> None:
        """Merge metadata from stream item into the A2AEvent.

        Args:
            event_data: The A2AEvent to update with merged metadata.
            stream_item: The stream item containing state metadata.
        """
        state_metadata = stream_item.get("metadata", {})
        existing_metadata = event_data.get("metadata", {})
        if isinstance(existing_metadata, dict) and isinstance(state_metadata, dict):
            merged_metadata = {**state_metadata, **existing_metadata}

            state_prev = state_metadata.get("previous_step_ids") or []
            event_prev = existing_metadata.get("previous_step_ids") or []
            combined_ids = self._merge_previous_step_ids(state_prev, event_prev)
            if combined_ids is not None:
                merged_metadata["previous_step_ids"] = combined_ids
        else:
            merged_metadata = state_metadata or existing_metadata

        event_data["metadata"] = merged_metadata

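    # Example (hypothetical values): event metadata wins on key conflicts because
    # it is unpacked last in {**state_metadata, **existing_metadata}; merging a
    # state {"step_id": "a"} with an event {"step_id": "b"} keeps "b".
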
    def _attach_pending_artifacts(self, event_data: A2AEvent, pending_artifacts: list) -> None:
        """Attach pending artifacts to the A2AEvent and clear the pending list.

        Args:
            event_data: The A2AEvent to attach artifacts to.
            pending_artifacts: List of artifacts to attach and clear.
        """
        if pending_artifacts:
            event_data["artifacts"] = pending_artifacts.copy()
            pending_artifacts.clear()

    def _attach_references_to_final_event(self, event_data: A2AEvent, references: list[Any]) -> None:
        """Attach references to final events.

        Args:
            event_data: The final A2AEvent to attach references to.
            references: List of references to attach.
        """
        if references:
            event_data["references"] = references

    def _setup_thread_context(self, config: dict[str, Any]) -> tuple[str | None, Any]:
        """Set up thread context for step linkage during streaming.

        Args:
            config: Graph configuration

        Returns:
            Tuple of (thread_id, context_token)
        """
        current_thread_id: str | None = None
        try:
            configurable = config.get("configurable", {})  # type: ignore[assignment]
            thread_key = self.thread_id_key or "thread_id"
            current_thread_id = str(configurable.get(thread_key)) if configurable.get(thread_key) else None
        except Exception:
            current_thread_id = None

        token = None

        try:
            start_step_counter_scope()
        except Exception as exc:
            logger.debug("Starting step counter scope failed: %s", exc)

        if current_thread_id:
            token = _THREAD_ID_CVAR.set(current_thread_id)
            self._tool_parent_map_by_thread[current_thread_id] = {}
            self._completed_tool_steps_by_thread[current_thread_id] = []
            self._emitted_tool_calls_by_thread[current_thread_id] = set()

        return current_thread_id, token

    def _cleanup_thread_context(self, current_thread_id: str | None, token: Any) -> None:
        """Clean up thread context and reset context variables.

        Args:
            current_thread_id: The thread ID to clean up
            token: The context token to reset
        """
        try:
            end_step_counter_scope()
        except Exception as exc:
            logger.debug("Ending step counter scope failed: %s", exc)

        if current_thread_id:
            self._tool_parent_map_by_thread.pop(current_thread_id, None)
            self._completed_tool_steps_by_thread.pop(current_thread_id, None)
            self._last_status_step_id_by_thread.pop(current_thread_id, None)
            self._emitted_tool_calls_by_thread.pop(current_thread_id, None)

        if token is not None:
            try:
                _THREAD_ID_CVAR.reset(token)
            except ValueError as e:
                logger.debug("Context variable token from different context, skipping reset: %s", e)
            except Exception as e:
                logger.error("Resetting _THREAD_ID_CVAR failed: %s", e, exc_info=True)
        try:
            _STEP_LIMIT_CONFIG_CVAR.set(None)
        except Exception:
            logger.debug("Failed to reset step limit config context; continuing cleanup.")

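    # Background sketch (standard-library behaviour, simplified): the token
    # returned by ContextVar.set() restores the previous value on reset, which
    # is why _setup_thread_context and _cleanup_thread_context are paired:
    #
    #   import contextvars
    #   cvar = contextvars.ContextVar("thread_id", default=None)
    #   tok = cvar.set("t-123")   # scope begins; streaming work sees "t-123"
    #   cvar.reset(tok)           # scope ends; previous value restored
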
    def _handle_stream_item(
        self, item: tuple, pending_artifacts: list, seen_artifact_hashes: set, processed_message_count: int
    ) -> tuple[list[A2AEvent], bool, int]:
        """Handle a single stream item.

        Args:
            item: Stream item tuple (mode, data)
            pending_artifacts: List of pending artifacts
            seen_artifact_hashes: Set of seen artifact hashes
            processed_message_count: Current message count

        Returns:
            Tuple of (events_to_yield, is_final, updated_message_count)
        """
        mode, data = item

        if mode == StreamMode.CUSTOM:
            delegation_event: A2AEvent = data
            if self._should_yield_a2a_event(delegation_event):
                return [delegation_event], False, processed_message_count
            return [], False, processed_message_count
        elif mode == StreamMode.VALUES:
            stream_data = data
        else:
            return [], False, processed_message_count

        events, is_final, updated_message_count = self._process_a2a_stream_item(
            stream_data, pending_artifacts, seen_artifact_hashes, processed_message_count
        )
        return events, is_final, updated_message_count

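    # Illustrative sketch (hypothetical payloads): items arriving from the graph
    # stream are (mode, data) tuples, e.g.
    #   (StreamMode.VALUES, {"messages": [...], "metadata": {...}})
    #   (StreamMode.CUSTOM, {"event_type": ..., "content": ...})
    # _handle_stream_item forwards CUSTOM events as-is and routes VALUES payloads
    # through _process_a2a_stream_item; other modes are ignored here.
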
    async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
        """Internal implementation of arun_a2a_stream without MCP handling.

        Args:
            query: The input query for the agent.
            **kwargs: Additional keyword arguments.

        Yields:
            Dictionaries with "status" and "content" keys for status events.
            Status events may include an "artifacts" field when tools generate artifacts.
            Possible statuses: "working", "completed", "failed", "canceled".
        """
        context = self._initialize_streaming_context(query, **kwargs)

        try:
            async for event in self._handle_streaming_process(context):
                yield event

            self._persist_memory_if_needed(context)

            async for event in self._ensure_final_completion(context):
                yield event

        except Exception as e:
            async for event in self._handle_streaming_error(context, e):
                yield event
        finally:
            self._cleanup_thread_context(context.current_thread_id, context.token)

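    # Usage sketch (hypothetical caller; assumes an initialized agent instance):
    #
    #   async for event in agent._arun_a2a_stream("Summarize the report"):
    #       status = event.get("status") or event.get("event_type")
    #       print(status, str(event.get("content", ""))[:80])
    #
    # Consumers typically stop after the first event whose status is
    # "completed", "failed", or "canceled".
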
    def _initialize_streaming_context(self, query: str, **kwargs: Any) -> "_StreamingContext":
        """Initialize the streaming context with all necessary setup.

        Args:
            query: The user's input query to process.
            **kwargs: Additional keyword arguments including optional metadata and configuration.

        Returns:
            Configured _StreamingContext object ready for streaming execution.
        """
        files = kwargs.pop("files", [])
        if files is None:
            files = []

        memory_user_id: str | None = kwargs.get("memory_user_id")

        # Create config first to ensure thread_id is generated
        config = self._create_graph_config(**kwargs)
        thread_id = self._get_thread_id_from_config(config)

        augmented_query = augment_query_with_file_paths(query=query, files=files)
        graph_input = self._prepare_graph_input(augmented_query, thread_id=thread_id, **kwargs)

        current_thread_id, token = self._setup_thread_context(config)

        if self.enable_a2a_token_streaming and self.model:
            self.model.disable_streaming = False

        return _StreamingContext(
            original_query=query,
            graph_input=graph_input,
            config=config,
            memory_user_id=memory_user_id,
            current_thread_id=current_thread_id,
            token=token,
            enable_token_streaming=self.enable_a2a_token_streaming,
        )

    async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
        """Handle the main streaming process including initial status and event processing.

        Args:
            context: The streaming context containing query, config, and thread information.

        Yields:
            Streaming events including initial status and processed streaming items.
        """
        initial_status_event = self._create_initial_status_event()
        self._log_streaming_event_debug("initial_status", initial_status_event)
        yield initial_status_event

        async for event in self._process_streaming_items(context):
            self._log_streaming_event_debug("process_stream_item", event)
            yield event

    def _create_initial_status_event(self) -> dict[str, Any]:
        """Create and set up the initial status event."""
        initial_status_event = self._create_a2a_event(
            event_type=A2AStreamEventType.STATUS_UPDATE, content=DefaultStepMessages.EN.value
        )

        try:
            thread_id = _THREAD_ID_CVAR.get()
            if thread_id:
                step_id = initial_status_event.get("metadata", {}).get("step_id")
                if step_id:
                    self._last_status_step_id_by_thread[thread_id] = str(step_id)
        except Exception:
            pass

        return initial_status_event

    async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
        """Process individual streaming items from the LangGraph execution.

        Handles the core streaming logic by iterating through items produced by
        the compiled LangGraph, processing both VALUES and CUSTOM stream modes,
        and managing final event generation.

        Args:
            context: The streaming context containing graph input, configuration,
                and state tracking information.

        Yields:
            dict[str, Any]: A2A events generated from the stream processing,
                including status updates, final responses, and completion events.
        """
        if context.enable_token_streaming:
            if self.event_emitter is None:
                self.event_emitter = self._create_default_event_emitter()
            elif not self._get_stream_handler():
                logger.warning(
                    "Agent '%s': No StreamEventHandler found in event_emitter. "
                    "Reinitializing event_emitter using default emitter.",
                    self.name,
                )
                self.event_emitter = self._create_default_event_emitter()

            async for event in self._process_a2a_streaming_with_tokens(context):
                yield event
        else:
            enhanced_input = context.graph_input
            async for event in self._create_graph_stream_events(enhanced_input, context):
                yield event

    async def _process_a2a_streaming_with_tokens(
        self, context: "_StreamingContext"
    ) -> AsyncGenerator[dict[str, Any], None]:
        """Process A2A streaming with token streaming support using aiostream.

        Supports both LM Invoker and LangChain models by detecting the appropriate
        token source and merging with graph events.

        Uses aiostream to merge token streaming and graph execution streams,
        yielding events in real-time order as they arrive.

        Args:
            context: The streaming context containing graph input, configuration,
                and state tracking information.

        Yields:
            dict[str, Any]: A2A events generated from the stream processing,
                including status updates, final responses, and completion events.

        Raises:
            RuntimeError: If token streaming is requested but event_emitter is not available.
        """
        if not self.event_emitter:
            raise RuntimeError(f"Agent '{self.name}': Event emitter required for token streaming")
        if astream is None:
            raise RuntimeError(
                "aiostream is required for token streaming support. "
                "Install the 'aiostream' dependency or disable token streaming."
            )

        try:
            if self._has_lm_invoker():
                token_stream, enhanced_input = self._create_token_stream(context)
                graph_stream = self._create_graph_stream_events(enhanced_input, context)

                merged = astream.merge(token_stream, graph_stream)
                async with merged.stream() as merged_stream:
                    async for event in merged_stream:
                        yield event
            else:
                _, enhanced_input = self._create_token_stream(context)
                async for event in self._create_graph_stream_events(enhanced_input, context):
                    yield event

        except Exception as e:
            if self.event_emitter is not None:
                await self.event_emitter.close()
            logger.error(f"Agent '{self.name}': Error during A2A token streaming: {e}")
            raise

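    # Minimal sketch of the merge pattern used above (aiostream's public API;
    # the two generators here are hypothetical stand-ins):
    #
    #   from aiostream import stream as astream
    #
    #   merged = astream.merge(token_generator(), graph_generator())
    #   async with merged.stream() as merged_stream:
    #       async for event in merged_stream:
    #           ...  # events arrive in real-time order from either source
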
    async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[dict[str, Any], None]:
        """Generate A2A events from LM Invoker token stream.

        Uses StreamEventHandler to capture tokens emitted by LM Invoker.

        Yields:
            A2A events generated from LM Invoker token stream.

        Raises:
            RuntimeError: If no StreamEventHandler is found in event_emitter.
        """
        stream_handler = self._get_stream_handler()
        # Guard matches the documented Raises contract; without it a missing
        # handler would only surface as a logged AttributeError below.
        if stream_handler is None:
            raise RuntimeError(f"Agent '{self.name}': No StreamEventHandler found in event_emitter")

        try:
            async for event in stream_handler.stream():
                if event is None:
                    break

                token_event = self._convert_raw_token_to_a2a_event(event)
                if token_event:
                    yield token_event
        except Exception as e:
            logger.error(f"Agent '{self.name}': LM Invoker token stream error: {e}")

    def _create_token_stream(
        self,
        context: "_StreamingContext",
    ) -> tuple[AsyncGenerator[dict[str, Any], None] | None, dict[str, Any]]:
        """Create appropriate token stream and enhanced input for the active model backend.

        Args:
            context: Streaming context containing graph input and configuration.

        Returns:
            Tuple of (token_stream, enhanced_input) where token_stream yields A2A token
            events (or is None when no LM Invoker backend is active) and enhanced_input
            is the graph input dictionary, augmented with the event emitter when
            required by LM Invoker backends.
        """
        if self._has_lm_invoker():
            token_stream = self._create_lm_invoker_token_stream()
            enhanced_input = {**context.graph_input, "event_emitter": self.event_emitter}
        else:
            token_stream = None
            enhanced_input = context.graph_input

        return token_stream, enhanced_input

    async def _create_graph_stream_events(
        self, enhanced_input: dict[str, Any], context: "_StreamingContext"
    ) -> AsyncGenerator[dict[str, Any], None]:
        """Generate A2A events from graph execution.

        Args:
            enhanced_input: The enhanced input for the graph execution.
            context: The streaming context containing state tracking information.

        Yields:
            A2A events generated from graph execution.
        """
        try:
            stream_modes = self._get_stream_modes(context)
            graph_execution = self._compiled_graph.astream(
                enhanced_input, config=context.config, stream_mode=stream_modes
            )

            async for item in graph_execution:
                stream_mode, stream_data = item

                if stream_mode == StreamMode.MESSAGES:
                    async for token_event in self._process_message_stream_item(stream_data):
                        yield token_event
                    continue

                async for event in self._process_graph_stream_item(item, stream_mode, stream_data, context):
                    yield event
        except Exception as e:
            logger.error(f"Agent '{self.name}': Graph processing error: {e}")
            raise

    def _get_stream_modes(self, context: "_StreamingContext") -> list[str]:
        """Determine stream modes based on token streaming configuration.

        Args:
            context: Streaming context containing token streaming configuration.

        Returns:
            List of stream modes to use for graph execution.
        """
        stream_modes = [StreamMode.VALUES, StreamMode.CUSTOM]

        if context.enable_token_streaming and not self._has_lm_invoker():
            stream_modes.append(StreamMode.MESSAGES)

        return stream_modes

    async def _process_graph_stream_item(
        self,
        item: tuple[str, Any],
        stream_mode: str,
        stream_data: Any,
        context: "_StreamingContext",
    ) -> AsyncGenerator[dict[str, Any], None]:
        """Process a single graph stream item and yield A2A events.

        Args:
            item: The stream item tuple (mode, data).
            stream_mode: The stream mode of this item.
            stream_data: The data from the stream item.
            context: Streaming context for state tracking.

        Yields:
            A2A events generated from the stream item.
        """
        context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES else context.final_state

        events, is_final, context.processed_message_count = self._handle_stream_item(
            item, context.pending_artifacts, context.seen_artifact_hashes, context.processed_message_count
        )

        if is_final:
            context.final_event_yielded = True

        for event in events:
            self._capture_final_content_if_needed(context, event)
            processed_event = self._update_final_response_for_streaming(context, event)
            yield processed_event

    async def _process_message_stream_item(
        self, message_data: tuple[Any, dict[str, Any]]
    ) -> AsyncGenerator[dict[str, Any], None]:
        """Process message stream items to extract token events.

        The "messages" stream mode yields tuples of (AIMessageChunk, metadata).
        This method extracts token content from AIMessageChunk and converts it
        to A2A CONTENT_CHUNK events with TOKEN kind.

        Args:
            message_data: Tuple of (message_chunk, metadata) from the messages stream.

        Yields:
            A2A CONTENT_CHUNK events with TOKEN kind.
        """
        try:
            message_chunk, _ = message_data

            # Filter out chunks whose response_metadata carries a finish_reason,
            # since those are responses from a sub-agent.
            if hasattr(message_chunk, "response_metadata") and message_chunk.response_metadata:
                if "finish_reason" in message_chunk.response_metadata:
                    return

            is_tool_call_event = hasattr(message_chunk, "tool_calls") and message_chunk.tool_calls
            is_has_content_event = hasattr(message_chunk, "content") and message_chunk.content

            if is_has_content_event and not is_tool_call_event:
                token_content = message_chunk.content
                token_event = self._create_a2a_event(
                    event_type=A2AStreamEventType.CONTENT_CHUNK,
                    content=token_content,
                    metadata={MetadataFieldKeys.KIND: Kind.TOKEN},
                )
                yield token_event

        except Exception as e:
            logger.error(f"Agent '{self.name}': Error processing message stream item: {e}")

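    # Illustrative sketch (hypothetical chunk): the "messages" stream mode yields
    # (AIMessageChunk, metadata) tuples, e.g.
    #   (AIMessageChunk(content="Hel"), {"langgraph_node": "agent", ...})
    # Only chunks that carry content and no tool_calls become TOKEN events.
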
    def _update_final_response_for_streaming(
        self, context: "_StreamingContext", event: dict[str, Any]
    ) -> dict[str, Any]:
        """Update final response events with appropriate streaming configuration.

        For FINAL_RESPONSE events, this method updates the metadata and optionally clears
        the content when token streaming is active to prevent sending duplicate content.

        Args:
            context: The streaming context containing streaming configuration
            event: The event dictionary to process

        Returns:
            The processed event dictionary with updated metadata and content
        """
        if event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
            event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = False
            if context.enable_token_streaming:
                event["content"] = ""
                event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = True
        return event

    def _convert_raw_token_to_a2a_event(self, raw_event: str) -> dict[str, Any] | None:
        """Parse a raw token event into an A2A event.

        Args:
            raw_event: JSON-encoded token event emitted by the stream handler.

        Returns:
            An A2A CONTENT_CHUNK event with TOKEN kind, or None when the raw
            event is empty or cannot be parsed.
        """
        try:
            event_data = json.loads(raw_event)
            content = event_data.get("value", "")
            if content:
                return self._create_a2a_event(
                    event_type=A2AStreamEventType.CONTENT_CHUNK,
                    content=content,
                    metadata={MetadataFieldKeys.KIND: Kind.TOKEN},
                )
        except Exception as e:
            logger.debug(f"Agent '{self.name}': Error parsing token event: {e}")
        return None

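    # Example (hypothetical raw payload): a handler-emitted token such as
    #   '{"value": "Hel"}'
    # becomes a CONTENT_CHUNK event with content "Hel" and KIND=TOKEN, while
    # '{"value": ""}' and malformed JSON both yield None.
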
    def _capture_final_content_if_needed(self, context: "_StreamingContext", event: dict[str, Any]) -> None:
        """Capture final content from A2A events for memory persistence.

        Monitors A2A events for final response content and triggers early memory
        persistence to ensure conversation content is saved even if consumers
        stop reading the stream after receiving the final response.

        Args:
            context: The streaming context containing memory state and user
                identification information.
            event: The A2A event dictionary that may contain final response content.
        """
        try:
            if isinstance(event, dict) and event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
                context.last_final_content = event.get("content")
                should_save_early = (
                    self._memory_enabled()
                    and (not context.saved_memory)
                    and isinstance(context.last_final_content, str)
                    and context.last_final_content
                )
                if should_save_early:
                    try:
                        logger.info(
                            "Agent '%s': A2A persisting memory early (len=%d) for user_id='%s'",
                            self.name,
                            len(context.last_final_content),
                            context.memory_user_id or self.memory_agent_id,
                        )
                    except Exception:
                        pass
                    try:
                        self._memory_save_interaction(
                            user_text=context.original_query,
                            ai_text=context.last_final_content,
                            memory_user_id=context.memory_user_id,
                        )
                        context.saved_memory = True
                    except Exception:
                        pass
        except Exception:
            pass

    def _persist_memory_if_needed(self, context: "_StreamingContext") -> None:
        """Persist memory using the final state output (best-effort).

        Attempts to save the conversation to memory using the best available
        content source, first trying captured final content, then falling back
        to extracting content from the final state.

        Args:
            context: The streaming context containing the final state, captured
                content, and memory persistence state.
        """
        try:
            if context.last_final_content is not None:
                final_text = context.last_final_content
            elif isinstance(context.final_state, dict):
                final_text = self._extract_output_from_final_state(context.final_state)
            else:
                final_text = ""
            if (not context.saved_memory) and isinstance(final_text, str) and final_text:
                try:
                    logger.info(
                        "Agent '%s': A2A persisting memory after stream (len=%d) for user_id='%s'",
                        self.name,
                        len(final_text),
                        context.memory_user_id or self.memory_agent_id,
                    )
                except Exception:
                    pass
                self._memory_save_interaction(
                    user_text=context.original_query, ai_text=final_text, memory_user_id=context.memory_user_id
                )
                context.saved_memory = True
        except Exception:
            pass

    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
        """Ensure final completion events are yielded if not already done.

        Args:
            context: The streaming context containing pending artifacts and
                other state information.

        Yields:
            dict[str, Any]: The final completion event.
        """
        if not context.final_event_yielded:
            completion_event = self._create_completion_event(context.pending_artifacts, context.final_state)
            self._log_streaming_event_debug("final_completion", completion_event)
            yield completion_event

    async def _handle_streaming_error(
        self,
        context: "_StreamingContext",
        error: Exception,
    ) -> AsyncGenerator[dict[str, Any], None]:
        """Handle streaming errors gracefully.

        Provides error handling for the A2A streaming process, ensuring errors
        are properly logged and communicated to the client while preserving
        any pending artifacts generated before the error occurred.

        Args:
            context: The streaming context containing pending artifacts and
                other state information.
            error: The exception that occurred during streaming.

        Yields:
            dict[str, Any]: An error event containing the failure status and
                error message, optionally including any pending artifacts.
        """
        logger.error(f"Error in agent stream: {error}", exc_info=True)
        error_event: dict[str, Any] = {"status": "failed", "content": f"Error: {str(error)}"}

        if context.pending_artifacts:
            error_event["artifacts"] = context.pending_artifacts

        self._log_streaming_event_debug("error_event", error_event)
        yield error_event

    def _extract_references_from_state(self, final_state: dict[str, Any] | None) -> list[Chunk] | None:
        """Extract and validate references from final state.

        Args:
            final_state: The final state of the agent.

        Returns:
            Validated references or None if not available.
        """
        if final_state and isinstance(final_state, dict) and final_state.get("references"):
            try:
                return validate_references(final_state["references"])
            except Exception:
                pass
        return None

    def _extract_total_usage_from_state(self, final_state: dict[str, Any] | None) -> dict[str, Any] | None:
        """Extract total usage from final state.

        Args:
            final_state: The final state of the agent.

        Returns:
            Total usage metadata or None if not available.
        """
        if final_state and isinstance(final_state, dict) and final_state.get(TOTAL_USAGE_KEY):
            return final_state[TOTAL_USAGE_KEY]
        return None

    def _build_completion_metadata(self, final_state: dict[str, Any] | None) -> dict[str, Any]:
        """Build metadata for the completion event.

        Args:
            final_state: The final state of the agent.

        Returns:
            Metadata dictionary with previous_step_ids when available.
        """
        metadata: dict[str, Any] = {}

        # Add previous step IDs if available
        try:
            thread_id = _THREAD_ID_CVAR.get()
            if thread_id and thread_id in self._completed_tool_steps_by_thread:
                completed_ids = self._completed_tool_steps_by_thread[thread_id]
                if completed_ids:
                    metadata["previous_step_ids"] = list(completed_ids)
        except Exception as e:
            logger.warning("Attaching previous_step_ids to completion event failed: %s", e, exc_info=True)

        return metadata

    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]) -> A2AEvent:
        """Create the completion event with artifacts and references if available.

        Args:
            pending_artifacts: List of artifacts waiting to be attached to a message.
            final_state: The final state of the agent.

        Returns:
            An A2AEvent marking the end of the stream; "artifacts", "references",
            and total usage are included when available.
        """
        artifacts = pending_artifacts if pending_artifacts else None
        references = self._extract_references_from_state(final_state)
        total_usage = self._extract_total_usage_from_state(final_state)
        metadata = self._build_completion_metadata(final_state)

        return self._create_a2a_event(
            event_type=A2AStreamEventType.FINAL_RESPONSE,
            content="Stream finished.",
            tool_info=None,
            metadata=metadata,
            is_final=True,
            artifacts=artifacts,
            references=references,
            step_usage=None,
            total_usage=total_usage,
        )

    def _extract_tool_name_prefix(self, tool_name: str) -> str:
        """Extract a meaningful prefix from a tool name.

        Args:
            tool_name: The name of the tool.

        Returns:
            A meaningful prefix.
        """
        if tool_name.startswith("delegate_to_"):
            agent_name = tool_name[12:]  # strip the "delegate_to_" prefix (12 chars)
            if agent_name.endswith("Agent"):
                agent_name = agent_name[:-5]  # strip the trailing "Agent" (5 chars)
            return agent_name.lower()[:4]

        if "_" in tool_name:
            parts = tool_name.split("_")
            for part in parts:
                if part not in ["tool", "generator", "calculator", "forecast"]:
                    return part[:4]
            return parts[0][:4]
        else:
            return tool_name[:4]

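    # Examples (hypothetical tool names, traced through the logic above):
    #   "delegate_to_WeatherAgent" -> "weat"
    #   "chart_generator"          -> "char"  (skips the generic "generator" part)
    #   "search"                   -> "sear"
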
    def _generate_tool_call_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
        """Generate step_id for tool call events.

        Args:
            tool_info: Tool information
            counter: Step counter

        Returns:
            Generated step_id
        """
        if not tool_info or not tool_info.get("tool_calls"):
            return f"tool_start_{counter:03d}"

        tool_calls = tool_info["tool_calls"]
        if not tool_calls:
            return f"tool_start_{counter:03d}"

        prefixes = [self._extract_tool_name_prefix(tc.get("name", "")) or "unkn" for tc in tool_calls]
        delegation_flags = self._get_delegation_info_from_tool_calls(tool_calls)

        if len(tool_calls) == 1:
            category = "agent" if delegation_flags[0] else "tool"
            return f"{category}_{prefixes[0]}_start_{counter:03d}"

        combined_name = "".join(prefixes).strip()[:6]
        combined_name = combined_name or "multi"

        if all(delegation_flags):
            category = "agent"
        elif any(delegation_flags):
            category = "mixed"
        else:
            category = "tool"

        return f"{category}_{combined_name}_parent_{counter:03d}"

    def _generate_tool_result_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
        """Generate step_id for tool result events.

        Args:
            tool_info: Tool information
            counter: Step counter

        Returns:
            Generated step_id
        """
        if not tool_info:
            return f"tool_done_{counter:03d}"

        tool_name = tool_info.get("name", "")
        prefix = self._extract_tool_name_prefix(tool_name) or "unkn"
        category = "agent" if self._is_delegation_tool_from_info(tool_info) else "tool"
        return f"{category}_{prefix}_done_{counter:03d}"

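    # Resulting step_id scheme (illustrative, with a hypothetical counter of 7):
    #   single tool call        -> "tool_sear_start_007"
    #   single delegation call  -> "agent_weat_start_007"
    #   parallel tool calls     -> "tool_<combined>_parent_007" (or "agent"/"mixed")
    #   tool result             -> "tool_sear_done_007"
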
    @staticmethod
    def _is_delegation_tool_name(tool_name: str) -> bool:
        """Check if a tool name corresponds to a delegation (sub-agent) tool.

        This method maintains backward compatibility by checking the tool name pattern.
        For new tools created by DelegationToolManager, use _is_delegation_tool() instead.

        Args:
            tool_name: The name of the tool to check.

        Returns:
            bool: True if the tool name indicates a delegation tool.
        """
        return isinstance(tool_name, str) and tool_name.startswith("delegate_to_")

    @staticmethod
    def _is_delegation_tool(tool_instance: Any) -> bool:
        """Check delegation status based on metadata when available.

        Args:
            tool_instance: The tool instance to check for delegation metadata.

        Returns:
            True if the tool is marked as a delegation tool, False otherwise.
        """
        metadata = getattr(tool_instance, "metadata", None)
        if not metadata or not hasattr(metadata, "get"):
            return False

        return bool(metadata.get("is_delegation_tool"))

    def _get_delegation_info_from_tool_calls(self, tool_calls: list[dict[str, Any]] | None) -> list[bool]:
        """Return delegation flags for each tool call using hybrid detection.

        Args:
            tool_calls: List of tool call dictionaries containing tool information.

        Returns:
            List of boolean flags indicating delegation status for each tool call.
        """
        if not tool_calls:
            return []

        delegation_flags: list[bool] = []
        for tc in tool_calls:
            if not isinstance(tc, dict):
                logger.warning("Unexpected tool call payload type: %s", type(tc))
                delegation_flags.append(False)
                continue

            delegation_flags.append(self._is_delegation_tool_from_info(tc))

        return delegation_flags

    def _is_delegation_tool_from_info(self, tool_info: dict[str, Any] | None) -> bool:
        """Check delegation status from tool metadata, fallback to name pattern.

        Args:
            tool_info: Dictionary containing tool information including name and instance.

        Returns:
            True if the tool is identified as a delegation tool, False otherwise.
        """
        if not isinstance(tool_info, dict):
            logger.warning("Unexpected tool info payload type: %s", type(tool_info))
            return False

        tool_instance = tool_info.get("tool_instance")
        if tool_instance and self._is_delegation_tool(tool_instance):
            return True

        return self._is_delegation_tool_name(tool_info.get("name", ""))

    def _generate_meaningful_step_id(
        self, event_type: A2AStreamEventType, tool_info: dict[str, Any] | None = None
    ) -> str:
        """Generate a meaningful step_id based on event type and tool information.

        Args:
            event_type: The type of event (tool_call, tool_result, final_response, etc.)
            tool_info: Tool information containing tool names and IDs

        Returns:
            A meaningful step_id string
        """
        try:
            counter = get_next_step_number()

            step_id_generators = {
                A2AStreamEventType.TOOL_CALL: lambda: self._generate_tool_call_step_id(tool_info, counter),
                A2AStreamEventType.TOOL_RESULT: lambda: self._generate_tool_result_step_id(tool_info, counter),
                A2AStreamEventType.FINAL_RESPONSE: lambda: f"final_{counter:03d}",
                A2AStreamEventType.CONTENT_CHUNK: lambda: f"content_{counter:03d}",
            }

            generator = step_id_generators.get(event_type)
            if generator:
                return generator()

            event_value = event_type.value if hasattr(event_type, "value") else str(event_type)
            return f"{event_value}_{counter:03d}"

        except Exception:
            return f"stp_{uuid.uuid4().hex[:8]}"

    def _create_a2a_event(  # noqa: PLR0913
        self,
        event_type: A2AStreamEventType,
        content: str,
        metadata: dict[str, Any] | None = None,
        tool_info: dict[str, Any] | None = None,
        thinking_and_activity_info: dict[str, Any] | None = None,
        is_final: bool = False,
        artifacts: list | None = None,
        references: list | None = None,
        step_usage: dict[str, Any] | None = None,
        total_usage: dict[str, Any] | None = None,
    ) -> A2AEvent:
        """Create a structured A2AEvent dictionary.

        Args:
            event_type: The semantic type of the event.
            content: The main text content of the event.
            metadata: Additional metadata.
            tool_info: Tool-specific information.
            thinking_and_activity_info: Thinking and activity info from the model.
            is_final: Whether this is a final event.
            artifacts: List of artifacts to attach to the event.
            references: List of references to attach to the event.
            step_usage: Step-level token usage information.
            total_usage: Total token usage information.

        Returns:
            A dictionary conforming to the A2AEvent TypedDict.
        """
        enriched_metadata: dict[str, Any] = metadata.copy() if isinstance(metadata, dict) else {}
        if "agent_name" not in enriched_metadata:
            enriched_metadata["agent_name"] = self.name
        if "step_id" not in enriched_metadata:
            enriched_metadata["step_id"] = self._generate_meaningful_step_id(event_type, tool_info)
        if "previous_step_ids" not in enriched_metadata:
            enriched_metadata["previous_step_ids"] = []

        # Cumulative time since the first STATUS_UPDATE for this thread is not
        # set here; the server executor enforces it for all SSE events.

        event = {
            "event_type": event_type,
            "content": content,
            "metadata": enriched_metadata,
            "tool_info": tool_info,
            "is_final": is_final,
            "artifacts": artifacts,
            "references": references,
            STEP_USAGE_KEY: step_usage,
            TOTAL_USAGE_KEY: total_usage,
        }

        if thinking_and_activity_info is not None:
            event["thinking_and_activity_info"] = thinking_and_activity_info

        try:
            content_preview = content if isinstance(content, str) else str(content)
            logger.info(
                "A2A emitting event: type=%s step_id=%s final=%s preview=%s",
                getattr(event_type, "value", event_type),
                enriched_metadata.get("step_id"),
                is_final,
                content_preview[:120].replace("\n", " "),
            )
        except Exception:
            logger.debug("A2A emitting event (logging preview failed)", exc_info=True)

        return event

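    # Shape sketch (hypothetical values) of the returned A2AEvent:
    #   {
    #       "event_type": A2AStreamEventType.STATUS_UPDATE,
    #       "content": "Working...",
    #       "metadata": {"agent_name": "...", "step_id": "tool_sear_start_001",
    #                    "previous_step_ids": []},
    #       "tool_info": None, "is_final": False, "artifacts": None,
    #       "references": None, STEP_USAGE_KEY: None, TOTAL_USAGE_KEY: None,
    #   }
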
    def _resolve_tool_event_type(self, event_type_raw: Any) -> A2AStreamEventType | None:
        """Normalize a raw event type to ``A2AStreamEventType``.

        Args:
            event_type_raw: Raw ``event_type`` value from a streaming chunk.

        Returns:
            The resolved ``A2AStreamEventType`` when supported, otherwise ``None``.
        """
        if isinstance(event_type_raw, A2AStreamEventType):
            return event_type_raw
        if isinstance(event_type_raw, str):
            try:
                return A2AStreamEventType(event_type_raw)
            except ValueError:
                return None
        return None

    @staticmethod
    def _is_supported_tool_event(event_type: A2AStreamEventType) -> bool:
        """Return True when the event type is a tool-related streaming event.

        Args:
            event_type: Candidate event type to evaluate.

        Returns:
            True when the event type should be forwarded to the client.
        """
        return event_type in {
            A2AStreamEventType.TOOL_CALL,
            A2AStreamEventType.TOOL_RESULT,
            A2AStreamEventType.STATUS_UPDATE,
        }

    def _build_tool_activity_payload(
        self,
        event_type: A2AStreamEventType,
        metadata: dict[str, Any] | None,
        tool_info: dict[str, Any] | None,
        activity_info: dict[str, Any] | None,
    ) -> dict[str, Any] | None:
        """Ensure tool events carry activity payloads per the streaming contract.

        Args:
            event_type: Stream event type emitted by the tool.
            metadata: Optional metadata accompanying the chunk.
            tool_info: Tool details provided by the emitting runner.
            activity_info: Pre-built activity payload to reuse when present.

        Returns:
            Activity dictionary ready to be serialized with the tool chunk, or
            the unchanged activity_info (possibly None) for non-tool events.
        """
        if event_type not in (A2AStreamEventType.TOOL_CALL, A2AStreamEventType.TOOL_RESULT):
            return activity_info

        if activity_info:
            return activity_info

        activity_context = self._compose_tool_activity_context(metadata, tool_info)
        return create_tool_activity_info(activity_context)

    def _compose_tool_activity_context(
        self,
        metadata: dict[str, Any] | None,
        tool_info: dict[str, Any] | None,
    ) -> dict[str, Any] | None:
        """Create a context dictionary for downstream activity message generation.

        Args:
            metadata: Metadata payload extracted from the streaming chunk.
            tool_info: Tool descriptor containing ids and display names.

        Returns:
            A merged context dictionary or None when no data was provided.
        """
        activity_context: dict[str, Any] | None = None
        if isinstance(metadata, dict):
            activity_context = metadata.copy()
        if isinstance(tool_info, dict):
            if activity_context is None:
                activity_context = {"tool_info": tool_info}
            else:
                activity_context.setdefault("tool_info", tool_info)
        return activity_context

    def _create_tool_streaming_event(self, chunk: dict[str, Any], writer: StreamWriter, tool_name: str) -> None:
        """Create and emit tool streaming events.

        Only processes TOOL_CALL, TOOL_RESULT, and STATUS_UPDATE event types.

        Args:
            chunk: Streaming chunk from the tool.
            writer: Stream writer to emit events.
            tool_name: Name of the tool producing the chunk.
        """
        event_type = self._resolve_tool_event_type(chunk.get("event_type"))
        if not event_type or not self._is_supported_tool_event(event_type):
            return

        tool_info = chunk.get("tool_info")
        metadata = chunk.get("metadata")

        if (
            event_type == A2AStreamEventType.TOOL_RESULT
            and isinstance(tool_info, dict)
            and not tool_info.get("id")
            and isinstance(tool_info.get("tool_calls"), list)
            and tool_info.get("tool_calls")
        ):
            logger.info(
                "A2A skipping streaming tool_result without id (tool=%s)",
                tool_info.get("name"),
            )
            return

        activity_info = self._build_tool_activity_payload(
            event_type,
            metadata if isinstance(metadata, dict) else None,
            tool_info if isinstance(tool_info, dict) else None,
            chunk.get("thinking_and_activity_info"),
        )

        a2a_event = self._create_a2a_event(
            event_type=event_type,
            content=chunk.get("content", f"Processing with tools: {tool_name}"),
            metadata=metadata,
            tool_info=tool_info,
            thinking_and_activity_info=activity_info,
        )
        writer(a2a_event)