aip-agents-binary 0.5.25b1__py3-none-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/__init__.pyi +19 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/__init__.pyi +3 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/__init__.pyi +4 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/base_executor.pyi +73 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/google_adk_executor.pyi +51 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langflow_executor.pyi +43 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/server/langgraph_executor.pyi +47 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/a2a/types.pyi +132 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/__init__.pyi +9 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_agent.pyi +221 -0
- aip_agents/agent/base_langgraph_agent.py +2948 -0
- aip_agents/agent/base_langgraph_agent.pyi +232 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_agent.pyi +141 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/google_adk_constants.pyi +3 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/__init__.pyi +6 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/config.pyi +15 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/manager.pyi +200 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/models.pyi +3 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/base.pyi +24 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/hitl/registry.pyi +101 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interface.pyi +81 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/interfaces.pyi +44 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langflow_agent.pyi +133 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
- aip_agents/agent/langgraph_react_agent.py +2596 -0
- aip_agents/agent/langgraph_react_agent.pyi +131 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/agent/system_instruction_context.pyi +13 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/__init__.pyi +4 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.pyi +4 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/client.pyi +140 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/clients/langflow/types.pyi +7 -0
- aip_agents/constants.py +23 -0
- aip_agents/constants.pyi +7 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/__init__.pyi +0 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_client.pyi +48 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/compare_streaming_server.pyi +18 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/demo_memory_recall.pyi +58 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain.pyi +5 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.py +44 -0
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_pii_logger.pyi +5 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_sentry.pyi +21 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_step_limits.pyi +17 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/hitl_demo.pyi +67 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/__init__.pyi +9 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_generator_tool.pyi +15 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/random_chart_tool.pyi +20 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/serper_tool.pyi +16 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/stock_tools.pyi +36 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/table_generator_tool.pyi +22 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/time_tool.pyi +15 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/guardrails/__init__.py +83 -0
- aip_agents/guardrails/__init__.pyi +6 -0
- aip_agents/guardrails/engines/__init__.py +69 -0
- aip_agents/guardrails/engines/__init__.pyi +4 -0
- aip_agents/guardrails/engines/base.py +90 -0
- aip_agents/guardrails/engines/base.pyi +61 -0
- aip_agents/guardrails/engines/nemo.py +101 -0
- aip_agents/guardrails/engines/nemo.pyi +46 -0
- aip_agents/guardrails/engines/phrase_matcher.py +113 -0
- aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
- aip_agents/guardrails/exceptions.py +39 -0
- aip_agents/guardrails/exceptions.pyi +23 -0
- aip_agents/guardrails/manager.py +163 -0
- aip_agents/guardrails/manager.pyi +42 -0
- aip_agents/guardrails/middleware.py +199 -0
- aip_agents/guardrails/middleware.pyi +87 -0
- aip_agents/guardrails/schemas.py +63 -0
- aip_agents/guardrails/schemas.pyi +43 -0
- aip_agents/guardrails/utils.py +45 -0
- aip_agents/guardrails/utils.pyi +19 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/__init__.pyi +0 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/__init__.pyi +5 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/base_mcp_client.pyi +148 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/connection_manager.pyi +48 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/google_adk/client.pyi +75 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/__init__.pyi +3 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/langchain/client.pyi +48 -0
- aip_agents/mcp/client/persistent_session.py +362 -0
- aip_agents/mcp/client/persistent_session.pyi +113 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/session_pool.pyi +101 -0
- aip_agents/mcp/client/transports.py +228 -0
- aip_agents/mcp/client/transports.pyi +123 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/__init__.pyi +0 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/mcp/utils/config_validator.pyi +82 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/__init__.pyi +5 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/__init__.pyi +4 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/base_adapter.pyi +150 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/adapters/mem0.pyi +22 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/base.pyi +60 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/constants.pyi +25 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/factory.pyi +24 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/guidance.pyi +3 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/memory/simple_memory.pyi +23 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/__init__.pyi +5 -0
- aip_agents/middleware/base.py +96 -0
- aip_agents/middleware/base.pyi +75 -0
- aip_agents/middleware/manager.py +150 -0
- aip_agents/middleware/manager.pyi +84 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/middleware/todolist.pyi +125 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/__init__.pyi +9 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/a2a.pyi +40 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/agent.pyi +65 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/hitl.pyi +89 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/langgraph.pyi +28 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/model_id.pyi +54 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/step_limit.pyi +63 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/schema/storage.pyi +21 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/__init__.pyi +3 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/sentry/sentry.pyi +48 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/__init__.pyi +8 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/base.pyi +58 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/__init__.pyi +3 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/clients/minio_client.pyi +137 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/config.pyi +29 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/__init__.pyi +5 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/base.pyi +88 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/memory.pyi +79 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/storage/providers/object_storage.pyi +98 -0
- aip_agents/tools/__init__.py +53 -0
- aip_agents/tools/__init__.pyi +9 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/__init__.pyi +14 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/action_parser.pyi +18 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/llm_config.pyi +52 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/minio_storage.pyi +109 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/schemas.pyi +32 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session.pyi +4 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/session_errors.pyi +53 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/streaming.pyi +81 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/browser_use/types.pyi +45 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/__init__.pyi +3 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/constant.pyi +4 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +306 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
- aip_agents/tools/constants.py +177 -0
- aip_agents/tools/constants.pyi +138 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/__init__.pyi +7 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/base_reader.pyi +75 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/__init__.pyi +3 -0
- aip_agents/tools/gl_connector/tool.py +383 -0
- aip_agents/tools/gl_connector/tool.pyi +74 -0
- aip_agents/tools/gl_connector_tools.py +119 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/__init__.pyi +5 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/base.pyi +69 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/mem0.pyi +19 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search/schema.pyi +15 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/memory_search_tool.pyi +3 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/time_tool.pyi +16 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/tool_config_injector.pyi +26 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/__init__.pyi +3 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/tools/web_search/serper_tool.pyi +19 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/__init__.pyi +36 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/types/a2a_events.pyi +3 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/__init__.pyi +11 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/a2a_connector.pyi +146 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/artifact_helpers.pyi +203 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/constants.pyi +10 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/__init__.pyi +4 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/normalization.pyi +95 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/datetime/timezone.pyi +48 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/env_loader.pyi +10 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/event_handler_registry.pyi +23 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/file_prompt_utils.pyi +21 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/final_response_builder.pyi +34 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/formatter_llm_client.pyi +71 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/__init__.pyi +3 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/converter.pyi +49 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +292 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/logger.pyi +60 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/__init__.pyi +5 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/metadata_helper.pyi +117 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/__init__.pyi +5 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_handler.pyi +96 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/pii_helper.pyi +78 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/reference_helper.pyi +81 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/sse_chunk_transformer.pyi +166 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/step_limit_manager.pyi +112 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents/utils/token_usage_helper.pyi +60 -0
- aip_agents_binary-0.5.25b1.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.25b1.dist-info/RECORD +566 -0
- aip_agents_binary-0.5.25b1.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.25b1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1757 @@
|
|
|
1
|
+
"""Provides A2A communication functionality through a connector class.
|
|
2
|
+
|
|
3
|
+
This module contains the A2AConnector class which handles all A2A protocol
|
|
4
|
+
communication between agents, including message sending and streaming. This version
|
|
5
|
+
ensures immediate yielding of artifact events to reduce latency and adhere
|
|
6
|
+
to the A2A protocol design.
|
|
7
|
+
|
|
8
|
+
Authors:
|
|
9
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
10
|
+
Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
import concurrent.futures
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
import uuid
|
|
18
|
+
from collections.abc import AsyncGenerator
|
|
19
|
+
from datetime import UTC, datetime
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
import httpx
|
|
23
|
+
from a2a.client import A2AClient
|
|
24
|
+
from a2a.types import (
|
|
25
|
+
AgentCard,
|
|
26
|
+
Message,
|
|
27
|
+
MessageSendParams,
|
|
28
|
+
SendStreamingMessageRequest,
|
|
29
|
+
SendStreamingMessageSuccessResponse,
|
|
30
|
+
Task,
|
|
31
|
+
TaskArtifactUpdateEvent,
|
|
32
|
+
TaskState,
|
|
33
|
+
TaskStatusUpdateEvent,
|
|
34
|
+
)
|
|
35
|
+
from a2a.utils import get_text_parts
|
|
36
|
+
from pydantic import BaseModel
|
|
37
|
+
|
|
38
|
+
from aip_agents.schema.a2a import A2AStreamEventType
|
|
39
|
+
from aip_agents.utils.event_handler_registry import (
|
|
40
|
+
DEFAULT_EVENT_HANDLER_REGISTRY,
|
|
41
|
+
EventHandlerRegistry,
|
|
42
|
+
)
|
|
43
|
+
from aip_agents.utils.final_response_builder import FinalResponseMetadataOptions, assemble_final_response
|
|
44
|
+
from aip_agents.utils.logger import get_logger
|
|
45
|
+
from aip_agents.utils.metadata_helper import (
|
|
46
|
+
Kind,
|
|
47
|
+
MetadataFieldKeys,
|
|
48
|
+
MetadataTimeTracker,
|
|
49
|
+
Status,
|
|
50
|
+
create_metadata,
|
|
51
|
+
create_status_update_metadata,
|
|
52
|
+
create_tool_processing_metadata,
|
|
53
|
+
)
|
|
54
|
+
from aip_agents.utils.sse_chunk_transformer import SSEChunkTransformer
|
|
55
|
+
|
|
56
|
+
logger = get_logger(__name__)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class ArtifactInfo(BaseModel):
    """Structured artifact information for A2A communication.

    This Pydantic model provides type safety and validation for artifact data
    exchanged between agents through the A2A protocol. Instances are serialized
    with ``model_dump()`` before being appended to the artifact tracker.
    """

    # Identifier assigned to the artifact by the producing agent, if any.
    artifact_id: str | None = None
    # Human-readable artifact name (e.g. "final_response").
    name: str | None = None
    # Logical content type of the artifact payload.
    content_type: str | None = None
    # MIME type of any attached file content.
    mime_type: str | None = None
    # Original file name, when the artifact wraps a file.
    file_name: str | None = None
    # True when inline (base64/raw) file data is present in `file_data`.
    has_file_data: bool = False
    # True when a remote file reference is present in `file_uri`.
    has_file_uri: bool = False
    # Inline file payload; populated only when `has_file_data` is True.
    file_data: str | None = None
    # Remote file reference; populated only when `has_file_uri` is True.
    file_uri: str | None = None
    # Free-form description supplied by the producing agent.
    description: str | None = None
    # Number of parts composing the artifact, when known.
    parts: int | None = None
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class StreamingConfig(BaseModel):
    """Configuration for A2A streaming operations."""

    # Keyword arguments forwarded to httpx.AsyncClient (e.g. timeout, headers).
    http_kwargs: dict[str, Any] | None = None
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class A2AConnector:
|
|
86
|
+
"""Handles A2A protocol communication between agents.
|
|
87
|
+
|
|
88
|
+
This class provides methods for sending messages to other agents using the A2A protocol,
|
|
89
|
+
supporting both synchronous and asynchronous communication patterns, as well as streaming
|
|
90
|
+
responses with immediate artifact event handling.
|
|
91
|
+
"""
|
|
92
|
+
|
|
93
|
+
# Epsilon value for floating point comparisons to avoid precision issues
|
|
94
|
+
FLOAT_EPSILON = 1e-10
|
|
95
|
+
event_registry: EventHandlerRegistry = DEFAULT_EVENT_HANDLER_REGISTRY
|
|
96
|
+
|
|
97
|
+
@staticmethod
|
|
98
|
+
def _create_message_payload(
|
|
99
|
+
message: str | dict[str, Any],
|
|
100
|
+
task_id: str | None = None,
|
|
101
|
+
context_id: str | None = None,
|
|
102
|
+
) -> dict[str, Any]:
|
|
103
|
+
"""Creates a standardized message payload for A2A communication.
|
|
104
|
+
|
|
105
|
+
Args:
|
|
106
|
+
message: The message content to send. Can be a string or dictionary.
|
|
107
|
+
task_id: Task ID to associate with the message. Defaults to None.
|
|
108
|
+
context_id: Context ID to associate with the message. Defaults to None.
|
|
109
|
+
|
|
110
|
+
Returns:
|
|
111
|
+
A dictionary containing the formatted message payload.
|
|
112
|
+
"""
|
|
113
|
+
payload = {
|
|
114
|
+
"message": {
|
|
115
|
+
"role": "user",
|
|
116
|
+
"parts": [
|
|
117
|
+
{
|
|
118
|
+
"type": "text",
|
|
119
|
+
"text": message if isinstance(message, str) else json.dumps(message),
|
|
120
|
+
}
|
|
121
|
+
],
|
|
122
|
+
"messageId": str(uuid.uuid4()),
|
|
123
|
+
}
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if task_id:
|
|
127
|
+
payload["message"]["taskId"] = task_id
|
|
128
|
+
if context_id:
|
|
129
|
+
payload["message"]["contextId"] = context_id
|
|
130
|
+
|
|
131
|
+
return payload
|
|
132
|
+
|
|
133
|
+
@staticmethod
def _create_a2a_client(
    agent_card: AgentCard, http_kwargs: dict[str, Any] | None = None
) -> tuple[httpx.AsyncClient, A2AClient]:
    """Create the HTTP client and the A2A client bound to it.

    Args:
        agent_card: The AgentCard instance containing agent details.
        http_kwargs: Optional keyword arguments for ``httpx.AsyncClient``.

    Returns:
        tuple[httpx.AsyncClient, A2AClient]: The HTTP client and the A2A client using it.
    """
    client_options = http_kwargs or {}
    http_client = httpx.AsyncClient(**client_options)
    return http_client, A2AClient(httpx_client=http_client, agent_card=agent_card)
|
|
149
|
+
|
|
150
|
+
@staticmethod
def _extract_task_info(res_data: Task | Message) -> dict[str, Any]:
    """Extract task/context identifiers from A2A response data.

    Args:
        res_data: Response data from A2A communication.

    Returns:
        dict[str, Any]: Task ID and context ID; tasks additionally report their state.
    """
    if isinstance(res_data, Task):
        return {
            "task_id": res_data.id,
            "task_state": str(res_data.status.state),
            "context_id": res_data.contextId,
        }
    # Message responses carry identifiers only — there is no task state.
    return {
        "context_id": res_data.contextId,
        "task_id": res_data.taskId,
    }
|
|
170
|
+
|
|
171
|
+
@staticmethod
def _extract_text_content(res_data: Task | Message) -> list[str]:
    """Collect all text parts from A2A response data.

    For ``Task`` responses, artifact texts are preferred; the status message
    is consulted only when no artifact text was found.

    Args:
        res_data: Response data from A2A communication.

    Returns:
        list[str]: Extracted text strings (possibly empty).
    """
    if not isinstance(res_data, Task):
        # Plain Message: read its parts directly.
        return list(get_text_parts(res_data.parts)) if res_data.parts else []

    collected: list[str] = []
    for artifact in res_data.artifacts or []:
        collected.extend(get_text_parts(artifact.parts))

    status = res_data.status
    if not collected and status and status.message and status.message.parts:
        collected.extend(get_text_parts(status.message.parts))
    return collected
|
|
191
|
+
|
|
192
|
+
@staticmethod
def _handle_task_state_update(
    event: TaskStatusUpdateEvent,
) -> dict[str, Any] | None:
    """Handle a task status update event from the A2A protocol.

    Focuses on status tracking and tool-processing updates only; final
    responses are handled by _handle_artifact_update_event via lastChunk=True.

    Args:
        event: The TaskStatusUpdateEvent from the A2A protocol.

    Returns:
        dict[str, Any] | None: Response dictionary when there is content to
        surface, otherwise None.
    """
    if not event.status:
        return None

    # Tool-processing updates take precedence over generic status text.
    detection = A2AConnector._detect_tool_processing_in_status(event)
    if detection["is_tool_processing"]:
        return A2AConnector._create_tool_processing_response(event, detection)
    if detection.get("skip_status_update"):
        return None

    texts = A2AConnector._extract_status_message_texts(event)

    # Drop generic "task completed" confirmations outright.
    if A2AConnector._should_filter_completion_message(event, texts):
        return None

    if texts:
        return A2AConnector._create_status_response(event, texts)

    # No textual content: emit a placeholder status update instead.
    extra = A2AConnector._extract_custom_metadata_from_status_message(event)
    merged = A2AConnector._merge_event_metadata(create_status_update_metadata("", extra), event)
    state = event.status.state.value if event.status and event.status.state else "working"
    return A2AConnector._create_empty_payload_response(
        event=event,
        metadata=merged,
        default_event_type=A2AStreamEventType.STATUS_UPDATE,
        task_state=state,
    )
|
|
238
|
+
|
|
239
|
+
@staticmethod
def _create_tool_processing_response(
    event: TaskStatusUpdateEvent, tool_detection: dict[str, Any]
) -> dict[str, Any]:
    """Build the streaming response for a tool-processing status update.

    Args:
        event (TaskStatusUpdateEvent): The task status update event.
        tool_detection (dict[str, Any]): Tool detection information produced by
            _detect_tool_processing_in_status.

    Returns:
        dict[str, Any]: Response dictionary with status and content.
    """
    # Start from the raw event metadata so tool_info survives the merge.
    merged_metadata = A2AConnector._merge_event_metadata(
        create_tool_processing_metadata(event.metadata or {}), event
    )

    resolved_type = A2AConnector._resolve_metadata_event_type(
        merged_metadata, tool_detection.get("event_type", A2AStreamEventType.STATUS_UPDATE)
    )

    # HITL may replace the status text with human-readable tool output.
    message = A2AConnector._apply_hitl_content_override(
        tool_detection["status_message"], resolved_type, merged_metadata
    )

    stripped = message.strip() if isinstance(message, str) else message
    if not stripped:
        state = event.status.state.value if event.status and event.status.state else "working"
        return A2AConnector._create_empty_payload_response(
            event=event,
            metadata=merged_metadata,
            default_event_type=resolved_type,
            task_state=state,
        )

    return {
        "status": "success",
        "task_state": "working",
        "content": message,
        "task_id": event.taskId,
        "context_id": event.contextId,
        "final": False,
        "timestamp": event.status.timestamp,
        "metadata": merged_metadata,
        "event_type": resolved_type,
    }
|
|
287
|
+
|
|
288
|
+
@staticmethod
def _apply_hitl_content_override(content: str, event_type: A2AStreamEventType, metadata: dict[str, Any]) -> str:
    """Apply the HITL content override when tool results are available.

    Delegates to SSEChunkTransformer.apply_hitl_content_override; the
    original content is kept whenever the override yields nothing.

    Args:
        content: The original content/status message.
        event_type: The type of event being processed.
        metadata: Metadata dictionary containing tool_info and the hitl flag.

    Returns:
        str: The original content, or human-readable tool output if HITL is active.
    """
    # The shared helper expects a plain string event type.
    if isinstance(event_type, A2AStreamEventType):
        type_name = event_type.value
    else:
        type_name = str(event_type)

    override = SSEChunkTransformer.apply_hitl_content_override(content, type_name, metadata)
    return content if override is None else override
|
|
306
|
+
|
|
307
|
+
@staticmethod
def _extract_status_message_texts(event: TaskStatusUpdateEvent) -> list[str]:
    """Extract the text parts from a status update's message, if any.

    Args:
        event (TaskStatusUpdateEvent): The task status update event.

    Returns:
        list[str]: Text parts of the status message; empty when absent.
    """
    message = event.status.message
    return get_text_parts(message.parts) if message else []
|
|
320
|
+
|
|
321
|
+
@staticmethod
def _should_filter_completion_message(event: TaskStatusUpdateEvent, extracted_texts: list[str]) -> bool:
    """Decide whether a final status update is a generic completion notice.

    Args:
        event (TaskStatusUpdateEvent): The task status update event.
        extracted_texts (list[str]): Text parts extracted from the event.

    Returns:
        bool: True when the message should be filtered out.
    """
    terminal_states = (TaskState.completed, TaskState.failed, TaskState.canceled)
    if event.status.state not in terminal_states or not event.final:
        return False

    # A final terminal update with no text at all is always filtered.
    if not extracted_texts:
        return True

    generic_phrases = {
        "Task completed successfully.",
        "Task completed successfully",
    }
    return "\n".join(extracted_texts).strip() in generic_phrases
|
|
345
|
+
|
|
346
|
+
@staticmethod
def _create_status_response(event: TaskStatusUpdateEvent, extracted_texts: list[str]) -> dict[str, Any]:
    """Build the response for a non-tool status update carrying text.

    Args:
        event (TaskStatusUpdateEvent): The task status update event.
        extracted_texts (list[str]): Text parts extracted from the event.

    Returns:
        dict[str, Any]: Response dictionary with status and content.
    """
    body = "\n".join(extracted_texts)
    custom = A2AConnector._extract_custom_metadata_from_status_message(event)
    merged = A2AConnector._merge_event_metadata(create_status_update_metadata(body, custom), event)

    return {
        "status": "success",
        "task_state": event.status.state.value,
        "content": body,
        "task_id": event.taskId,
        "context_id": event.contextId,
        "final": False,
        "timestamp": event.status.timestamp,
        "metadata": merged,
        "event_type": A2AConnector._resolve_metadata_event_type(merged, A2AStreamEventType.STATUS_UPDATE),
    }
|
|
374
|
+
|
|
375
|
+
@staticmethod
def _merge_event_metadata(metadata: dict[str, Any] | None, event: TaskStatusUpdateEvent) -> dict[str, Any]:
    """Merge response metadata over event-level metadata and add a timestamp.

    Response-level keys win over event-level keys. The status timestamp is
    only added when no ``timestamp`` key is already present.

    Args:
        metadata (dict[str, Any] | None): The base metadata to merge.
        event (TaskStatusUpdateEvent): The event containing additional metadata.

    Returns:
        dict[str, Any]: The merged metadata dictionary.
    """
    event_meta = event.metadata if isinstance(event.metadata, dict) else {}
    response_meta = metadata if isinstance(metadata, dict) else {}
    combined: dict[str, Any] = {**event_meta, **response_meta}

    ts = getattr(event.status, "timestamp", None) if event.status else None
    if ts is not None:
        combined.setdefault("timestamp", ts)

    return combined
|
|
398
|
+
|
|
399
|
+
@staticmethod
def _create_empty_payload_response(
    event: TaskStatusUpdateEvent,
    metadata: dict[str, Any],
    default_event_type: A2AStreamEventType,
    task_state: str,
) -> dict[str, Any]:
    """Build a placeholder response for an update with no textual content.

    Args:
        event (TaskStatusUpdateEvent): The task status update event.
        metadata (dict[str, Any]): The metadata for the response.
        default_event_type (A2AStreamEventType): Event type used when metadata
            carries no override.
        task_state (str): The current state of the task.

    Returns:
        dict[str, Any]: Response dictionary with placeholder content.
    """
    # Copy so later mutations cannot leak into the caller's metadata.
    safe_metadata = dict(metadata) if isinstance(metadata, dict) else {}
    return {
        "status": "success",
        "task_state": task_state,
        "content": None,
        "reason": "empty_payload",
        "task_id": event.taskId,
        "context_id": event.contextId,
        "final": bool(event.final),
        "timestamp": event.status.timestamp if event.status else None,
        "metadata": safe_metadata,
        "event_type": A2AConnector._resolve_metadata_event_type(safe_metadata, default_event_type),
    }
|
|
432
|
+
|
|
433
|
+
@staticmethod
def _resolve_metadata_event_type(
    metadata: dict[str, Any] | None, default: A2AStreamEventType
) -> A2AStreamEventType | str:
    """Resolve an event-type override stored in metadata.

    Args:
        metadata (dict[str, Any] | None): Metadata that may contain an
            ``event_type`` or ``type`` override.
        default (A2AStreamEventType): Event type used when no override is
            present or the override is invalid.

    Returns:
        A2AStreamEventType | str: Resolved event type value.
    """
    if not isinstance(metadata, dict):
        return default

    candidate = metadata.get("event_type") or metadata.get("type")
    if isinstance(candidate, A2AStreamEventType):
        return candidate
    if not isinstance(candidate, str):
        return default
    try:
        return A2AStreamEventType(candidate)
    except ValueError:
        # Unknown custom event types pass through as raw strings.
        return candidate
|
|
460
|
+
|
|
461
|
+
@staticmethod
def _detect_tool_processing_in_status(event: TaskStatusUpdateEvent) -> dict[str, Any]:
    """Detect tool processing in a TaskStatusUpdateEvent message.

    Mirrors base_executor._is_tool_processing_content() but also extracts
    the information needed for A2A response formatting.

    Args:
        event: The TaskStatusUpdateEvent to analyze.

    Returns:
        dict[str, Any]: Tool processing detection results.
    """
    if not event.status or not event.status.message:
        return {"is_tool_processing": False}

    status_message = event.status.message
    metadata = event.metadata or {}
    tool_info = metadata.get("tool_info", {})
    if not tool_info:
        return {"is_tool_processing": False}

    # Read the first text part once; reused by both handler paths below.
    first_part_text = status_message.parts[0].root.text if status_message.parts else None

    if "tool_calls" in tool_info:
        kind = metadata.get(MetadataFieldKeys.KIND) if isinstance(metadata, dict) else None
        # Final-thinking activity is forwarded untouched, not treated as tool processing.
        if tool_info.get("id") is None and getattr(kind, "value", kind) == Kind.FINAL_THINKING_STEP.value:
            logger.debug(
                "A2AConnector: forwarding final thinking activity (tool=%s, state=%s)",
                tool_info.get("name"),
                metadata.get("status"),
            )
            return {"is_tool_processing": False}
        return A2AConnector._handle_tool_calls(tool_info, first_part_text, metadata)

    if "output" in tool_info:
        return A2AConnector._handle_single_tool_result(tool_info, first_part_text, metadata)

    return {"is_tool_processing": False}
|
|
504
|
+
|
|
505
|
+
@staticmethod
def _handle_tool_calls(
    tool_info: dict[str, Any], message_text: str | None, metadata: dict[str, Any]
) -> dict[str, Any]:
    """Handle tool-call updates covering one or more tools.

    Args:
        tool_info (dict[str, Any]): Information about the tool calls.
        message_text (str | None): Optional message text to display.
        metadata (dict[str, Any]): The metadata for the response (mutated with status).

    Returns:
        dict[str, Any]: Response dictionary with tool call information.
    """
    calls = tool_info["tool_calls"]
    names = [call.get("name") for call in calls]

    if any("output" in call for call in calls):
        # Results present: this update reports tool completion.
        metadata[MetadataFieldKeys.STATUS] = Status.FINISHED
        kind_of_event = A2AStreamEventType.TOOL_RESULT
        status_text = message_text or A2AConnector._build_completion_message(calls, names)
    else:
        # No outputs yet: the tools are being invoked.
        metadata[MetadataFieldKeys.STATUS] = Status.RUNNING
        kind_of_event = A2AStreamEventType.TOOL_CALL
        status_text = message_text or f"Processing with tools: {', '.join(names)}"

    return A2AConnector._create_tool_processing_result(names, status_text, metadata, kind_of_event)
|
|
535
|
+
|
|
536
|
+
@staticmethod
def _handle_single_tool_result(
    tool_info: dict[str, Any], message_text: str | None, metadata: dict[str, Any]
) -> dict[str, Any]:
    """Handle processing of a single tool result.

    Args:
        tool_info (dict[str, Any]): Information about the single tool result.
        message_text (str | None): Optional message text to display.
        metadata (dict[str, Any]): The metadata for the response (mutated with status).

    Returns:
        dict[str, Any]: Tool-processing detection result, or a skip marker for
        duplicate or generic-completion updates.
    """
    tool_name = tool_info["name"]

    # A result without an id but with a tool_calls list duplicates a
    # streaming tool update that was already emitted — skip it.
    tool_calls = tool_info.get("tool_calls")
    if tool_info.get("id") is None and isinstance(tool_calls, list) and tool_calls:
        logger.info(
            "A2AConnector: skipping streaming tool duplicate result (tool=%s, state=%s)",
            tool_name,
            metadata.get("status"),
        )
        return {"is_tool_processing": False, "skip_status_update": True}

    # Prefer explicit message text; otherwise format the raw tool output.
    # NOTE: a former HITL-specific branch was byte-for-byte identical to the
    # default branch and has been folded into it (no behavior change).
    status_message = message_text or A2AConnector._format_tool_output(tool_info.get("output"), tool_name)

    # Generic "completed successfully" placeholders carry no information.
    if A2AConnector._is_generic_tool_completion(status_message, tool_info.get("output"), tool_name):
        return {"is_tool_processing": False, "skip_status_update": True}

    metadata[MetadataFieldKeys.STATUS] = Status.FINISHED
    return A2AConnector._create_tool_processing_result(
        [tool_name], status_message, metadata, A2AStreamEventType.TOOL_RESULT
    )
|
|
576
|
+
|
|
577
|
+
@staticmethod
def _build_completion_message(tool_calls: list[dict[str, Any]], tool_names: list[str]) -> str:
    """Build the completion message for a batch of tool calls.

    Args:
        tool_calls (list[dict[str, Any]]): List of tool call dictionaries.
        tool_names (list[str]): List of tool names.

    Returns:
        str: Joined tool outputs, or a generic completion line when none exist.
    """
    rendered = SSEChunkTransformer.extract_tool_outputs(tool_calls)
    if rendered:
        return "\n".join(rendered)
    return f"Completed {', '.join(tool_names)}"
|
|
590
|
+
|
|
591
|
+
@staticmethod
def _extract_tool_outputs(tool_calls: list[dict[str, Any]]) -> list[str]:
    """Extract human-readable output strings from tool calls.

    Thin wrapper over SSEChunkTransformer.extract_tool_outputs, kept so
    internal callers of this class share one implementation.

    Args:
        tool_calls (list[dict[str, Any]]): List of tool call dictionaries.

    Returns:
        list[str]: Human-readable output strings.
    """
    outputs = SSEChunkTransformer.extract_tool_outputs(tool_calls)
    return outputs
|
|
604
|
+
|
|
605
|
+
@staticmethod
def _format_tool_output(output: Any, tool_name: str) -> str:
    """Format a single tool output for display.

    Thin wrapper over SSEChunkTransformer.format_tool_output, kept so
    internal callers of this class share one implementation.

    Args:
        output (Any): The tool output to format.
        tool_name (str): The name of the tool.

    Returns:
        str: The formatted output string.
    """
    formatted = SSEChunkTransformer.format_tool_output(output, tool_name)
    return formatted
|
|
619
|
+
|
|
620
|
+
@staticmethod
def _create_tool_processing_result(
    tool_names: list[str], status_message: str, metadata: dict[str, Any], event_type: A2AStreamEventType
) -> dict[str, Any]:
    """Create a standardized tool-processing result dictionary.

    Args:
        tool_names: List of tool names to process.
        status_message: Status message to display.
        metadata: Metadata to include in the response.
        event_type: Type of A2A stream event for the response.

    Returns:
        dict[str, Any]: Tool processing result information.
    """
    result: dict[str, Any] = dict(
        is_tool_processing=True,
        tool_names=tool_names,
        status_message=status_message,
        original_metadata=metadata,
        event_type=event_type,
    )
    return result
|
|
642
|
+
|
|
643
|
+
@staticmethod
|
|
644
|
+
def _is_generic_tool_completion(
|
|
645
|
+
status_message: str | None,
|
|
646
|
+
tool_output: Any,
|
|
647
|
+
tool_name: str | None,
|
|
648
|
+
) -> bool:
|
|
649
|
+
"""Return True when the message/output represents a generic completion placeholder.
|
|
650
|
+
|
|
651
|
+
Args:
|
|
652
|
+
status_message: The status message extracted from the event.
|
|
653
|
+
tool_output: The tool output extracted from the metadata.
|
|
654
|
+
tool_name: The tool name associated with the event.
|
|
655
|
+
|
|
656
|
+
Returns:
|
|
657
|
+
bool: True if the content matches known generic completion phrases.
|
|
658
|
+
"""
|
|
659
|
+
normalized_candidates: list[str] = []
|
|
660
|
+
for candidate in (status_message, tool_output):
|
|
661
|
+
if isinstance(candidate, str):
|
|
662
|
+
normalized_candidates.append(candidate.strip())
|
|
663
|
+
if not normalized_candidates:
|
|
664
|
+
return False
|
|
665
|
+
|
|
666
|
+
default_messages = {
|
|
667
|
+
"Task completed successfully.",
|
|
668
|
+
"Task completed successfully",
|
|
669
|
+
}
|
|
670
|
+
if isinstance(tool_name, str) and tool_name:
|
|
671
|
+
default_messages.add(f"Tool '{tool_name}' completed successfully")
|
|
672
|
+
default_messages.add(f'Tool "{tool_name}" completed successfully')
|
|
673
|
+
|
|
674
|
+
return any(message in default_messages for message in normalized_candidates)
|
|
675
|
+
|
|
676
|
+
@staticmethod
def _extract_custom_metadata_from_status_message(
    event: TaskStatusUpdateEvent,
) -> dict[str, Any] | None:
    """Extract custom metadata from a TaskStatusUpdateEvent.

    Args:
        event: Task status update event potentially containing event metadata.

    Returns:
        dict[str, Any] | None: The metadata dictionary when present and a dict;
        otherwise None.
    """
    # getattr with a default covers objects that lack a metadata attribute.
    metadata = getattr(event, "metadata", None)
    if isinstance(metadata, dict):
        return metadata
    return None
|
|
693
|
+
|
|
694
|
+
@staticmethod
def _handle_artifact_update_event(
    event: TaskArtifactUpdateEvent,
    artifact_tracker: dict[str, Any],
) -> dict[str, Any] | None:
    """Process an artifact update and build the response to yield immediately.

    Final responses are detected via the lastChunk=True flag, aligning with
    the base_executor implementation.

    Args:
        event: The TaskArtifactUpdateEvent from the A2A protocol.
        artifact_tracker: Artifact tracking collections.

    Returns:
        dict[str, Any] | None: Response dictionary to yield immediately, or
        None for duplicates/empty updates.
    """
    artifact_dict = A2AConnector._handle_artifact_update(event)
    if not artifact_dict:
        return None

    final_chunk = event.lastChunk is True
    streaming_token = event.append is not None and not final_chunk

    # Deduplicate only complete artifacts; streaming token chunks may
    # legitimately repeat content and must never be filtered out.
    if not streaming_token and not A2AConnector._process_artifact_collection(
        artifact_dict, artifact_tracker, final_chunk
    ):
        return None

    content, task_state, response_artifacts = A2AConnector._determine_artifact_response_content(
        artifact_dict, artifact_tracker, final_chunk
    )
    return A2AConnector._create_artifact_response(event, content, task_state, response_artifacts, final_chunk)
|
|
730
|
+
|
|
731
|
+
@staticmethod
def _process_artifact_collection(
    artifact_dict: dict[str, Any], artifact_tracker: dict[str, Any], is_final_response: bool
) -> bool:
    """Track and deduplicate an artifact; report whether to keep responding.

    Args:
        artifact_dict (dict[str, Any]): The artifact dictionary to process.
        artifact_tracker (dict[str, Any]): The tracker for artifacts.
        is_final_response (bool): Whether this is the final response.

    Returns:
        bool: False when the caller should skip emitting a response, True otherwise.
    """
    name = artifact_dict.get("artifact_name", "")

    # The synthetic "final_response" artifact is never tracked or deduplicated.
    if name == "final_response":
        return True

    digest = A2AConnector._create_artifact_hash(artifact_dict)
    seen = artifact_tracker["seen_artifact_hashes"]
    if digest in seen:
        logger.debug(f"Skipping duplicate artifact: {name}")
        # Duplicates are dropped unless they carry the final response.
        return is_final_response

    seen.add(digest)
    info = A2AConnector._create_artifact_info(artifact_dict, name)
    artifact_tracker["collected_artifacts"].append(info.model_dump())
    return True
|
|
759
|
+
|
|
760
|
+
@staticmethod
def _create_artifact_info(artifact_dict: dict[str, Any], artifact_name: str) -> ArtifactInfo:
    """Build an ArtifactInfo model from a parsed artifact dictionary.

    Args:
        artifact_dict (dict[str, Any]): Parsed artifact data.
        artifact_name (str): Name to assign to the artifact.

    Returns:
        ArtifactInfo: Structured artifact metadata.
    """
    # All pass-through fields share the same dict key and model field name.
    passthrough = {
        key: artifact_dict.get(key)
        for key in ("artifact_id", "content_type", "mime_type", "file_name", "file_data", "file_uri", "description")
    }
    return ArtifactInfo(
        name=artifact_name,
        has_file_data="file_data" in artifact_dict,
        has_file_uri="file_uri" in artifact_dict,
        **passthrough,
    )
|
|
783
|
+
|
|
784
|
+
@staticmethod
|
|
785
|
+
def _determine_artifact_response_content(
|
|
786
|
+
artifact_dict: dict[str, Any], artifact_tracker: dict[str, Any], is_final_response: bool
|
|
787
|
+
) -> tuple[str, str, list[dict[str, Any]]]:
|
|
788
|
+
"""Determine response content, task state, and artifacts based on artifact type.
|
|
789
|
+
|
|
790
|
+
Args:
|
|
791
|
+
artifact_dict (dict[str, Any]): The artifact dictionary to process.
|
|
792
|
+
artifact_tracker (dict[str, Any]): The tracker for artifacts.
|
|
793
|
+
is_final_response (bool): Whether this is the final response.
|
|
794
|
+
|
|
795
|
+
Returns:
|
|
796
|
+
tuple[str, str, list[dict[str, Any]]]: Tuple of (content, task_state, response_artifacts).
|
|
797
|
+
"""
|
|
798
|
+
artifact_name = artifact_dict.get("artifact_name", "")
|
|
799
|
+
|
|
800
|
+
if is_final_response and artifact_name == "final_response":
|
|
801
|
+
content = artifact_dict.get("content", f"Final response: {artifact_name}")
|
|
802
|
+
task_state = "completed"
|
|
803
|
+
response_artifacts = artifact_tracker["collected_artifacts"].copy()
|
|
804
|
+
else:
|
|
805
|
+
content = artifact_dict.get("content", f"Artifact received: {artifact_name}")
|
|
806
|
+
task_state = "working"
|
|
807
|
+
response_artifacts = []
|
|
808
|
+
if artifact_name != "final_response":
|
|
809
|
+
for artifact in artifact_tracker["collected_artifacts"]:
|
|
810
|
+
if artifact.get("name") == artifact_name:
|
|
811
|
+
response_artifacts = [artifact]
|
|
812
|
+
break
|
|
813
|
+
|
|
814
|
+
return content, task_state, response_artifacts
|
|
815
|
+
|
|
816
|
+
@staticmethod
def _create_artifact_response(
    event: TaskArtifactUpdateEvent,
    content: str,
    task_state: str,
    response_artifacts: list[dict[str, Any]],
    is_final_response: bool,
) -> dict[str, Any]:
    """Assemble the response dictionary for an artifact update.

    Args:
        event (TaskArtifactUpdateEvent): The source artifact event.
        content (str): Text content for the response.
        task_state (str): Task state to report.
        response_artifacts (list[dict[str, Any]]): Artifacts to attach.
        is_final_response (bool): Whether this is the final response.

    Returns:
        dict[str, Any]: The complete artifact response dictionary.
    """
    # Metadata merges event-provided values with the standard fields.
    metadata = A2AConnector._extract_artifact_metadata(event, content, is_final_response)

    if is_final_response:
        return assemble_final_response(
            content=content,
            artifacts=response_artifacts or None,
            metadata_options=FinalResponseMetadataOptions(metadata_extra=metadata),
            task_state=task_state,
            extra_fields={
                "task_id": event.taskId,
                "context_id": event.contextId,
                "event_type": A2AStreamEventType.FINAL_RESPONSE,
            },
        )

    response: dict[str, Any] = {
        "status": "success",
        "task_state": task_state,
        "content": content,
        "final": False,
        "task_id": event.taskId,
        "context_id": event.contextId,
        "metadata": metadata,
        "event_type": A2AStreamEventType.TOOL_RESULT,
    }
    if response_artifacts:
        response["artifacts"] = response_artifacts
    return response
|
|
868
|
+
|
|
869
|
+
@staticmethod
def _extract_artifact_metadata(
    event: TaskArtifactUpdateEvent, content: str, is_final_response: bool
) -> dict[str, Any]:
    """Merge event-supplied metadata with the standard response metadata.

    Keys arriving as MetadataFieldKeys enum members are kept as-is; every
    other key is coerced to str before merging.

    Args:
        event: The TaskArtifactUpdateEvent from A2A protocol.
        content: The response content.
        is_final_response: Whether this is a final response.

    Returns:
        The merged metadata dictionary.
    """
    raw = event.metadata or {}
    normalized: dict[str, Any] = {}
    if isinstance(raw, dict):
        for key, value in raw.items():
            try:
                normalized[key if isinstance(key, MetadataFieldKeys) else str(key)] = value
            except Exception:
                # Best effort: fall back to a stringified key.
                normalized[str(key)] = value

    return create_metadata(
        content=content,
        is_final=is_final_response,
        status=Status.FINISHED if is_final_response else Status.RUNNING,
        existing_metadata=normalized,
    )
|
|
907
|
+
|
|
908
|
+
@staticmethod
def _process_task_object(
    res_data: Task,
    collected_artifacts: list[dict[str, Any]],
) -> dict[str, Any] | None:
    """Convert a Task object into a status-update response.

    Artifacts are normalized through the ArtifactInfo model so A2A artifacts
    share the internal structure used elsewhere.

    Args:
        res_data: The Task object from A2A protocol.
        collected_artifacts: Artifacts gathered while streaming.

    Returns:
        A response dictionary, or None when the task has no text content.
    """
    texts = A2AConnector._extract_text_content(res_data)
    if not texts:
        return None

    merged = A2AConnector._convert_and_merge_task_artifacts(res_data, collected_artifacts)
    joined = "\n".join(texts)
    final = A2AConnector._is_task_final_state(res_data.status.state)
    return A2AConnector._create_task_response(res_data, joined, merged, final)
|
|
937
|
+
|
|
938
|
+
@staticmethod
|
|
939
|
+
def _convert_and_merge_task_artifacts(
|
|
940
|
+
res_data: Task, collected_artifacts: list[dict[str, Any]]
|
|
941
|
+
) -> list[dict[str, Any]]:
|
|
942
|
+
"""Convert A2A task artifacts to our format and merge with collected artifacts.
|
|
943
|
+
|
|
944
|
+
Args:
|
|
945
|
+
res_data: The Task object from A2A protocol.
|
|
946
|
+
collected_artifacts: List of artifacts collected during streaming.
|
|
947
|
+
|
|
948
|
+
Returns:
|
|
949
|
+
List of all artifacts.
|
|
950
|
+
"""
|
|
951
|
+
task_artifacts = []
|
|
952
|
+
if res_data.artifacts:
|
|
953
|
+
for artifact in res_data.artifacts:
|
|
954
|
+
artifact_info = ArtifactInfo(
|
|
955
|
+
artifact_id=artifact.artifactId,
|
|
956
|
+
name=artifact.name,
|
|
957
|
+
description=artifact.description,
|
|
958
|
+
parts=len(artifact.parts) if artifact.parts else 0,
|
|
959
|
+
)
|
|
960
|
+
task_artifacts.append(artifact_info.model_dump())
|
|
961
|
+
|
|
962
|
+
# Combine with collected artifacts (avoid duplicates)
|
|
963
|
+
all_artifacts = collected_artifacts.copy()
|
|
964
|
+
for task_artifact in task_artifacts:
|
|
965
|
+
artifact_id = task_artifact.get("artifact_id")
|
|
966
|
+
if not any(ca.get("artifact_id") == artifact_id for ca in all_artifacts):
|
|
967
|
+
all_artifacts.append(task_artifact)
|
|
968
|
+
|
|
969
|
+
return all_artifacts
|
|
970
|
+
|
|
971
|
+
@staticmethod
def _is_task_final_state(state: TaskState) -> bool:
    """Report whether a task state is terminal.

    Args:
        state: The TaskState value from A2A protocol.

    Returns:
        True for completed, failed, or canceled; False otherwise.
    """
    terminal_states = (TaskState.completed, TaskState.failed, TaskState.canceled)
    return state in terminal_states
|
|
982
|
+
|
|
983
|
+
@staticmethod
def _create_task_response(
    res_data: Task, content: str, all_artifacts: list[dict[str, Any]], is_final: bool
) -> dict[str, Any]:
    """Build the response dictionary for a processed Task.

    Args:
        res_data: The Task object from A2A protocol.
        content: The response content.
        all_artifacts: Every artifact associated with the task.
        is_final: Whether the task reached a terminal state.
    """
    metadata = create_metadata(content=content, is_final=is_final)
    now = datetime.now(UTC).isoformat()

    if is_final:
        return assemble_final_response(
            content=content,
            artifacts=all_artifacts or None,
            metadata_options=FinalResponseMetadataOptions(metadata_extra=metadata),
            task_state=res_data.status.state.value,
            extra_fields={
                "task_id": res_data.id,
                "context_id": res_data.contextId,
                "event_type": A2AStreamEventType.FINAL_RESPONSE,
                "timestamp": now,
            },
        )

    response: dict[str, Any] = {
        "status": "success",
        "task_state": res_data.status.state.value,
        "content": content,
        "task_id": res_data.id,
        "context_id": res_data.contextId,
        "final": False,
        "timestamp": now,
        "metadata": metadata,
        "event_type": A2AStreamEventType.CONTENT_CHUNK,
    }
    if all_artifacts:
        response["artifacts"] = all_artifacts
    return response
|
|
1029
|
+
|
|
1030
|
+
@staticmethod
def _process_message_object(res_data: Message) -> dict[str, Any] | None:
    """Convert a Message object into a status-update response.

    Args:
        res_data: The Message object from A2A protocol.

    Returns:
        A response dictionary, or None when the message carries no text.
    """
    if not res_data.parts:
        return None

    texts = get_text_parts(res_data.parts)
    if not texts:
        return None

    joined = "\n".join(texts)
    return {
        "status": "success",
        "task_state": "working",
        "content": joined,
        "task_id": res_data.taskId,
        "context_id": res_data.contextId,
        "final": False,
        "timestamp": datetime.now(UTC).isoformat(),
        "metadata": create_metadata(content=joined, is_final=False),
        "event_type": A2AStreamEventType.CONTENT_CHUNK,
    }
|
|
1059
|
+
|
|
1060
|
+
@staticmethod
def _handle_artifact_update(
    event: TaskArtifactUpdateEvent,
) -> dict[str, Any] | None:
    """Parse a task artifact update event into an artifact dictionary.

    Args:
        event: The TaskArtifactUpdateEvent from A2A protocol.

    Returns:
        An artifact dictionary, or None when the event holds no parts.
    """
    artifact = event.artifact
    if not artifact or not artifact.parts:
        return None

    base = A2AConnector._create_base_artifact_response(event)

    # Text parts take precedence; otherwise fall back to file handling.
    texts = get_text_parts(artifact.parts)
    if texts:
        return A2AConnector._create_text_artifact_response(base, texts)
    return A2AConnector._create_file_artifact_response(event, base)
|
|
1085
|
+
|
|
1086
|
+
@staticmethod
|
|
1087
|
+
def _create_base_artifact_response(event: TaskArtifactUpdateEvent) -> dict[str, Any]:
|
|
1088
|
+
"""Create base artifact response structure.
|
|
1089
|
+
|
|
1090
|
+
Args:
|
|
1091
|
+
event: The TaskArtifactUpdateEvent from A2A protocol.
|
|
1092
|
+
|
|
1093
|
+
Returns:
|
|
1094
|
+
A dictionary containing the base artifact response structure.
|
|
1095
|
+
"""
|
|
1096
|
+
return {
|
|
1097
|
+
"type": "artifact",
|
|
1098
|
+
"status": "success",
|
|
1099
|
+
"task_id": event.taskId,
|
|
1100
|
+
"context_id": event.contextId,
|
|
1101
|
+
"artifact_id": event.artifact.artifactId,
|
|
1102
|
+
"artifact_name": event.artifact.name,
|
|
1103
|
+
"append": event.append,
|
|
1104
|
+
"last_chunk": event.lastChunk,
|
|
1105
|
+
"timestamp": datetime.now(UTC).isoformat(),
|
|
1106
|
+
}
|
|
1107
|
+
|
|
1108
|
+
@staticmethod
|
|
1109
|
+
def _create_text_artifact_response(artifact_response: dict[str, Any], texts: list[str]) -> dict[str, Any]:
|
|
1110
|
+
"""Create response for text artifacts.
|
|
1111
|
+
|
|
1112
|
+
Args:
|
|
1113
|
+
artifact_response: The artifact response dictionary.
|
|
1114
|
+
texts: List of text parts.
|
|
1115
|
+
"""
|
|
1116
|
+
artifact_response.update(
|
|
1117
|
+
{
|
|
1118
|
+
"content_type": "text",
|
|
1119
|
+
"content": "\n".join(texts),
|
|
1120
|
+
}
|
|
1121
|
+
)
|
|
1122
|
+
return artifact_response
|
|
1123
|
+
|
|
1124
|
+
@staticmethod
def _create_file_artifact_response(
    event: TaskArtifactUpdateEvent, artifact_response: dict[str, Any]
) -> dict[str, Any] | None:
    """Fill in the file-specific fields of an artifact response.

    Args:
        event: The TaskArtifactUpdateEvent from A2A protocol.
        artifact_response: The base artifact response dictionary (mutated in place).
    """
    for part in event.artifact.parts:
        if not (hasattr(part, "root") and hasattr(part.root, "file")):
            continue

        file_info = part.root.file

        # File metadata falls back to generic values when attributes are absent.
        artifact_response["content_type"] = "file"
        artifact_response["mime_type"] = (
            file_info.mimeType if hasattr(file_info, "mimeType") else "application/octet-stream"
        )
        artifact_response["file_name"] = file_info.name if hasattr(file_info, "name") else event.artifact.name

        # Attach the file payload (bytes or URI) and a human-readable description.
        A2AConnector._extract_file_content(file_info, artifact_response)
        return artifact_response

    return None
|
|
1154
|
+
|
|
1155
|
+
@staticmethod
|
|
1156
|
+
def _extract_file_content(file_info: Any, artifact_response: dict[str, Any]) -> None:
|
|
1157
|
+
"""Extract file content (bytes or URI) and create content description.
|
|
1158
|
+
|
|
1159
|
+
Args:
|
|
1160
|
+
file_info: The file info object from A2A protocol.
|
|
1161
|
+
artifact_response: The artifact response dictionary.
|
|
1162
|
+
"""
|
|
1163
|
+
file_name = artifact_response["file_name"]
|
|
1164
|
+
|
|
1165
|
+
if hasattr(file_info, "bytes") and file_info.bytes:
|
|
1166
|
+
artifact_response["file_data"] = file_info.bytes
|
|
1167
|
+
artifact_response["content"] = f"File artifact: {file_name} ({len(file_info.bytes)} bytes base64 data)"
|
|
1168
|
+
elif hasattr(file_info, "uri") and file_info.uri:
|
|
1169
|
+
artifact_response["file_uri"] = file_info.uri
|
|
1170
|
+
artifact_response["content"] = f"File artifact: {file_name} (URI: {file_info.uri})"
|
|
1171
|
+
else:
|
|
1172
|
+
artifact_response["content"] = f"File artifact: {file_name} (no content available)"
|
|
1173
|
+
|
|
1174
|
+
@staticmethod
def send_to_agent(
    agent_card: AgentCard,
    message: str | dict[str, Any],
    **kwargs: Any,
) -> dict[str, Any]:
    """Synchronously sends a message to another agent using the A2A protocol.

    This is a synchronous wrapper around asend_to_agent. When no event loop is
    running it simply uses asyncio.run(); when called from inside a running
    loop it executes the coroutine on a worker thread to avoid nested-loop
    errors.

    Args:
        agent_card: The AgentCard instance containing the target agent's details
            including URL, authentication requirements, and capabilities.
        message: The message to send to the agent. Can be either a string for
            simple text messages or a dictionary for structured data.
        **kwargs: Additional keyword arguments passed to asend_to_agent.

    Returns:
        A dictionary containing the response details:
        - status (str): 'success' or 'error'
        - content (str): Extracted text content from the response
        - task_id (str, optional): ID of the created/updated task
        - task_state (str, optional): Current state of the task
        - raw_response (str): Complete JSON response from the A2A client
        - error_type (str, optional): Type of error if status is 'error'
        - message (str, optional): Error message if status is 'error'

    Raises:
        RuntimeError: If asend_to_agent encounters an unhandled exception.
    """
    try:
        # Keep the try body minimal: only the call that can raise.
        loop = asyncio.get_running_loop()
    except RuntimeError:
        logger.info("No running loop, safe to use asyncio.run()")
        return asyncio.run(A2AConnector.asend_to_agent(agent_card, message, **kwargs))

    # Lazy %-style args avoid formatting cost when INFO logging is disabled.
    logger.info("Running loop: %s", loop)
    logger.info("There's a running loop, need to handle differently")

    # Run the coroutine in a fresh event loop on a worker thread.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = executor.submit(
            asyncio.run,
            A2AConnector.asend_to_agent(agent_card, message, **kwargs),
        )
        return future.result()
|
|
1221
|
+
|
|
1222
|
+
@staticmethod
async def asend_to_agent(
    agent_card: AgentCard,
    message: str | dict[str, Any],
    **kwargs: Any,
) -> dict[str, Any]:
    """Asynchronously sends a message to another agent using the A2A protocol.

    Internally consumes the streaming API and returns only the final response,
    avoiding direct httpx usage that can break Nuitka-compiled builds.

    Args:
        agent_card: The AgentCard instance containing the target agent's details
            including URL, authentication requirements, and capabilities.
        message: The message to send to the agent. Can be either a string for
            simple text messages or a dictionary for structured data.
        **kwargs: Additional keyword arguments.

    Returns:
        A dictionary containing the final response with the simplified structure:
        - status (str): "success" or "error"
        - task_state (str): Final A2A TaskState value
        - content (str): Final text content from the agent
        - task_id (str): ID of the associated task
        - context_id (str): Context ID of the task
        - final (bool): Always True for final responses
        - artifacts (list): List of all artifacts created during execution

    Raises:
        Exception: If there's an error during message sending or processing.
    """
    final_response = A2AConnector._create_error_response("error", "No response received")
    last_metadata_time: float | None = None

    try:
        # Fold every streamed chunk into the running final response; only the
        # final chunk contributes artifacts.
        async for chunk in A2AConnector.astream_to_agent(agent_card, message, **kwargs):
            merged = A2AConnector._merge_stream_chunk_into_final(chunk, final_response)
            if merged and merged.get("status") == "error":
                # Errors short-circuit the stream.
                return merged

            final_response, last_metadata_time = A2AConnector._handle_chunk_metadata(
                chunk, final_response, last_metadata_time
            )

        # Backfill a meaningful cumulative time on the final metadata.
        return A2AConnector._ensure_final_metadata_time(final_response, last_metadata_time)

    except Exception as e:
        logger.error(f"Error in asend_to_agent: {e}", exc_info=True)
        return A2AConnector._create_error_response("error", f"Error during message sending: {str(e)}")
|
|
1279
|
+
|
|
1280
|
+
@staticmethod
|
|
1281
|
+
def _merge_stream_chunk_into_final(chunk: dict[str, Any], final_response: dict[str, Any]) -> dict[str, Any]:
|
|
1282
|
+
"""Merge a simplified streaming chunk into the cumulative final response.
|
|
1283
|
+
|
|
1284
|
+
Used by asend_to_agent, where chunks are already normalized dicts coming
|
|
1285
|
+
from astream_to_agent (or a test double).
|
|
1286
|
+
|
|
1287
|
+
Args:
|
|
1288
|
+
chunk (dict[str, Any]): The streaming chunk to merge.
|
|
1289
|
+
final_response (dict[str, Any]): The cumulative final response.
|
|
1290
|
+
|
|
1291
|
+
Returns:
|
|
1292
|
+
dict[str, Any]: The updated final response.
|
|
1293
|
+
"""
|
|
1294
|
+
# Handle error chunks
|
|
1295
|
+
if chunk.get("status") == "error":
|
|
1296
|
+
error_content = chunk.get("message", "Unknown error")
|
|
1297
|
+
return A2AConnector._create_error_response("error", error_content)
|
|
1298
|
+
|
|
1299
|
+
# Update final response with latest information
|
|
1300
|
+
final_response.update(
|
|
1301
|
+
{
|
|
1302
|
+
"status": chunk.get("status", "success"),
|
|
1303
|
+
"task_state": chunk.get("task_state", "working"),
|
|
1304
|
+
"content": chunk.get("content", ""),
|
|
1305
|
+
"task_id": chunk.get("task_id", ""),
|
|
1306
|
+
"context_id": chunk.get("context_id", ""),
|
|
1307
|
+
"final": chunk.get("final", False),
|
|
1308
|
+
}
|
|
1309
|
+
)
|
|
1310
|
+
|
|
1311
|
+
# Only collect artifacts from final response to avoid duplication
|
|
1312
|
+
if chunk.get("final", False):
|
|
1313
|
+
final_response["artifacts"] = chunk.get("artifacts", [])
|
|
1314
|
+
|
|
1315
|
+
return final_response
|
|
1316
|
+
|
|
1317
|
+
@staticmethod
|
|
1318
|
+
def _handle_chunk_metadata(
|
|
1319
|
+
chunk: dict[str, Any], final_response: dict[str, Any], last_metadata_time: float | None
|
|
1320
|
+
) -> tuple[dict[str, Any], float | None]:
|
|
1321
|
+
"""Handle metadata from a chunk and return updated response and time.
|
|
1322
|
+
|
|
1323
|
+
Args:
|
|
1324
|
+
chunk (dict[str, Any]): The streaming chunk containing metadata.
|
|
1325
|
+
final_response (dict[str, Any]): The cumulative final response.
|
|
1326
|
+
last_metadata_time (float | None): The last metadata timestamp.
|
|
1327
|
+
|
|
1328
|
+
Returns:
|
|
1329
|
+
tuple[dict[str, Any], float | None]: Tuple of (updated_response, new_metadata_time).
|
|
1330
|
+
"""
|
|
1331
|
+
if chunk.get("metadata"):
|
|
1332
|
+
md = chunk.get("metadata")
|
|
1333
|
+
final_response["metadata"] = md
|
|
1334
|
+
|
|
1335
|
+
# Extract time from metadata
|
|
1336
|
+
try:
|
|
1337
|
+
t = md.get(MetadataFieldKeys.TIME) if isinstance(md, dict) else None
|
|
1338
|
+
if isinstance(t, int | float) and t > 0:
|
|
1339
|
+
last_metadata_time = t
|
|
1340
|
+
except Exception:
|
|
1341
|
+
pass
|
|
1342
|
+
|
|
1343
|
+
return final_response, last_metadata_time
|
|
1344
|
+
|
|
1345
|
+
@staticmethod
|
|
1346
|
+
def _ensure_final_metadata_time(final_response: dict[str, Any], last_metadata_time: float | None) -> dict[str, Any]:
|
|
1347
|
+
"""Ensure final response metadata has meaningful cumulative time.
|
|
1348
|
+
|
|
1349
|
+
Args:
|
|
1350
|
+
final_response (dict[str, Any]): The final response to update.
|
|
1351
|
+
last_metadata_time (float | None): The last metadata timestamp to use.
|
|
1352
|
+
|
|
1353
|
+
Returns:
|
|
1354
|
+
dict[str, Any]: The updated final response with ensured time metadata.
|
|
1355
|
+
"""
|
|
1356
|
+
try:
|
|
1357
|
+
md = final_response.get("metadata")
|
|
1358
|
+
if isinstance(md, dict):
|
|
1359
|
+
t = md.get(MetadataFieldKeys.TIME)
|
|
1360
|
+
if (
|
|
1361
|
+
not isinstance(t, int | float) or abs(t) < A2AConnector.FLOAT_EPSILON
|
|
1362
|
+
) and last_metadata_time is not None:
|
|
1363
|
+
md[MetadataFieldKeys.TIME] = last_metadata_time
|
|
1364
|
+
final_response["metadata"] = md
|
|
1365
|
+
except Exception:
|
|
1366
|
+
pass
|
|
1367
|
+
|
|
1368
|
+
return final_response
|
|
1369
|
+
|
|
1370
|
+
@staticmethod
async def astream_to_agent(
    agent_card: AgentCard,
    message: str | dict[str, Any],
    **kwargs: Any,
) -> AsyncGenerator[dict[str, Any], None]:
    """Asynchronously streams a message to another agent over the A2A protocol.

    Yields response chunks as they become available, handling the official
    A2A streaming event types defined in the specification.

    Args:
        agent_card: The AgentCard instance containing the target agent's details
            including URL, authentication requirements, and capabilities.
        message: The message to send to the agent. Can be either a string for
            simple text messages or a dictionary for structured data.
        **kwargs: Additional keyword arguments (task_id, context_id, metadata,
            pii_mapping, http_kwargs, ...).

    Yields:
        Dictionaries containing streaming response chunks with a simplified
        structure.

        For successful responses:
        - status (str): "success" or "error"
        - task_state (str): A2A TaskState value
        - content (str): Text content from the agent
        - task_id (str): ID of the associated task
        - context_id (str): Context ID of the task
        - final (bool): Whether this is the final update
        - artifacts (list, optional): Artifacts created in this step; each entry
          carries artifact_id, name, content_type, mime_type, file_name,
          has_file_data, has_file_uri, file_data, and file_uri.

        For errors:
        - status (str): "error"
        - error_type (str): Type of error encountered
        - message (str): Error description

    Raises:
        httpx.HTTPError: If there's an HTTP-related error during the streaming request.
        Exception: For any other unexpected errors during message streaming or processing.
    """
    http_kwargs = kwargs.pop("http_kwargs", None)

    try:
        # Build the request plus per-stream tracking state.
        request = A2AConnector._create_streaming_request(message, kwargs)
        tracker = A2AConnector._create_artifact_tracker()
        time_tracker = MetadataTimeTracker()
        config = StreamingConfig(http_kwargs=http_kwargs)

        stream = A2AConnector._stream_and_process_messages(
            agent_card,
            request,
            tracker,
            time_tracker,
            config,
        )
        async for response in stream:
            yield response

    except Exception as e:
        # Surface client-side failures as a terminal error chunk.
        yield A2AConnector._create_error_response("client_side_exception", str(e))
|
|
1443
|
+
|
|
1444
|
+
@staticmethod
def _create_streaming_request(message: str | dict[str, Any], kwargs: dict[str, Any]) -> SendStreamingMessageRequest:
    """Build a SendStreamingMessageRequest from the caller's inputs.

    Args:
        message: The message to send.
        kwargs: Extra parameters: task_id, context_id, metadata, pii_mapping.

    Returns:
        A configured SendStreamingMessageRequest with a fresh request id.
    """
    payload = A2AConnector._create_message_payload(
        message,
        kwargs.get("task_id"),
        kwargs.get("context_id"),
    )

    # Copy caller metadata so the original dict is never mutated.
    supplied = kwargs.get("metadata") or {}
    metadata: dict[str, Any] = dict(supplied) if isinstance(supplied, dict) else {}

    # Attach PII mapping only when a non-empty dict was provided.
    pii_mapping = kwargs.get("pii_mapping")
    if isinstance(pii_mapping, dict) and pii_mapping:
        metadata["pii_mapping"] = pii_mapping

    return SendStreamingMessageRequest(id=str(uuid.uuid4()), params=MessageSendParams(**payload, metadata=metadata))
|
|
1468
|
+
|
|
1469
|
+
@staticmethod
|
|
1470
|
+
def _create_artifact_tracker() -> dict[str, Any]:
|
|
1471
|
+
"""Create an artifact tracking structure for deduplication.
|
|
1472
|
+
|
|
1473
|
+
Returns:
|
|
1474
|
+
Dictionary containing artifact tracking collections.
|
|
1475
|
+
"""
|
|
1476
|
+
return {
|
|
1477
|
+
"collected_artifacts": [],
|
|
1478
|
+
"seen_artifact_hashes": set(),
|
|
1479
|
+
}
|
|
1480
|
+
|
|
1481
|
+
@staticmethod
async def _stream_and_process_messages(
    agent_card: AgentCard,
    request: SendStreamingMessageRequest,
    artifact_tracker: dict[str, Any],
    metadata_time_tracker: MetadataTimeTracker,
    config: StreamingConfig,
) -> AsyncGenerator[dict[str, Any], None]:
    """Stream messages from the A2A client and process each response chunk.

    Args:
        agent_card: The target agent's card.
        request: The streaming request to send.
        artifact_tracker: Artifact tracking collections.
        metadata_time_tracker: Tracker accumulating execution time across steps.
        config: Streaming configuration (HTTP client kwargs, flags).

    Yields:
        dict[str, Any]: Processed stream chunks (status, task_state, content,
        task_id, context_id, final, optional artifacts).

    Raises:
        httpx.HTTPError: If an HTTP-related error occurs during streaming.
    """
    client_kwargs = config.http_kwargs or {}
    async with httpx.AsyncClient(**client_kwargs) as transport:
        a2a_client = A2AClient(httpx_client=transport, agent_card=agent_card)

        # Remember the hash of the last normalized metadata and cache the
        # normalized payloads so identical metadata is not re-normalized
        # on every chunk.
        last_metadata_hash: str | None = None
        normalized_cache: dict[str, dict[str, Any]] = {}

        async for event in a2a_client.send_message_streaming(request):
            # Only successful streaming responses carry processable results.
            if not isinstance(event.root, SendStreamingMessageSuccessResponse):
                continue

            parsed = A2AConnector._process_stream_chunk(event.root.result, artifact_tracker)
            if not parsed:
                continue

            async for outgoing, new_hash in A2AConnector._process_response_chunk(
                parsed, metadata_time_tracker, config, last_metadata_hash, normalized_cache
            ):
                yield outgoing
                # Carry the updated hash into the next iteration when the
                # metadata actually changed.
                if new_hash is not None:
                    last_metadata_hash = new_hash
|
|
1531
|
+
|
|
1532
|
+
@staticmethod
async def _process_response_chunk(
    response: dict[str, Any],
    metadata_time_tracker: MetadataTimeTracker,
    config: StreamingConfig,
    previous_metadata_hash: str | None = None,
    metadata_cache: dict[str, dict[str, Any]] | None = None,
) -> AsyncGenerator[tuple[dict[str, Any], str | None], None]:
    """Process a single response chunk and yield it in the legacy format.

    Args:
        response: Response payload produced from the stream chunk.
        metadata_time_tracker: Tracker used to aggregate timing metadata.
        config: Streaming configuration flags.
        previous_metadata_hash: Hash of the previously normalized metadata.
        metadata_cache: Optional cache of normalized metadata keyed by hash.

    Yields:
        tuple[dict[str, Any], str | None]: The processed chunk plus the hash
        of the normalized metadata when it changed (otherwise None).
    """
    enriched = metadata_time_tracker.update_response_metadata(response)
    enriched, metadata_hash = A2AConnector._normalize_metadata_enums(
        enriched, previous_metadata_hash, metadata_cache
    )

    # Legacy behavior: consumers expect the key to be absent rather than
    # present with None or an empty list.
    artifacts = enriched.get("artifacts")
    if artifacts is None or artifacts == []:
        enriched.pop("artifacts", None)

    routed = A2AConnector.event_registry.handle(enriched.get("event_type"), enriched)
    event_type = A2AConnector._normalize_event_type_value(routed.get("event_type"))
    if event_type is not None:
        routed["event_type"] = event_type

    yield routed, metadata_hash
|
|
1568
|
+
|
|
1569
|
+
@staticmethod
def _normalize_event_type_value(event_type: Any) -> str | None:
    """Coerce an event type value to a plain string for downstream consumers.

    Thin wrapper over SSEChunkTransformer.normalize_event_type_value so the
    shared implementation is used everywhere.

    Args:
        event_type (Any): The event type value to normalize.

    Returns:
        str | None: The normalized string, or None when the value is invalid.
    """
    normalized = SSEChunkTransformer.normalize_event_type_value(event_type)
    return normalized
|
|
1582
|
+
|
|
1583
|
+
@staticmethod
|
|
1584
|
+
def _normalize_metadata_enums(
|
|
1585
|
+
response: dict[str, Any],
|
|
1586
|
+
previous_metadata_hash: str | None = None,
|
|
1587
|
+
metadata_cache: dict[str, dict[str, Any]] | None = None,
|
|
1588
|
+
) -> tuple[dict[str, Any], str | None]:
|
|
1589
|
+
"""Convert enum keys and values in metadata to strings for proper JSON serialization.
|
|
1590
|
+
|
|
1591
|
+
Args:
|
|
1592
|
+
response: The response dictionary that may contain enum values in metadata.
|
|
1593
|
+
previous_metadata_hash: Hash of previously normalized metadata to avoid unnecessary work.
|
|
1594
|
+
metadata_cache: Optional cache mapping metadata hashes to normalized metadata dicts.
|
|
1595
|
+
|
|
1596
|
+
Returns:
|
|
1597
|
+
Tuple of (normalized_response, new_metadata_hash) where new_metadata_hash
|
|
1598
|
+
is None if no normalization was needed.
|
|
1599
|
+
"""
|
|
1600
|
+
if not isinstance(response, dict):
|
|
1601
|
+
return response, None
|
|
1602
|
+
|
|
1603
|
+
normalized_response = response.copy()
|
|
1604
|
+
current_metadata = normalized_response.get("metadata")
|
|
1605
|
+
|
|
1606
|
+
if not isinstance(current_metadata, dict):
|
|
1607
|
+
return normalized_response, None
|
|
1608
|
+
|
|
1609
|
+
current_hash = A2AConnector._compute_metadata_hash(current_metadata)
|
|
1610
|
+
|
|
1611
|
+
if previous_metadata_hash == current_hash:
|
|
1612
|
+
A2AConnector._handle_cached_metadata(normalized_response, current_hash, metadata_cache)
|
|
1613
|
+
return normalized_response, None
|
|
1614
|
+
|
|
1615
|
+
return A2AConnector._handle_new_metadata(normalized_response, current_metadata, current_hash, metadata_cache)
|
|
1616
|
+
|
|
1617
|
+
@staticmethod
|
|
1618
|
+
def _compute_metadata_hash(metadata: dict[str, Any]) -> str:
|
|
1619
|
+
"""Compute hash of metadata for comparison and caching.
|
|
1620
|
+
|
|
1621
|
+
Args:
|
|
1622
|
+
metadata (dict[str, Any]): The metadata to hash.
|
|
1623
|
+
|
|
1624
|
+
Returns:
|
|
1625
|
+
str: The computed hash string.
|
|
1626
|
+
"""
|
|
1627
|
+
metadata_items = []
|
|
1628
|
+
for k, v in metadata.items():
|
|
1629
|
+
key_str = k.value if hasattr(k, "value") else str(k)
|
|
1630
|
+
val_str = v.value if hasattr(v, "value") else str(v)
|
|
1631
|
+
metadata_items.append(f"{key_str}:{val_str}")
|
|
1632
|
+
metadata_str = "|".join(sorted(metadata_items))
|
|
1633
|
+
return hashlib.sha256(metadata_str.encode()).hexdigest()
|
|
1634
|
+
|
|
1635
|
+
@staticmethod
|
|
1636
|
+
def _handle_cached_metadata(
|
|
1637
|
+
response: dict[str, Any],
|
|
1638
|
+
metadata_hash: str,
|
|
1639
|
+
metadata_cache: dict[str, dict[str, Any]] | None,
|
|
1640
|
+
) -> None:
|
|
1641
|
+
"""Handle case where metadata hasn't changed - use cached or compute new.
|
|
1642
|
+
|
|
1643
|
+
Args:
|
|
1644
|
+
response: The response dictionary to update with normalized metadata.
|
|
1645
|
+
metadata_hash: Hash of the original metadata for cache lookup.
|
|
1646
|
+
metadata_cache: Optional cache dictionary to store/retrieve normalized metadata.
|
|
1647
|
+
"""
|
|
1648
|
+
if metadata_cache and metadata_hash in metadata_cache:
|
|
1649
|
+
response["metadata"] = metadata_cache[metadata_hash]
|
|
1650
|
+
else:
|
|
1651
|
+
metadata = response["metadata"]
|
|
1652
|
+
normalized_metadata = A2AConnector._normalize_metadata_value(metadata)
|
|
1653
|
+
response["metadata"] = normalized_metadata
|
|
1654
|
+
if metadata_cache is not None:
|
|
1655
|
+
metadata_cache[metadata_hash] = normalized_metadata
|
|
1656
|
+
|
|
1657
|
+
@staticmethod
def _handle_new_metadata(
    response: dict[str, Any],
    metadata: dict[str, Any],
    metadata_hash: str,
    metadata_cache: dict[str, dict[str, Any]] | None,
) -> tuple[dict[str, Any], str]:
    """Normalize changed metadata, store it on the response, and cache it.

    Args:
        response: The response dictionary to update with normalized metadata.
        metadata: The new metadata to normalize.
        metadata_hash: Hash of the metadata, used as the cache key.
        metadata_cache: Optional cache to store the normalized metadata in.

    Returns:
        Tuple of (updated_response, metadata_hash).
    """
    cleaned = A2AConnector._normalize_metadata_value(metadata)
    response["metadata"] = cleaned

    # Remember the normalization so identical metadata is reused later.
    if metadata_cache is not None:
        metadata_cache[metadata_hash] = cleaned

    return response, metadata_hash
|
|
1682
|
+
|
|
1683
|
+
@staticmethod
def _normalize_metadata_value(value: Any) -> Any:
    """Recursively convert enum instances to their serializable values.

    Thin wrapper over SSEChunkTransformer.normalize_metadata_enums so the
    shared implementation is used everywhere.

    Args:
        value: Value to normalize — an enum, dict, list, tuple, set, or any
            other object.

    Returns:
        The value with enums replaced by their ``.value`` and nested
        containers processed recursively.
    """
    return SSEChunkTransformer.normalize_metadata_enums(value)
|
|
1696
|
+
|
|
1697
|
+
@staticmethod
def _process_stream_chunk(res_data: Any, artifact_tracker: dict[str, Any]) -> dict[str, Any] | None:
    """Dispatch a single stream chunk to the handler for its A2A type.

    Handles A2A protocol types (TaskArtifactUpdateEvent, TaskStatusUpdateEvent,
    Task, Message) — a different protocol layer from the A2AEvent objects that
    SSEChunkTransformer processes, so the separation is intentional.

    Args:
        res_data: The response data from the stream (A2A protocol types).
        artifact_tracker: Artifact tracking collections.

    Returns:
        Processed response dictionary, or None when the chunk type produces
        no output.
    """
    # Artifact updates are handled first and yielded immediately.
    if isinstance(res_data, TaskArtifactUpdateEvent):
        return A2AConnector._handle_artifact_update_event(res_data, artifact_tracker)

    if isinstance(res_data, TaskStatusUpdateEvent):
        return A2AConnector._handle_task_state_update(res_data)

    if isinstance(res_data, Task):
        return A2AConnector._process_task_object(res_data, artifact_tracker["collected_artifacts"])

    if isinstance(res_data, Message):
        return A2AConnector._process_message_object(res_data)

    # Unknown chunk types are silently skipped.
    return None
|
|
1726
|
+
|
|
1727
|
+
@staticmethod
def _create_error_response(error_type: str, message: str) -> dict[str, Any]:
    """Create a standardized error response in the legacy minimal format.

    Args:
        error_type: Type of error that occurred. Accepted for interface
            compatibility; the legacy minimal payload does not include it.
        message: Error message description, surfaced as the content field.

    Returns:
        Standardized error response dictionary with a failed task state.
    """
    error_payload: dict[str, Any] = dict(
        status="error",
        task_state="failed",
        content=message,
        event_type=A2AStreamEventType.ERROR.value,
    )
    return error_payload
|
|
1744
|
+
|
|
1745
|
+
@staticmethod
def _create_artifact_hash(artifact_response: dict[str, Any]) -> str:
    """Produce a deduplication hash for an artifact response.

    Thin wrapper over SSEChunkTransformer.create_artifact_hash so the shared
    implementation is used everywhere.

    Args:
        artifact_response: The artifact response dictionary.

    Returns:
        str: Hash string suitable for duplicate detection.
    """
    return SSEChunkTransformer.create_artifact_hash(artifact_response)
|