aip-agents-binary 0.5.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_langgraph_agent.py +2942 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_react_agent.py +2514 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/constants.py +23 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/persistent_session.py +359 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/transports.py +215 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/base.py +88 -0
- aip_agents/middleware/manager.py +128 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/tools/__init__.py +33 -0
- aip_agents/tools/bosa_tools.py +105 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/constants.py +165 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/tool.py +351 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.20.dist-info/RECORD +280 -0
- aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0
aip_agents/examples/compare_streaming_client.py
@@ -0,0 +1,783 @@
"""Client script to compare direct SSE streaming vs A2A connector streaming.

This script demonstrates:
1. Creating an agent with the same tools as the server
2. Running arun_sse_stream (direct) and recording the SSE chunks
3. Using A2AConnector.astream_to_agent (connector) to get chunks from the server
4. Comparing both outputs side-by-side

Prerequisites:
Start the server first:
    poetry run python -m aip_agents.examples.compare_streaming_server

Then run this client:
    poetry run python -m aip_agents.examples.compare_streaming_client

Authors:
    AI Agent Platform Team
"""

import asyncio
import base64
import json
from copy import deepcopy
from typing import Any

from langchain_openai import ChatOpenAI

from aip_agents.agent import LangGraphAgent
from aip_agents.examples.tools.mock_retrieval_tool import MockRetrievalTool
from aip_agents.examples.tools.pii_demo_tools import (
    get_customer_info,
    get_employee_data,
    get_user_profile,
)
from aip_agents.examples.tools.random_chart_tool import RandomChartTool
from aip_agents.examples.tools.table_generator_tool import TableGeneratorTool
from aip_agents.schema.a2a import A2AStreamEventType
from aip_agents.schema.agent import A2AClientConfig
from aip_agents.utils.logger import get_logger

logger = get_logger(__name__)

SERVER_URL = "http://localhost:18999"


def create_local_agent(enable_token_streaming: bool = False) -> LangGraphAgent:
    """Create a local agent with the same tools as the server.

    Args:
        enable_token_streaming: Whether to enable token streaming for content_chunk events.
    """
    llm = ChatOpenAI(model="gpt-4.1-mini", temperature=0, streaming=enable_token_streaming)
    table_tool = TableGeneratorTool()
    mock_retrieval_tool = MockRetrievalTool()
    random_chart_tool = RandomChartTool()

    visualization_agent = LangGraphAgent(
        name="RandomChartAgent",
        instruction=(
            "You are a visualization specialist. Whenever someone asks for a chart, visualization, "
            "image, or snapshot of insights, you MUST call the random_chart_tool to generate a bar chart artifact. "
            "Always explain what the generated chart represents."
        ),
        model=llm,
        tools=[random_chart_tool],
    )

    agent = LangGraphAgent(
        name="LocalComparisonAgent",
        instruction=(
            "You are a helpful assistant for testing streaming comparison. "
            "When asked for a table, use the table_generator tool. "
            "When asked to search or retrieve, use the mock_retrieval tool. "
            "When asked for customer information, use the get_customer_info tool. "
            "When asked for employee data, use the get_employee_data tool. "
            "When asked for user profile, use the get_user_profile tool. "
            "IMPORTANT: When you receive PII placeholders like <PERSON_1>, pass them WITH the angle brackets <> "
            "to the tools - they are required for the PII system to work correctly. "
            "Always use the tools when relevant to demonstrate artifacts, references, and PII masking."
        ),
        model=llm,
        tools=[table_tool, mock_retrieval_tool, get_customer_info, get_employee_data, get_user_profile],
        agents=[visualization_agent],
        enable_a2a_token_streaming=False,
    )
    return agent


def format_chunk_summary(chunk: dict[str, Any], index: int) -> str:
    """Format a chunk for display."""
    lines = [f" [{index}] event_type={chunk.get('event_type')}, task_state={chunk.get('task_state')}"]

    content = chunk.get("content")
    if content:
        preview = content[:80].replace("\n", "\\n")
        if len(content) > 80:
            preview += "..."
        lines.append(f" content: {preview}")

    if chunk.get("artifacts"):
        for art in chunk["artifacts"]:
            lines.append(f" artifact: {art.get('name', art.get('artifact_name', 'unknown'))}")

    metadata = chunk.get("metadata", {})
    if metadata.get("references"):
        lines.append(f" references: {len(metadata['references'])} items")
    if metadata.get("tool_info"):
        tool_info = metadata["tool_info"]
        # Handle tool_call events (array of tools) vs tool_result events (single tool)
        if "tool_calls" in tool_info:
            tool_names = [tc.get("name", "?") for tc in tool_info.get("tool_calls", [])]
            lines.append(f" tool_info: [{', '.join(tool_names)}]")
        else:
            tool_name = tool_info.get("name", "unknown")
            lines.append(f" tool_info: {tool_name}")

    lines.append(f" final={chunk.get('final')}, status={chunk.get('status')}")
    return "\n".join(lines)


async def run_direct_streaming(
    agent: LangGraphAgent, query: str, pii_mapping: dict[str, str] | None = None
) -> list[dict[str, Any]]:
    """Run arun_sse_stream and collect all chunks."""
    print("Running direct SSE streaming (arun_sse_stream)...")
    chunks = []

    try:
        async for chunk in agent.arun_sse_stream(
            query, task_id="direct-task", context_id="direct-ctx", pii_mapping=pii_mapping
        ):
            chunks.append(chunk)
    except Exception as e:
        logger.error(f"Error in direct streaming: {e}")
        chunks.append({"status": "error", "content": str(e), "event_type": "error", "final": True})

    return chunks


async def run_connector_streaming(
    agent: LangGraphAgent, query: str, pii_mapping: dict[str, str] | None = None
) -> list[dict[str, Any]]:
    """Run astream_to_agent via A2A connector and collect all chunks."""
    print("Running connector streaming (astream_to_agent)...")
    chunks = []

    # Discover agents from the server
    client_a2a_config = A2AClientConfig(discovery_urls=[SERVER_URL])
    agent_cards = agent.discover_agents(client_a2a_config)

    if not agent_cards:
        logger.error("No agents discovered from server!")
        return [{"status": "error", "content": "No agents discovered", "event_type": "error", "final": True}]

    agent_card = agent_cards[0]
    print(f"Discovered agent: {agent_card.name}")

    try:
        async for chunk in agent.astream_to_agent(agent_card=agent_card, message=query, pii_mapping=pii_mapping):
            chunks.append(chunk)
    except Exception as e:
        logger.error(f"Error in connector streaming: {e}")
        chunks.append({"status": "error", "content": str(e), "event_type": "error", "final": True})

    return chunks


def get_chunk_keys(chunks: list[dict[str, Any]]) -> set[str]:
    """Get all unique keys across all chunks."""
    keys: set[str] = set()
    for chunk in chunks:
        # Convert keys to str to handle StrEnum keys (they are strings but type shows differently)
        keys.update(str(k) for k in chunk.keys())
        # Also get metadata keys
        if "metadata" in chunk and isinstance(chunk["metadata"], dict):
            keys.update(f"metadata.{str(k)}" for k in chunk["metadata"].keys())
    return keys


def get_field_types(chunks: list[dict[str, Any]], field: str) -> set[str]:
    """Get all types seen for a field across chunks.

    Note: StrEnum values are reported as 'str' since they ARE strings
    and serialize identically over HTTP/SSE.
    """
    types: set[str] = set()
    for chunk in chunks:
        if "." in field:
            parent, child = field.split(".", 1)
            value = chunk.get(parent, {}).get(child) if isinstance(chunk.get(parent), dict) else None
        else:
            value = chunk.get(field)
        if value is not None:
            # StrEnum IS a string - report as 'str' for comparison purposes
            # since they serialize identically over HTTP/SSE
            if isinstance(value, str):
                types.add("str")
            else:
                types.add(type(value).__name__)
    return types


def group_chunks_by_event_type(chunks: list[dict[str, Any]]) -> dict[str, list[dict[str, Any]]]:
    """Group chunks by their event_type."""
    groups: dict[str, list[dict[str, Any]]] = {}
    for chunk in chunks:
        # Ensure the grouping key is always a non-empty string
        raw_type = chunk.get("event_type")
        event_type = str(raw_type) if raw_type else "unknown"
        groups.setdefault(event_type, []).append(chunk)
    return groups


def _is_empty_payload_chunk(chunk: dict[str, Any]) -> bool:
    return chunk.get("content") in (None, "") and chunk.get("reason") == "empty_payload"


def _is_base64_payload(value: Any) -> bool:
    if not isinstance(value, str):
        return False
    candidate = value.strip()
    if len(candidate) < 24:
        return False
    if len(candidate) % 4 != 0:
        return False
    try:
        base64.b64decode(candidate, validate=True)
    except Exception:
        return False
    return True


def _filter_chunks_for_comparison(event_type: str, chunks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Filter out known non-equivalent chunks so comparisons match semantic parity.

    - A2A connector can emit placeholder status updates with content=None and reason=empty_payload.
      These should not be compared against the real status_update chunk.
    - A2A connector can emit an artifact payload chunk (base64 content) as a tool_result with
      missing tool_info metadata. This is not equivalent to the tool_result completion message.
    """
    if event_type == A2AStreamEventType.STATUS_UPDATE.value:
        meaningful = [c for c in chunks if not _is_empty_payload_chunk(c)]
        return meaningful if meaningful else chunks

    if event_type == A2AStreamEventType.TOOL_RESULT.value:
        filtered: list[dict[str, Any]] = []
        for c in chunks:
            md = c.get("metadata") if isinstance(c.get("metadata"), dict) else {}
            has_tool_info = isinstance(md, dict) and md.get("tool_info") is not None
            if not has_tool_info and c.get("artifacts") and _is_base64_payload(c.get("content")):
                continue
            filtered.append(c)
        return filtered if filtered else chunks

    return chunks


def compare_chunk_structure(chunk1: dict[str, Any], chunk2: dict[str, Any]) -> dict[str, Any]:
    """Compare structure of two chunks and return differences."""
    keys1 = {str(k) for k in chunk1.keys()}
    keys2 = {str(k) for k in chunk2.keys()}

    # Add metadata keys
    if "metadata" in chunk1 and isinstance(chunk1["metadata"], dict):
        keys1.update(f"metadata.{str(k)}" for k in chunk1["metadata"].keys())
    if "metadata" in chunk2 and isinstance(chunk2["metadata"], dict):
        keys2.update(f"metadata.{str(k)}" for k in chunk2["metadata"].keys())

    differences = {
        "keys_only_in_first": sorted(keys1 - keys2),
        "keys_only_in_second": sorted(keys2 - keys1),
        "keys_in_both": sorted(keys1 & keys2),
        "type_differences": [],
    }

    # Check type differences for common keys
    for key in sorted(keys1 & keys2):
        if key.startswith("metadata."):
            parent, child = key.split(".", 1)
            val1 = chunk1.get(parent, {}).get(child)
            val2 = chunk2.get(parent, {}).get(child)
        else:
            val1 = chunk1.get(key)
            val2 = chunk2.get(key)

        if val1 is not None and val2 is not None:
            type1 = "str" if isinstance(val1, str) else type(val1).__name__
            type2 = "str" if isinstance(val2, str) else type(val2).__name__
            if type1 != type2:
                differences["type_differences"].append(
                    {
                        "field": key,
                        "type1": type1,
                        "type2": type2,
                    }
                )

    return differences


def _get_chunk_keys_flat(chunk: dict[str, Any]) -> set[str]:
    """Get all keys from a single chunk, flattening metadata."""
    keys: set[str] = set()
    for k in chunk.keys():
        keys.add(str(k))
    if "metadata" in chunk and isinstance(chunk["metadata"], dict):
        for mk in chunk["metadata"].keys():
            keys.add(f"metadata.{str(mk)}")
    return keys


def _get_chunk_value(chunk: dict[str, Any], key: str) -> Any:
    """Get value for a key, handling metadata.* keys."""
    if key.startswith("metadata."):
        _, child = key.split(".", 1)
        meta = chunk.get("metadata")
        if isinstance(meta, dict):
            return meta.get(child)
        return None
    return chunk.get(key)


def _normalize_type(val: Any) -> str:
    """Get normalized type name. StrEnum and str are treated as 'str'."""
    if val is None:
        return "NoneType"
    if isinstance(val, str):
        return "str"
    return type(val).__name__


def _compare_common_keys_across_chunks(
    direct_chunks: list[dict[str, Any]],
    connector_chunks: list[dict[str, Any]],
    common_keys: set[str],
) -> dict[str, Any]:
    """Compare values for common keys: check None vs non-None and type consistency."""
    value_issues: list[dict[str, Any]] = []
    type_issues: list[dict[str, Any]] = []

    for key in sorted(common_keys):
        # Collect all values for this key from both sides
        direct_vals = [_get_chunk_value(c, key) for c in direct_chunks]
        connector_vals = [_get_chunk_value(c, key) for c in connector_chunks]

        direct_has_value = any(v is not None for v in direct_vals)
        connector_has_value = any(v is not None for v in connector_vals)

        # Check None vs non-None mismatch
        if direct_has_value != connector_has_value:
            direct_example = next((v for v in direct_vals if v is not None), None)
            connector_example = next((v for v in connector_vals if v is not None), None)
            value_issues.append(
                {
                    "key": key,
                    "direct_has_value": direct_has_value,
                    "connector_has_value": connector_has_value,
                    "direct_example": str(direct_example)[:150] if direct_example else None,
                    "connector_example": str(connector_example)[:150] if connector_example else None,
                }
            )

        # Check type consistency (only for non-None values)
        direct_types = {_normalize_type(v) for v in direct_vals if v is not None}
        connector_types = {_normalize_type(v) for v in connector_vals if v is not None}

        if direct_types and connector_types and direct_types != connector_types:
            direct_example = next((v for v in direct_vals if v is not None), None)
            connector_example = next((v for v in connector_vals if v is not None), None)
            type_issues.append(
                {
                    "key": key,
                    "direct_types": sorted(direct_types),
                    "connector_types": sorted(connector_types),
                    "direct_example": str(direct_example)[:150] if direct_example else None,
                    "connector_example": str(connector_example)[:150] if connector_example else None,
                }
            )

    return {"value_issues": value_issues, "type_issues": type_issues}


def compare_event_type_groups(
    direct_groups: dict[str, list[dict[str, Any]]],
    connector_groups: dict[str, list[dict[str, Any]]],
) -> dict[str, Any]:
    """Compare chunks grouped by event type - RAW comparison without filtering."""
    all_event_types = set(direct_groups.keys()) | set(connector_groups.keys())

    canonical_types = {
        A2AStreamEventType.STATUS_UPDATE.value,
        A2AStreamEventType.CONTENT_CHUNK.value,
        A2AStreamEventType.TOOL_CALL.value,
        A2AStreamEventType.TOOL_RESULT.value,
        A2AStreamEventType.FINAL_RESPONSE.value,
        A2AStreamEventType.ERROR.value,
    }

    event_comparisons = {}
    for event_type in sorted(all_event_types):
        direct_chunks = direct_groups.get(event_type, [])
        connector_chunks = connector_groups.get(event_type, [])

        # Collect ALL keys seen across ALL chunks of this event type
        direct_all_keys: set[str] = set()
        connector_all_keys: set[str] = set()
        for c in direct_chunks:
            direct_all_keys.update(_get_chunk_keys_flat(c))
        for c in connector_chunks:
            connector_all_keys.update(_get_chunk_keys_flat(c))

        keys_only_direct = sorted(direct_all_keys - connector_all_keys)
        keys_only_connector = sorted(connector_all_keys - direct_all_keys)
        common_keys = direct_all_keys & connector_all_keys

        # For keys that differ, find example chunks and values
        direct_extra_examples: list[dict[str, Any]] = []
        for key in keys_only_direct:
            for idx, c in enumerate(direct_chunks):
                val = _get_chunk_value(c, key)
                if val is not None:
                    direct_extra_examples.append(
                        {
                            "key": key,
                            "chunk_index": idx,
                            "value": val,
                            "chunk_preview": {
                                k: (str(v)[:100] if isinstance(v, str | list | dict) else v)
                                for k, v in c.items()
                                if k != "metadata"
                            },
                        }
                    )
                    break

        connector_extra_examples: list[dict[str, Any]] = []
        for key in keys_only_connector:
            for idx, c in enumerate(connector_chunks):
                val = _get_chunk_value(c, key)
                if val is not None:
                    connector_extra_examples.append(
                        {
                            "key": key,
                            "chunk_index": idx,
                            "value": val,
                            "chunk_preview": {
                                k: (str(v)[:100] if isinstance(v, str | list | dict) else v)
                                for k, v in c.items()
                                if k != "metadata"
                            },
                        }
                    )
                    break

        # Compare common keys for value presence and type consistency
        common_key_comparison = _compare_common_keys_across_chunks(direct_chunks, connector_chunks, common_keys)

        has_issues = (
            keys_only_direct
            or keys_only_connector
            or common_key_comparison["value_issues"]
            or common_key_comparison["type_issues"]
        )

        comparison = {
            "direct_count": len(direct_chunks),
            "connector_count": len(connector_chunks),
            "direct_all_keys": sorted(direct_all_keys),
            "connector_all_keys": sorted(connector_all_keys),
            "keys_only_in_direct": keys_only_direct,
            "keys_only_in_connector": keys_only_connector,
            "direct_extra_examples": direct_extra_examples,
            "connector_extra_examples": connector_extra_examples,
            "common_keys": sorted(common_keys),
            "value_issues": common_key_comparison["value_issues"],
            "type_issues": common_key_comparison["type_issues"],
            "structure_match": not has_issues,
        }

        event_comparisons[event_type] = comparison

    return {
        "event_comparisons": event_comparisons,
        "canonical_types": sorted(canonical_types),
        "tested_types": sorted(all_event_types),
        "missing_types": sorted(canonical_types - all_event_types),
    }


def compare_chunks(direct_chunks: list[dict[str, Any]], connector_chunks: list[dict[str, Any]]) -> dict[str, Any]:
    """Compare the two chunk lists and produce a comprehensive structural summary."""
    direct_keys = get_chunk_keys(direct_chunks)
    connector_keys = get_chunk_keys(connector_chunks)

    # Required fields per SSE chunk schema
    required_fields = {"status", "task_state", "event_type", "final", "metadata"}

    # Group by event type for detailed comparison
    direct_groups = group_chunks_by_event_type(direct_chunks)
    connector_groups = group_chunks_by_event_type(connector_chunks)
    event_type_comparison = compare_event_type_groups(direct_groups, connector_groups)

    comparison = {
        # Basic counts
        "direct_count": len(direct_chunks),
        "connector_count": len(connector_chunks),
        # Schema keys
        "direct_keys": sorted(direct_keys),
        "connector_keys": sorted(connector_keys),
        "keys_only_in_direct": sorted(direct_keys - connector_keys),
        "keys_only_in_connector": sorted(connector_keys - direct_keys),
        "keys_in_both": sorted(direct_keys & connector_keys),
        # Required fields check
        "direct_has_required_fields": required_fields.issubset(direct_keys),
        "connector_has_required_fields": required_fields.issubset(connector_keys),
        # Event types
        "direct_event_types": [c.get("event_type") for c in direct_chunks],
        "connector_event_types": [c.get("event_type") for c in connector_chunks],
        # Type consistency
        "direct_all_event_types_string": all(
            isinstance(c.get("event_type"), str) for c in direct_chunks if c.get("event_type")
        ),
        "connector_all_event_types_string": all(
            isinstance(c.get("event_type"), str) for c in connector_chunks if c.get("event_type")
        ),
        # Artifacts and references
        "direct_has_artifacts": any(c.get("artifacts") for c in direct_chunks),
        "connector_has_artifacts": any(c.get("artifacts") for c in connector_chunks),
        "direct_has_references": any(c.get("metadata", {}).get("references") for c in direct_chunks),
        "connector_has_references": any(c.get("metadata", {}).get("references") for c in connector_chunks),
        # Final chunk status
        "direct_final_status": direct_chunks[-1].get("status") if direct_chunks else None,
        "connector_final_status": connector_chunks[-1].get("status") if connector_chunks else None,
        "direct_final_task_state": direct_chunks[-1].get("task_state") if direct_chunks else None,
        "connector_final_task_state": connector_chunks[-1].get("task_state") if connector_chunks else None,
        # Per-event-type comparison
        "event_type_comparison": event_type_comparison,
    }

    # Field type comparison for common keys
    common_keys = direct_keys & connector_keys
    type_mismatches = []
    for key in sorted(common_keys):
        if key.startswith("metadata."):
            continue  # Skip nested for now
        direct_types = get_field_types(direct_chunks, key)
        connector_types = get_field_types(connector_chunks, key)
        if direct_types != connector_types and direct_types and connector_types:
            type_mismatches.append(
                {
                    "field": key,
                    "direct_types": sorted(direct_types),
                    "connector_types": sorted(connector_types),
                }
            )
    comparison["type_mismatches"] = type_mismatches

    return comparison


def print_event_type_comparison(event_comparison: dict[str, Any]) -> None:
    """Print per-event-type comparison details with exact key/value examples."""
    print("\n--- PER-EVENT-TYPE COMPARISON (RAW, NO FILTERING) ---")

    canonical = event_comparison["canonical_types"]
    tested = event_comparison["tested_types"]
    missing = event_comparison["missing_types"]

    print(f" Canonical event types: {canonical}")
    print(f" Tested event types: {tested}")
    if missing:
        print(f" ⚠️ Missing event types (not tested): {missing}")
    else:
        print(" ✓ All canonical event types tested")

    print("\n Per-event breakdown:")
    for event_type, comp in sorted(event_comparison["event_comparisons"].items()):
        direct_count = comp["direct_count"]
        connector_count = comp["connector_count"]
        structure_match = comp["structure_match"]

        status_icon = "✓" if structure_match else "⚠️"
        print(f"\n {status_icon} {event_type}: [direct={direct_count} chunks, connector={connector_count} chunks]")
        print(f" Direct keys: {comp['direct_all_keys']}")
        print(f" Connector keys: {comp['connector_all_keys']}")

        if comp["keys_only_in_direct"]:
            print(f" ❌ Keys ONLY in direct: {comp['keys_only_in_direct']}")
            for ex in comp.get("direct_extra_examples", []):
                val_preview = str(ex["value"])[:200]
                print(f" └─ key='{ex['key']}' in chunk[{ex['chunk_index']}]")
                print(f" value: {val_preview}")
                print(f" chunk: {ex['chunk_preview']}")

        if comp["keys_only_in_connector"]:
            print(f" ❌ Keys ONLY in connector: {comp['keys_only_in_connector']}")
            for ex in comp.get("connector_extra_examples", []):
                val_preview = str(ex["value"])[:200]
                print(f" └─ key='{ex['key']}' in chunk[{ex['chunk_index']}]")
                print(f" value: {val_preview}")
                print(f" chunk: {ex['chunk_preview']}")

        # Show value issues (one has value, other is None)
        if comp.get("value_issues"):
            print(" ❌ Value presence mismatch (None vs non-None):")
            for issue in comp["value_issues"]:
                print(f" └─ key='{issue['key']}'")
                print(f" direct has value: {issue['direct_has_value']} -> {issue['direct_example']}")
                print(
                    f" connector has value: {issue['connector_has_value']} -> {issue['connector_example']}"
                )

        # Show type issues
        if comp.get("type_issues"):
            print(" ❌ Type mismatch (str/StrEnum treated same):")
            for issue in comp["type_issues"]:
                print(f" └─ key='{issue['key']}'")
                print(f" direct types: {issue['direct_types']} -> {issue['direct_example']}")
                print(f" connector types: {issue['connector_types']} -> {issue['connector_example']}")

        if structure_match:
            print(" ✓ Keys, values, and types match between direct and connector")


def print_structure_comparison(comparison: dict[str, Any]) -> None:
    """Print detailed structure comparison."""
    print("\n--- SCHEMA STRUCTURE ---")
    print(f" Direct keys: {comparison['direct_keys']}")
    print(f" Connector keys: {comparison['connector_keys']}")

    if comparison["keys_only_in_direct"]:
        print(f" ⚠️ Keys ONLY in direct: {comparison['keys_only_in_direct']}")
    if comparison["keys_only_in_connector"]:
        print(f" ⚠️ Keys ONLY in connector: {comparison['keys_only_in_connector']}")

    print("\n--- REQUIRED FIELDS CHECK ---")
    print(
        f" Direct has required fields (status, task_state, event_type, final, metadata): {comparison['direct_has_required_fields']}"
    )
    print(f" Connector has required fields: {comparison['connector_has_required_fields']}")

    print("\n--- TYPE CONSISTENCY ---")
    print(f" Direct event_type always string: {comparison['direct_all_event_types_string']}")
    print(f" Connector event_type always string: {comparison['connector_all_event_types_string']}")

    if comparison["type_mismatches"]:
        print("\n ⚠️ Type mismatches found:")
        for mismatch in comparison["type_mismatches"]:
            print(
                f" {mismatch['field']}: direct={mismatch['direct_types']}, connector={mismatch['connector_types']}"
            )
    else:
        print(" ✓ No type mismatches in common fields")

    # Per-event-type comparison
    if "event_type_comparison" in comparison:
        print_event_type_comparison(comparison["event_type_comparison"])

    print("\n--- CONTENT COMPARISON ---")
    print(f" Direct chunk count: {comparison['direct_count']}")
    print(f" Connector chunk count: {comparison['connector_count']}")
    print(f" Direct has artifacts: {comparison['direct_has_artifacts']}")
    print(f" Connector has artifacts: {comparison['connector_has_artifacts']}")
    print(f" Direct has references: {comparison['direct_has_references']}")
    print(f" Connector has references: {comparison['connector_has_references']}")

    print("\n--- FINAL CHUNK ---")
    print(
        f" Direct final: status={comparison['direct_final_status']}, task_state={comparison['direct_final_task_state']}"
    )
    print(
        f" Connector final: status={comparison['connector_final_status']}, task_state={comparison['connector_final_task_state']}"
    )

    # Overall parity verdict
    print("\n--- PARITY VERDICT ---")
    schema_match = not comparison["keys_only_in_direct"] and not comparison["keys_only_in_connector"]
    type_match = not comparison["type_mismatches"]
    required_match = comparison["direct_has_required_fields"] and comparison["connector_has_required_fields"]
    event_type_match = comparison["direct_all_event_types_string"] and comparison["connector_all_event_types_string"]

    # Check per-event-type parity
    event_parity = True
    if "event_type_comparison" in comparison:
        event_comp = comparison["event_type_comparison"]
        for et_comp in event_comp["event_comparisons"].values():
            if not et_comp["structure_match"]:
                event_parity = False
                break

    if schema_match and type_match and required_match and event_type_match and event_parity:
        print(" ✓ PARITY ACHIEVED: Direct SSE stream output matches connector output structure!")
    else:
        print(" ⚠️ PARITY ISSUES DETECTED:")
        if not schema_match:
            print(" - Schema keys differ")
        if not type_match:
            print(" - Field types differ")
        if not required_match:
            print(" - Missing required fields")
        if not event_type_match:
            print(" - event_type not consistently string")
        if not event_parity:
            print(" - Per-event-type structure differs")


async def main():
    """Main comparison workflow."""
    print("=" * 80)
    print("DIRECT vs CONNECTOR STREAMING COMPARISON")
    print("=" * 80)

    # Query that exercises table, retrieval, visualization (via sub-agent), and PII tool
    # NOTE: The <> angle brackets in <PERSON_1> are required - they're part of the PII tag format
    # that the system recognizes for replacement. Without them, "PERSON_1" won't be detected.
    query = (
        "Generate a small table with 2 rows, search for 'test data', call the RandomChartAgent to produce a "
        "random visualization image, and can you get me information about customers <PERSON_1> and <PERSON_2>?"
    )
    # The pii_mapping keys MUST include the <> brackets to match the tags in the query
    pii_mapping_original = {"<PERSON_1>": "C001", "<PERSON_2>": "C002"}

    # Create local agent (token streaming disabled for now)
    print("\n[1] Creating local agent with artifact and reference tools...")
    agent = create_local_agent(enable_token_streaming=False)
    print(f" Agent: {agent.name}")
    print(
        " Tools: table_generator, mock_retrieval, customer_info, employee_data, user_profile (PII), random_chart (sub-agent)"
    )
    print(" Token streaming: disabled (will compare later)")
    # Run direct streaming (local agent, no A2A)
    print("\n[2] Running DIRECT streaming (arun_sse_stream on local agent)...")
    print(f" Query: {query}")
    pii_mapping_direct = deepcopy(pii_mapping_original)
    direct_chunks = await run_direct_streaming(agent, query, pii_mapping=pii_mapping_direct)
    print(f" Collected {len(direct_chunks)} chunks")

    # Run connector streaming (via A2A to server)
    print(f"\n[3] Running CONNECTOR streaming (astream_to_agent to {SERVER_URL})...")
    print(f" Query: {query}")
    pii_mapping_connector = deepcopy(pii_mapping_original)
    connector_chunks = await run_connector_streaming(agent, query, pii_mapping=pii_mapping_connector)
    print(f" Collected {len(connector_chunks)} chunks")

    # Display results
    print("\n" + "=" * 80)
    print("DIRECT STREAMING CHUNKS (arun_sse_stream)")
    print("=" * 80)
    for i, chunk in enumerate(direct_chunks):
        print(format_chunk_summary(chunk, i))

    print("\n" + "=" * 80)
    print("CONNECTOR STREAMING CHUNKS (astream_to_agent)")
    print("=" * 80)
    for i, chunk in enumerate(connector_chunks):
        print(format_chunk_summary(chunk, i))

    # Comprehensive structure comparison
    print("\n" + "=" * 80)
    print("STRUCTURE COMPARISON (SSE Stream vs A2A Connector)")
    print("=" * 80)
    comparison = compare_chunks(direct_chunks, connector_chunks)
    print_structure_comparison(comparison)

    # Save full output for inspection
    output_file = "streaming_comparison_output.json"
    output_data = {
        "query": query,
        "direct_chunks": direct_chunks,
        "connector_chunks": connector_chunks,
        "comparison": comparison,
    }
    with open(output_file, "w") as f:
        json.dump(output_data, f, indent=2, default=str)
    print(f"\n Full output saved to: {output_file}")

    print("\n" + "=" * 80)
    print("DONE")
    print("=" * 80)


if __name__ == "__main__":
    asyncio.run(main())