aip-agents-binary 0.5.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_langgraph_agent.py +2942 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_react_agent.py +2514 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/constants.py +23 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/persistent_session.py +359 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/transports.py +215 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/base.py +88 -0
- aip_agents/middleware/manager.py +128 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/tools/__init__.py +33 -0
- aip_agents/tools/bosa_tools.py +105 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/constants.py +165 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/tool.py +351 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.20.dist-info/RECORD +280 -0
- aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,831 @@
|
|
|
1
|
+
"""SSE Chunk Transformer for converting A2AEvent stream to SSE-compatible output.
|
|
2
|
+
|
|
3
|
+
This module provides the SSEChunkTransformer class that transforms A2AEvent objects
|
|
4
|
+
into SSE-compatible chunks, enabling direct streaming without A2A server overhead.
|
|
5
|
+
|
|
6
|
+
The transformer consolidates normalization logic shared with A2AConnector and provides
|
|
7
|
+
both static utilities and instance methods for stream transformation.
|
|
8
|
+
|
|
9
|
+
Authors:
|
|
10
|
+
AI Agent Platform Team
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"SSEChunkTransformer",
|
|
17
|
+
"TaskState",
|
|
18
|
+
"ChunkStatus",
|
|
19
|
+
"ChunkReason",
|
|
20
|
+
"ChunkFieldKeys",
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
import hashlib
|
|
24
|
+
import time
|
|
25
|
+
import uuid
|
|
26
|
+
from collections.abc import AsyncGenerator
|
|
27
|
+
from datetime import UTC, datetime
|
|
28
|
+
from enum import Enum, StrEnum
|
|
29
|
+
from typing import Any
|
|
30
|
+
|
|
31
|
+
from aip_agents.schema.a2a import A2AEvent, A2AStreamEventType
|
|
32
|
+
from aip_agents.utils.logger import get_logger
|
|
33
|
+
from aip_agents.utils.metadata_helper import (
|
|
34
|
+
MetadataFieldKeys,
|
|
35
|
+
MetadataTimeTracker,
|
|
36
|
+
Status,
|
|
37
|
+
create_metadata,
|
|
38
|
+
create_status_update_metadata,
|
|
39
|
+
create_tool_processing_metadata,
|
|
40
|
+
)
|
|
41
|
+
from aip_agents.utils.reference_helper import serialize_references_for_metadata
|
|
42
|
+
|
|
43
|
+
logger = get_logger(__name__)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# =============================================================================
|
|
47
|
+
# Type-safe constants for SSE chunk structure
|
|
48
|
+
# =============================================================================
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class TaskState(StrEnum):
    """Task state values for SSE chunks.

    Emitted in each chunk's ``task_state`` field; see the
    ``_EVENT_TYPE_TO_TASK_STATE`` mapping for which event types produce which
    state. StrEnum members compare equal to their plain-string values.
    """

    WORKING = "working"  # stream still producing intermediate events
    COMPLETED = "completed"  # mapped from final_response events
    FAILED = "failed"  # mapped from error / step_limit_exceeded events
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class ChunkStatus(StrEnum):
    """Status values for SSE chunks.

    Set in each chunk's ``status`` field.
    """

    SUCCESS = "success"  # chunk produced from a normally processed event
    ERROR = "error"  # failure chunk (used by _create_error_chunk)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class ChunkReason(StrEnum):
    """Reason codes for special chunk states."""

    # Set when an event carried no usable content payload — presumably so
    # consumers can distinguish "intentionally empty" from missing data.
    EMPTY_PAYLOAD = "empty_payload"
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class ChunkFieldKeys(StrEnum):
    """Field name constants for SSE chunk structure.

    Used as dict keys when building chunks; being StrEnum members they
    serialize as plain strings.
    """

    STATUS = "status"  # ChunkStatus value
    TASK_STATE = "task_state"  # TaskState value
    CONTENT = "content"  # textual payload of the chunk
    EVENT_TYPE = "event_type"  # normalized A2A event type string
    FINAL = "final"  # True on the stream's terminal chunk
    METADATA = "metadata"  # normalized metadata dict
    TIMESTAMP = "timestamp"  # ISO-8601 UTC timestamp
    TASK_ID = "task_id"
    CONTEXT_ID = "context_id"
    ARTIFACTS = "artifacts"  # deduplicated artifact list
    REASON = "reason"  # ChunkReason code, e.g. empty_payload
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# Event type to task state mapping
|
|
89
|
+
# Event type to task state mapping.
# Keys include both A2AStreamEventType members and their plain-string
# spellings so lookups succeed whether events carry enum instances or raw
# strings. NOTE(review): if A2AStreamEventType is a StrEnum the string
# duplicates may be redundant — confirm before removing.
_EVENT_TYPE_TO_TASK_STATE: dict[A2AStreamEventType | str, str] = {
    A2AStreamEventType.STATUS_UPDATE: TaskState.WORKING,
    A2AStreamEventType.CONTENT_CHUNK: TaskState.WORKING,
    A2AStreamEventType.TOOL_CALL: TaskState.WORKING,
    A2AStreamEventType.TOOL_RESULT: TaskState.WORKING,
    A2AStreamEventType.FINAL_RESPONSE: TaskState.COMPLETED,
    A2AStreamEventType.ERROR: TaskState.FAILED,
    A2AStreamEventType.STEP_LIMIT_EXCEEDED: TaskState.FAILED,
    "status_update": TaskState.WORKING,
    "content_chunk": TaskState.WORKING,
    "tool_call": TaskState.WORKING,
    "tool_result": TaskState.WORKING,
    "final_response": TaskState.COMPLETED,
    "error": TaskState.FAILED,
    "step_limit_exceeded": TaskState.FAILED,
}
|
|
105
|
+
|
|
106
|
+
# Event types that indicate final events
|
|
107
|
+
# Event types that indicate final (stream-terminating) events.
# Like _EVENT_TYPE_TO_TASK_STATE, both enum members and raw-string spellings
# are listed so membership tests work for either representation.
_FINAL_EVENT_TYPES: set[A2AStreamEventType | str] = {
    A2AStreamEventType.FINAL_RESPONSE,
    A2AStreamEventType.ERROR,
    A2AStreamEventType.STEP_LIMIT_EXCEEDED,
    "final_response",
    "error",
    "step_limit_exceeded",
}
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class SSEChunkTransformer:
|
|
118
|
+
"""Transforms A2AEvent stream to SSE-compatible output.
|
|
119
|
+
|
|
120
|
+
This class converts events from arun_a2a_stream into the normalized dict format
|
|
121
|
+
matching A2AConnector.astream_to_agent output.
|
|
122
|
+
|
|
123
|
+
Lifecycle:
|
|
124
|
+
Single-stream instance. Must NOT be reused across concurrent streams.
|
|
125
|
+
Each arun_sse_stream call creates a fresh instance.
|
|
126
|
+
|
|
127
|
+
Attributes:
|
|
128
|
+
task_id: Optional task identifier for the stream.
|
|
129
|
+
context_id: Optional context identifier for the stream.
|
|
130
|
+
|
|
131
|
+
Example:
|
|
132
|
+
>>> transformer = SSEChunkTransformer(task_id="task-123")
|
|
133
|
+
>>> async for chunk in transformer.transform_stream(agent.arun_a2a_stream("query")):
|
|
134
|
+
... print(chunk)
|
|
135
|
+
"""
|
|
136
|
+
|
|
137
|
+
def __init__(
    self,
    task_id: str | None = None,
    context_id: str | None = None,
    pii_mapping: dict[str, str] | None = None,
) -> None:
    """Set up per-stream transformer state.

    Args:
        task_id: Optional task identifier for the stream.
        context_id: Optional context identifier for the stream.
        pii_mapping: Optional PII mapping to inject into each chunk's metadata.
    """
    self.task_id = task_id
    self.context_id = context_id
    # Copy the caller's mapping so later merges of per-event pii_mapping
    # never mutate the argument. Starts with the initial value.
    self._pii_mapping: dict[str, str] = {**pii_mapping} if pii_mapping else {}
    # Hashes of artifacts already emitted, used for deduplication.
    self._seen_artifact_hashes: set[str] = set()
    # Artifacts accumulated over the stream for the final response.
    self._collected_artifacts: list[dict[str, Any]] = []
    self._time_tracker = MetadataTimeTracker()
    # Flipped to True once the first content chunk is yielded.
    self._first_content_yielded: bool = False
    # Wall-clock start for cumulative time calculation; None until streaming begins.
    self._start_time: float | None = None
|
|
159
|
+
|
|
160
|
+
@staticmethod
|
|
161
|
+
def normalize_metadata_enums(data: Any) -> Any:
|
|
162
|
+
"""Recursively convert enum keys/values to their string values.
|
|
163
|
+
|
|
164
|
+
This is a pure normalization utility that converts any enum instances
|
|
165
|
+
(MetadataFieldKeys, Kind, Status, etc.) to their .value strings.
|
|
166
|
+
|
|
167
|
+
Args:
|
|
168
|
+
data: Dict, list, or value that may contain enum keys/values.
|
|
169
|
+
|
|
170
|
+
Returns:
|
|
171
|
+
Normalized data with all enums converted to their .value strings.
|
|
172
|
+
"""
|
|
173
|
+
if isinstance(data, Enum):
|
|
174
|
+
return data.value
|
|
175
|
+
if isinstance(data, dict):
|
|
176
|
+
return SSEChunkTransformer._normalize_dict(data)
|
|
177
|
+
if isinstance(data, list | tuple | set):
|
|
178
|
+
return [SSEChunkTransformer.normalize_metadata_enums(item) for item in data]
|
|
179
|
+
return data
|
|
180
|
+
|
|
181
|
+
@staticmethod
|
|
182
|
+
def _normalize_dict(data: dict[Any, Any]) -> dict[str, Any]:
|
|
183
|
+
"""Normalize a dictionary by converting enum keys and values.
|
|
184
|
+
|
|
185
|
+
Args:
|
|
186
|
+
data: Dictionary to normalize.
|
|
187
|
+
|
|
188
|
+
Returns:
|
|
189
|
+
New dictionary with normalized keys and values.
|
|
190
|
+
"""
|
|
191
|
+
normalized: dict[str, Any] = {}
|
|
192
|
+
for key, value in data.items():
|
|
193
|
+
normalized_key = key.value if isinstance(key, Enum) else key
|
|
194
|
+
normalized_value = SSEChunkTransformer.normalize_metadata_enums(value)
|
|
195
|
+
normalized[normalized_key] = normalized_value
|
|
196
|
+
return normalized
|
|
197
|
+
|
|
198
|
+
@staticmethod
|
|
199
|
+
def normalize_event_type_value(event_type: Any) -> str | None:
|
|
200
|
+
"""Convert A2AStreamEventType enum to string.
|
|
201
|
+
|
|
202
|
+
Args:
|
|
203
|
+
event_type: Event type (enum, string, or None).
|
|
204
|
+
|
|
205
|
+
Returns:
|
|
206
|
+
String value of the event type, or None if invalid.
|
|
207
|
+
"""
|
|
208
|
+
if event_type is None:
|
|
209
|
+
return None
|
|
210
|
+
if isinstance(event_type, A2AStreamEventType):
|
|
211
|
+
return event_type.value
|
|
212
|
+
if isinstance(event_type, str):
|
|
213
|
+
return event_type
|
|
214
|
+
return None
|
|
215
|
+
|
|
216
|
+
@staticmethod
|
|
217
|
+
def create_artifact_hash(artifact: dict[str, Any]) -> str:
|
|
218
|
+
"""Create a stable hash for artifact deduplication.
|
|
219
|
+
|
|
220
|
+
Uses name, content_type, mime_type, and file_data for hashing,
|
|
221
|
+
excluding artifact_id which may be randomly generated.
|
|
222
|
+
|
|
223
|
+
Args:
|
|
224
|
+
artifact: Artifact dict with name, content_type, mime_type, and optionally file_data.
|
|
225
|
+
|
|
226
|
+
Returns:
|
|
227
|
+
SHA256 hexdigest hash string for deduplication.
|
|
228
|
+
"""
|
|
229
|
+
hash_data = {
|
|
230
|
+
"name": artifact.get("artifact_name") or artifact.get("name"),
|
|
231
|
+
"content_type": artifact.get("content_type"),
|
|
232
|
+
"mime_type": artifact.get("mime_type"),
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
# Include file_data for accurate deduplication if available
|
|
236
|
+
file_data = artifact.get("file_data")
|
|
237
|
+
if file_data is not None:
|
|
238
|
+
hash_data["file_data"] = file_data
|
|
239
|
+
else:
|
|
240
|
+
# Fallback to file_uri if file_data is not available
|
|
241
|
+
file_uri = artifact.get("file_uri")
|
|
242
|
+
if file_uri is not None:
|
|
243
|
+
hash_data["file_uri"] = file_uri
|
|
244
|
+
|
|
245
|
+
hash_string = str(sorted(hash_data.items()))
|
|
246
|
+
return hashlib.sha256(hash_string.encode()).hexdigest()
|
|
247
|
+
|
|
248
|
+
@staticmethod
|
|
249
|
+
def extract_tool_outputs(tool_calls: list[dict[str, Any]]) -> list[str]:
|
|
250
|
+
"""Extract human-readable output strings from tool calls.
|
|
251
|
+
|
|
252
|
+
Args:
|
|
253
|
+
tool_calls: List of tool call dictionaries.
|
|
254
|
+
|
|
255
|
+
Returns:
|
|
256
|
+
List of human-readable output strings.
|
|
257
|
+
"""
|
|
258
|
+
outputs: list[str] = []
|
|
259
|
+
for tool_call in tool_calls:
|
|
260
|
+
output = tool_call.get("output")
|
|
261
|
+
if isinstance(output, str):
|
|
262
|
+
outputs.append(output)
|
|
263
|
+
elif isinstance(output, dict):
|
|
264
|
+
# Attempt to extract human-readable content field before falling back
|
|
265
|
+
content = output.get("content") if isinstance(output.get("content"), str) else None
|
|
266
|
+
outputs.append(content if content else str(output))
|
|
267
|
+
else:
|
|
268
|
+
# For other types, convert to string
|
|
269
|
+
outputs.append(str(output))
|
|
270
|
+
return outputs
|
|
271
|
+
|
|
272
|
+
@staticmethod
|
|
273
|
+
def format_tool_output(output: Any, tool_name: str) -> str:
|
|
274
|
+
"""Format a single tool output for display.
|
|
275
|
+
|
|
276
|
+
Args:
|
|
277
|
+
output: The tool output to format.
|
|
278
|
+
tool_name: The name of the tool.
|
|
279
|
+
|
|
280
|
+
Returns:
|
|
281
|
+
The formatted output string.
|
|
282
|
+
"""
|
|
283
|
+
if output is None:
|
|
284
|
+
return f"Completed {tool_name}"
|
|
285
|
+
if isinstance(output, str):
|
|
286
|
+
return output
|
|
287
|
+
elif isinstance(output, dict):
|
|
288
|
+
content = output.get("content") if isinstance(output.get("content"), str) else None
|
|
289
|
+
return content if content else str(output)
|
|
290
|
+
else:
|
|
291
|
+
return str(output)
|
|
292
|
+
|
|
293
|
+
@staticmethod
def apply_hitl_content_override(content: str | None, event_type_str: str, metadata: dict[str, Any]) -> str | None:
    """Swap in human-readable tool output when HITL is active.

    Mirrors A2AConnector behavior: for tool_result events that carry HITL
    metadata, the status message is replaced by the tool's actual output.

    Args:
        content: The original content/status message.
        event_type_str: Normalized string form of the event type.
        metadata: Metadata dict expected to hold a "hitl" dict and tool_info.

    Returns:
        The tool-derived text when the override applies, otherwise the
        original content unchanged.
    """
    # Guard clauses: only tool_result events carrying a HITL dict and a
    # tool_info dict qualify for the override.
    if event_type_str != A2AStreamEventType.TOOL_RESULT.value:
        return content
    if not isinstance(metadata.get("hitl"), dict):
        return content
    tool_info = metadata.get(MetadataFieldKeys.TOOL_INFO)
    if not isinstance(tool_info, dict):
        return content

    if "tool_calls" in tool_info:
        # Multi-tool case: join every tool's readable output.
        collected = SSEChunkTransformer.extract_tool_outputs(tool_info["tool_calls"])
        if collected:
            return "\n".join(collected)
    elif "output" in tool_info:
        # Single-tool case. NOTE(review): the startswith("Completed") test
        # suppresses the generic placeholder, but would also suppress a
        # legitimate output that happens to start with "Completed" — confirm
        # this is acceptable.
        formatted = SSEChunkTransformer.format_tool_output(tool_info.get("output"), tool_info.get("name", ""))
        if formatted and not formatted.startswith("Completed"):
            return formatted

    return content
|
|
334
|
+
|
|
335
|
+
@staticmethod
def _create_error_chunk(message: str) -> dict[str, Any]:
    """Build a terminal error chunk matching A2AConnector._create_error_response().

    Args:
        message: Error message to include.

    Returns:
        Error chunk dict marked final, with error status and failed task state.
    """
    chunk: dict[str, Any] = {}
    chunk[ChunkFieldKeys.STATUS] = ChunkStatus.ERROR
    chunk[ChunkFieldKeys.TASK_STATE] = TaskState.FAILED
    chunk[ChunkFieldKeys.CONTENT] = message
    chunk[ChunkFieldKeys.EVENT_TYPE] = A2AStreamEventType.ERROR.value
    chunk[ChunkFieldKeys.FINAL] = True
    chunk[ChunkFieldKeys.METADATA] = {}
    return chunk
|
|
353
|
+
|
|
354
|
+
def _validate_event(self, event: dict[str, Any]) -> str | None:
    """Validate that an event carries every field the transformer requires.

    Args:
        event: Event dict to validate.

    Returns:
        A human-readable error message when fields are missing, None if valid.
    """
    required = (
        ChunkFieldKeys.EVENT_TYPE,
        ChunkFieldKeys.CONTENT,
        ChunkFieldKeys.METADATA,
    )
    missing = [field for field in required if field not in event]
    if not missing:
        return None
    return f"Malformed event: missing required field(s): {', '.join(missing)}"
|
|
372
|
+
|
|
373
|
+
def transform_event(self, event: A2AEvent) -> dict[str, Any]:
    """Transform a single A2AEvent to SSE chunk format.

    Converts the A2AEvent structure to the normalized SSE chunk format,
    relocating fields like tool_info and thinking_and_activity_info into
    metadata, and normalizing enum values to strings.

    Args:
        event: Single A2AEvent dict from arun_a2a_stream.

    Returns:
        SSEChunk dict with normalized structure.
    """
    # Validate required fields; malformed events short-circuit into a
    # terminal error chunk so downstream consumers see a clean failure.
    validation_error = self._validate_event(event)  # type: ignore[arg-type]
    if validation_error:
        return self._create_error_chunk(validation_error)

    # Extract event properties. task_state falls back to WORKING for any
    # event type not present in the mapping table.
    raw_event_type = event.get("event_type")
    event_type_str = self._resolve_event_type(raw_event_type)
    task_state = _EVENT_TYPE_TO_TASK_STATE.get(raw_event_type, TaskState.WORKING)
    is_final = raw_event_type in _FINAL_EVENT_TYPES or event.get("is_final", False)
    status = self._determine_status(raw_event_type, event_type_str)
    content, reason = self._extract_content(event)
    timestamp = datetime.now(UTC).isoformat()

    # Build chunk (metadata construction happens inside _build_base_chunk)
    chunk = self._build_base_chunk(status, task_state, content, event_type_str, is_final, timestamp, reason, event)

    # Apply HITL content override if applicable (must happen after metadata is built)
    metadata = chunk.get(ChunkFieldKeys.METADATA, {})
    if isinstance(metadata, dict):
        overridden_content = self.apply_hitl_content_override(
            chunk.get(ChunkFieldKeys.CONTENT), event_type_str, metadata
        )
        if overridden_content != chunk.get(ChunkFieldKeys.CONTENT):
            chunk[ChunkFieldKeys.CONTENT] = overridden_content
            # Clear empty_payload reason if content was overridden
            if chunk.get(ChunkFieldKeys.REASON) == ChunkReason.EMPTY_PAYLOAD:
                chunk.pop(ChunkFieldKeys.REASON, None)

    # Handle artifacts (dedupe per-event; merge accumulated set on final chunk)
    self._process_artifacts(event, chunk, is_final)

    return chunk
|
|
419
|
+
|
|
420
|
+
def _resolve_event_type(self, raw_event_type: Any) -> str:
    """Normalize an event type value into its string form.

    Args:
        raw_event_type: Event type value from the incoming event.

    Returns:
        Normalized string representation of the event type; unknown values
        pass through as their str() form (or "unknown" when falsy).
    """
    event_type_str = self.normalize_event_type_value(raw_event_type)
    if event_type_str is not None:
        return event_type_str
    # Unrecognized type: fall back to a best-effort string and log it.
    event_type_str = str(raw_event_type) if raw_event_type else "unknown"
    logger.warning(f"Unknown event type: {raw_event_type}, passing through as '{event_type_str}'")
    return event_type_str
|
|
434
|
+
|
|
435
|
+
def _determine_status(self, raw_event_type: Any, event_type_str: str) -> ChunkStatus:
    """Derive the chunk status from the event type.

    Args:
        raw_event_type: Raw event type value as provided in the event.
        event_type_str: Normalized event type string.

    Returns:
        ChunkStatus.ERROR for error / step-limit events, else ChunkStatus.SUCCESS.
    """
    if raw_event_type in (A2AStreamEventType.ERROR, A2AStreamEventType.STEP_LIMIT_EXCEEDED):
        return ChunkStatus.ERROR
    if event_type_str in (A2AStreamEventType.ERROR.value, A2AStreamEventType.STEP_LIMIT_EXCEEDED.value):
        return ChunkStatus.ERROR
    return ChunkStatus.SUCCESS
|
|
452
|
+
|
|
453
|
+
def _extract_content(self, event: A2AEvent) -> tuple[str | None, str | None]:
    """Pull content out of the event, flagging empty payloads.

    Args:
        event: Source A2AEvent containing content.

    Returns:
        Tuple of (content, reason); content is None and reason is
        EMPTY_PAYLOAD when the event carried no text.
    """
    content: str | None = event.get("content")  # type: ignore[assignment]
    # Both None and "" collapse to an empty payload.
    if content:
        return content, None
    return None, ChunkReason.EMPTY_PAYLOAD
|
|
468
|
+
|
|
469
|
+
def _build_base_chunk(
    self,
    status: ChunkStatus,
    task_state: str,
    content: str | None,
    event_type_str: str,
    is_final: bool,
    timestamp: str,
    reason: str | None,
    event: A2AEvent,
) -> dict[str, Any]:
    """Assemble the core SSE chunk dictionary.

    Args:
        status: Chunk status derived from event type.
        task_state: Task state derived from event type.
        content: Text content to include in the chunk.
        event_type_str: Normalized event type string.
        is_final: Whether this is the final chunk in the stream.
        timestamp: ISO timestamp for the chunk.
        reason: Optional reason describing special states.
        event: Original event for metadata extraction.

    Returns:
        Base SSE chunk dictionary with core fields populated; reason,
        task_id and context_id are attached only when truthy.
    """
    metadata = self._build_metadata(event, timestamp)
    chunk: dict[str, Any] = {
        ChunkFieldKeys.STATUS: status,
        ChunkFieldKeys.TASK_STATE: task_state,
        ChunkFieldKeys.CONTENT: content,
        ChunkFieldKeys.EVENT_TYPE: event_type_str,
        ChunkFieldKeys.FINAL: is_final,
        ChunkFieldKeys.METADATA: metadata,
        ChunkFieldKeys.TIMESTAMP: timestamp,
    }
    # Optional identity/reason fields are only included when set.
    optional_fields = (
        (ChunkFieldKeys.REASON, reason),
        (ChunkFieldKeys.TASK_ID, self.task_id),
        (ChunkFieldKeys.CONTEXT_ID, self.context_id),
    )
    for key, value in optional_fields:
        if value:
            chunk[key] = value
    return chunk
|
|
512
|
+
|
|
513
|
+
def _process_artifacts(self, event: A2AEvent, chunk: dict[str, Any], is_final: bool) -> None:
    """Attach the event's artifacts to the chunk.

    Args:
        event: Source A2AEvent possibly containing artifacts.
        chunk: Chunk being constructed (mutated in place).
        is_final: Whether the current event marks the end of the stream.
    """
    raw_artifacts = event.get("artifacts")
    if raw_artifacts:
        deduped = self._deduplicate_and_collect_artifacts(raw_artifacts)
        if deduped:
            chunk[ChunkFieldKeys.ARTIFACTS] = deduped

    # For final response, include all collected artifacts (matching connector behavior)
    if is_final and self._collected_artifacts:
        self._merge_collected_artifacts(chunk)
|
|
530
|
+
|
|
531
|
+
def _merge_collected_artifacts(self, chunk: dict[str, Any]) -> None:
    """Fold previously collected artifacts into the chunk without duplicating IDs.

    Args:
        chunk: Chunk to receive merged artifacts (mutated in place).
    """
    merged = chunk.get(ChunkFieldKeys.ARTIFACTS, [])
    # Snapshot IDs already present so collected ones are only appended once.
    known_ids = {item.get("artifact_id") for item in merged}
    merged.extend(
        artifact for artifact in self._collected_artifacts if artifact.get("artifact_id") not in known_ids
    )
    if merged:
        chunk[ChunkFieldKeys.ARTIFACTS] = merged
|
|
544
|
+
|
|
545
|
+
def _build_metadata(self, event: A2AEvent, timestamp: str) -> dict[str, Any]:
    """Build normalized metadata with relocated fields and enrichment.

    Enriches metadata to match A2AConnector.astream_to_agent output by adding
    kind, message, thinking_and_activity_info, and timestamp fields.

    The step order below is deliberate: enrichment runs before enum
    normalization, and pii_mapping injection runs before the status_update
    strip so accumulation still happens for skipped events.

    Args:
        event: Source A2AEvent.
        timestamp: ISO timestamp to include in metadata.

    Returns:
        Normalized and enriched metadata dict matching connector output.
    """
    # Start with existing metadata, normalized, then relocate top-level fields
    existing_metadata = self._prepare_existing_metadata(event)

    # Enrich metadata based on event type
    event_type = event.get("event_type")
    content = event.get("content", "")
    is_final = event_type in _FINAL_EVENT_TYPES or event.get("is_final", False)
    metadata = self._enrich_metadata_by_event_type(event_type, content, is_final, existing_metadata)

    # Normalize and finalize; only fill timestamp when enrichment didn't set one
    metadata = self.normalize_metadata_enums(metadata)
    if "timestamp" not in metadata:
        metadata["timestamp"] = timestamp

    # Add cumulative time to all events (matching A2A server behavior)
    self._apply_cumulative_time(metadata)

    # Relocate thinking_and_activity_info from top-level event if not already present
    self._relocate_thinking_info(event, metadata)

    # Accumulate pii_mapping from event and inject into metadata (matching A2A executor behavior)
    self._accumulate_and_inject_pii_mapping(event, metadata)

    # Match A2AConnector output: status_update chunks should not include pii_mapping
    raw_event_type = event.get("event_type")
    if raw_event_type in (A2AStreamEventType.STATUS_UPDATE, "status_update"):
        metadata.pop(MetadataFieldKeys.PII_MAPPING, None)

    # Add event_type to metadata (matching base_executor behavior); enum values
    # are flattened to their string form, non-str/non-enum types are dropped.
    event_type_value = event.get("event_type")
    if isinstance(event_type_value, A2AStreamEventType):
        metadata["event_type"] = event_type_value.value
    elif isinstance(event_type_value, str):
        metadata["event_type"] = event_type_value

    return metadata
|
|
594
|
+
|
|
595
|
+
def _prepare_existing_metadata(self, event: A2AEvent) -> dict[str, Any]:
    """Normalize event metadata and relocate selected top-level fields into it.

    Args:
        event: Source A2AEvent containing metadata and relocatable fields.

    Returns:
        Normalized metadata dictionary with relocated fields.
    """
    raw_metadata = event.get("metadata", {})
    prepared = self.normalize_metadata_enums(raw_metadata) if raw_metadata else {}

    # Relocate top-level fields into metadata.
    # Note: Using StrEnum for TypedDict keys works at runtime since StrEnum members ARE strings
    for field in (
        MetadataFieldKeys.TOOL_INFO,
        MetadataFieldKeys.STEP_USAGE,
        MetadataFieldKeys.TOTAL_USAGE,
    ):
        if event.get(field):  # type: ignore[typeddict-item]
            prepared[field] = event[field]  # type: ignore[typeddict-item]

    # Serialize references properly (matching A2AConnector behavior)
    references = event.get(MetadataFieldKeys.REFERENCES)  # type: ignore[typeddict-item]
    if references:
        prepared[MetadataFieldKeys.REFERENCES] = serialize_references_for_metadata(references)
    return prepared
|
|
624
|
+
|
|
625
|
+
def _enrich_metadata_by_event_type(
    self,
    event_type: Any,
    content: Any,
    is_final: bool,
    existing_metadata: dict[str, Any],
) -> dict[str, Any]:
    """Enrich metadata to match connector output for the given event type.

    Args:
        event_type: Event type value guiding metadata enrichment.
        content: Content payload associated with the event.
        is_final: Whether the event is the final message in the stream.
        existing_metadata: Metadata accumulated before enrichment.

    Returns:
        Metadata dictionary after enrichment.
    """
    text = content if isinstance(content, str) else ""

    # Tool events take precedence, then final, then status updates.
    tool_events = (
        A2AStreamEventType.TOOL_CALL,
        A2AStreamEventType.TOOL_RESULT,
        "tool_call",
        "tool_result",
    )
    if event_type in tool_events:
        return create_tool_processing_metadata(existing_metadata)
    if is_final:
        return create_metadata(
            content=text, is_final=True, status=Status.FINISHED, existing_metadata=existing_metadata
        )
    if event_type in (A2AStreamEventType.STATUS_UPDATE, "status_update"):
        return create_status_update_metadata(content=text, custom_metadata=existing_metadata)
    return create_metadata(
        content=text, is_final=False, status=Status.RUNNING, existing_metadata=existing_metadata
    )
|
|
656
|
+
|
|
657
|
+
def _relocate_thinking_info(self, event: A2AEvent, metadata: dict[str, Any]) -> None:
    """Copy top-level thinking_and_activity_info into metadata when absent there.

    Args:
        event: Source A2AEvent potentially carrying thinking_and_activity_info.
        metadata: Metadata dict to be enriched with thinking info when absent.
    """
    info_key = MetadataFieldKeys.THINKING_AND_ACTIVITY_INFO
    # Metadata that already carries the key wins over the top-level field.
    if info_key in metadata:
        return
    info = event.get(info_key)  # type: ignore[typeddict-item]
    if info:
        metadata[info_key] = info
|
|
667
|
+
|
|
668
|
+
def _apply_cumulative_time(self, metadata: dict[str, Any]) -> None:
    """Stamp metadata with the cumulative elapsed time for the stream.

    Always applies cumulative time since first event was processed,
    matching A2A server behavior in base_executor._apply_cumulative_time().
    This ensures time is always increasing/cumulative across all events.

    Args:
        metadata: Metadata dict to update with cumulative time (mutated).
    """
    now = time.monotonic()
    # First call anchors the stream start; elapsed is then 0.0 by definition.
    if self._start_time is None:
        self._start_time = now
    # Clamp to non-negative, and always set so the value is monotonic.
    metadata[MetadataFieldKeys.TIME] = max(0.0, now - self._start_time)
|
|
689
|
+
|
|
690
|
+
def _accumulate_and_inject_pii_mapping(self, event: A2AEvent, metadata: dict[str, Any]) -> None:
    """Fold the event's pii_mapping into the accumulator and inject it into metadata.

    This matches A2A executor behavior (langgraph_executor.py:171) where
    current_metadata.update(chunk_metadata) accumulates pii_mapping from
    each event. The accumulated pii_mapping is then injected into each
    chunk's metadata.

    Args:
        event: Source A2AEvent potentially containing pii_mapping in metadata.
        metadata: Metadata dict to update with accumulated pii_mapping.
    """
    # Accumulate first, even for events that won't receive the injection.
    incoming = (event.get("metadata") or {}).get(MetadataFieldKeys.PII_MAPPING)
    if isinstance(incoming, dict) and incoming:
        self._pii_mapping.update(incoming)

    # Match A2AConnector behavior: status_update metadata does not include pii_mapping
    if event.get("event_type") in (A2AStreamEventType.STATUS_UPDATE, "status_update"):
        return

    # Inject a copy so later accumulation can't mutate an emitted chunk.
    if self._pii_mapping:
        metadata[MetadataFieldKeys.PII_MAPPING] = dict(self._pii_mapping)
|
|
716
|
+
|
|
717
|
+
def _deduplicate_and_collect_artifacts(self, artifacts: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Normalize artifacts, drop duplicates, and remember them for the final chunk.

    Args:
        artifacts: List of artifact dicts (may be in direct stream or runner format).

    Returns:
        List of unique artifacts in runner-compatible format.
    """
    fresh: list[dict[str, Any]] = []
    for raw in artifacts:
        # Normalize to runner-expected format before hashing for dedupe.
        candidate = self._normalize_artifact_format(raw)
        digest = self.create_artifact_hash(candidate)
        if digest in self._seen_artifact_hashes:
            continue
        self._seen_artifact_hashes.add(digest)
        fresh.append(candidate)
        # Collect for final response (matching connector behavior)
        self._collected_artifacts.append(candidate)
    return fresh
|
|
737
|
+
|
|
738
|
+
@staticmethod
|
|
739
|
+
def _normalize_artifact_format(artifact: dict[str, Any]) -> dict[str, Any]:
|
|
740
|
+
"""Normalize artifact from direct stream format to ArtifactInfo-compatible format.
|
|
741
|
+
|
|
742
|
+
Converts artifacts from ArtifactHandler format (used by arun_a2a_stream):
|
|
743
|
+
{"artifact_type": "file", "data": "base64...", "name": "...", "mime_type": "..."}
|
|
744
|
+
|
|
745
|
+
To ArtifactInfo-compatible format (matching A2AConnector output):
|
|
746
|
+
{"artifact_id": "uuid", "name": "...", "content_type": "file", "mime_type": "...",
|
|
747
|
+
"file_name": "...", "has_file_data": True, "has_file_uri": False,
|
|
748
|
+
"file_data": "base64...", "file_uri": None, "description": "...", "parts": None}
|
|
749
|
+
|
|
750
|
+
Args:
|
|
751
|
+
artifact: Artifact dict in either format.
|
|
752
|
+
|
|
753
|
+
Returns:
|
|
754
|
+
Artifact dict in ArtifactInfo-compatible format.
|
|
755
|
+
"""
|
|
756
|
+
normalized = artifact.copy()
|
|
757
|
+
|
|
758
|
+
# Convert 'data' → 'file_data'
|
|
759
|
+
if "data" in normalized and "file_data" not in normalized:
|
|
760
|
+
normalized["file_data"] = normalized.pop("data")
|
|
761
|
+
|
|
762
|
+
# Generate artifact_id if missing
|
|
763
|
+
if not normalized.get("artifact_id"):
|
|
764
|
+
normalized["artifact_id"] = str(uuid.uuid4())
|
|
765
|
+
|
|
766
|
+
# Set file_name from name if not present
|
|
767
|
+
if "name" in normalized and "file_name" not in normalized:
|
|
768
|
+
normalized["file_name"] = normalized["name"]
|
|
769
|
+
|
|
770
|
+
# Convert 'artifact_type' → 'content_type' and remove artifact_type
|
|
771
|
+
if "artifact_type" in normalized:
|
|
772
|
+
if "content_type" not in normalized:
|
|
773
|
+
normalized["content_type"] = normalized["artifact_type"]
|
|
774
|
+
del normalized["artifact_type"]
|
|
775
|
+
|
|
776
|
+
# Remove 'artifact_name' if present (connector uses 'name' instead)
|
|
777
|
+
normalized.pop("artifact_name", None)
|
|
778
|
+
|
|
779
|
+
# Remove 'metadata' if present (not in ArtifactInfo model)
|
|
780
|
+
normalized.pop("metadata", None)
|
|
781
|
+
|
|
782
|
+
# Set has_file_data/has_file_uri flags
|
|
783
|
+
if "has_file_data" not in normalized:
|
|
784
|
+
normalized["has_file_data"] = bool(normalized.get("file_data"))
|
|
785
|
+
if "has_file_uri" not in normalized:
|
|
786
|
+
normalized["has_file_uri"] = bool(normalized.get("file_uri"))
|
|
787
|
+
|
|
788
|
+
# Add missing fields with None defaults for ArtifactInfo compatibility
|
|
789
|
+
if "file_uri" not in normalized:
|
|
790
|
+
normalized["file_uri"] = None
|
|
791
|
+
if "parts" not in normalized:
|
|
792
|
+
normalized["parts"] = None
|
|
793
|
+
if "description" not in normalized:
|
|
794
|
+
normalized["description"] = None
|
|
795
|
+
|
|
796
|
+
return normalized
|
|
797
|
+
|
|
798
|
+
async def transform_stream(
    self,
    stream: AsyncGenerator[A2AEvent, None],
) -> AsyncGenerator[dict[str, Any], None]:
    """Transform A2AEvent stream to SSE-compatible chunks.

    Wraps the input stream and transforms each event, handling artifact
    deduplication and time tracking across the stream. The stream is
    terminated early after a malformed-event error chunk is yielded.

    Args:
        stream: Async generator yielding A2AEvent dicts.

    Yields:
        SSEChunk dicts with normalized structure.

    Raises:
        Exceptions from underlying stream propagate to caller.
    """
    async for event in stream:
        chunk = self.transform_event(event)

        # Update time tracker with the response
        chunk = self._time_tracker.update_response_metadata(chunk)

        yield chunk

        # Terminate stream after yielding malformed-event error chunk.
        # Malformed events produce error chunks with empty metadata from _create_error_chunk.
        # BUG FIX: content may be None on error-status chunks produced from
        # empty-payload events (the CONTENT key exists with value None, so a
        # .get() default never applies); coalesce to "" before startswith to
        # avoid AttributeError on None.
        content = chunk.get(ChunkFieldKeys.CONTENT) or ""
        if (
            chunk.get(ChunkFieldKeys.STATUS) == ChunkStatus.ERROR
            and content.startswith("Malformed event:")
            and chunk.get(ChunkFieldKeys.METADATA) == {}
        ):
            break
|