aip-agents-binary 0.5.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (280)
  1. aip_agents/__init__.py +65 -0
  2. aip_agents/a2a/__init__.py +19 -0
  3. aip_agents/a2a/server/__init__.py +10 -0
  4. aip_agents/a2a/server/base_executor.py +1086 -0
  5. aip_agents/a2a/server/google_adk_executor.py +198 -0
  6. aip_agents/a2a/server/langflow_executor.py +180 -0
  7. aip_agents/a2a/server/langgraph_executor.py +270 -0
  8. aip_agents/a2a/types.py +232 -0
  9. aip_agents/agent/__init__.py +27 -0
  10. aip_agents/agent/base_agent.py +970 -0
  11. aip_agents/agent/base_langgraph_agent.py +2942 -0
  12. aip_agents/agent/google_adk_agent.py +926 -0
  13. aip_agents/agent/google_adk_constants.py +6 -0
  14. aip_agents/agent/hitl/__init__.py +24 -0
  15. aip_agents/agent/hitl/config.py +28 -0
  16. aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
  17. aip_agents/agent/hitl/manager.py +532 -0
  18. aip_agents/agent/hitl/models.py +18 -0
  19. aip_agents/agent/hitl/prompt/__init__.py +9 -0
  20. aip_agents/agent/hitl/prompt/base.py +42 -0
  21. aip_agents/agent/hitl/prompt/deferred.py +73 -0
  22. aip_agents/agent/hitl/registry.py +149 -0
  23. aip_agents/agent/interface.py +138 -0
  24. aip_agents/agent/interfaces.py +65 -0
  25. aip_agents/agent/langflow_agent.py +464 -0
  26. aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
  27. aip_agents/agent/langgraph_react_agent.py +2514 -0
  28. aip_agents/agent/system_instruction_context.py +34 -0
  29. aip_agents/clients/__init__.py +10 -0
  30. aip_agents/clients/langflow/__init__.py +10 -0
  31. aip_agents/clients/langflow/client.py +477 -0
  32. aip_agents/clients/langflow/types.py +18 -0
  33. aip_agents/constants.py +23 -0
  34. aip_agents/credentials/manager.py +132 -0
  35. aip_agents/examples/__init__.py +5 -0
  36. aip_agents/examples/compare_streaming_client.py +783 -0
  37. aip_agents/examples/compare_streaming_server.py +142 -0
  38. aip_agents/examples/demo_memory_recall.py +401 -0
  39. aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
  40. aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
  41. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
  42. aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
  43. aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
  44. aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
  45. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
  46. aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
  47. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
  48. aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
  49. aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
  50. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
  51. aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
  52. aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
  53. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
  54. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
  55. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
  56. aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
  57. aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
  58. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
  59. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
  60. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
  61. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
  62. aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
  63. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
  64. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
  65. aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
  66. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
  67. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
  68. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
  69. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
  70. aip_agents/examples/hello_world_google_adk.py +41 -0
  71. aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
  72. aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
  73. aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
  74. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
  75. aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
  76. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
  77. aip_agents/examples/hello_world_google_adk_stream.py +44 -0
  78. aip_agents/examples/hello_world_langchain.py +28 -0
  79. aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
  80. aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
  81. aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
  82. aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
  83. aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
  84. aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
  85. aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
  86. aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
  87. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
  88. aip_agents/examples/hello_world_langchain_stream.py +36 -0
  89. aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
  90. aip_agents/examples/hello_world_langflow_agent.py +163 -0
  91. aip_agents/examples/hello_world_langgraph.py +39 -0
  92. aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
  93. aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
  94. aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
  95. aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
  96. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
  97. aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
  98. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
  99. aip_agents/examples/hello_world_langgraph_stream.py +43 -0
  100. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
  101. aip_agents/examples/hello_world_model_switch_cli.py +210 -0
  102. aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
  103. aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
  104. aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
  105. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
  106. aip_agents/examples/hello_world_pii_logger.py +21 -0
  107. aip_agents/examples/hello_world_sentry.py +133 -0
  108. aip_agents/examples/hello_world_step_limits.py +273 -0
  109. aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
  110. aip_agents/examples/hello_world_tool_output_client.py +46 -0
  111. aip_agents/examples/hello_world_tool_output_server.py +114 -0
  112. aip_agents/examples/hitl_demo.py +724 -0
  113. aip_agents/examples/mcp_configs/configs.py +63 -0
  114. aip_agents/examples/mcp_servers/common.py +76 -0
  115. aip_agents/examples/mcp_servers/mcp_name.py +29 -0
  116. aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
  117. aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
  118. aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
  119. aip_agents/examples/mcp_servers/mcp_time.py +10 -0
  120. aip_agents/examples/pii_demo_langgraph_client.py +69 -0
  121. aip_agents/examples/pii_demo_langgraph_server.py +126 -0
  122. aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
  123. aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
  124. aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
  125. aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
  126. aip_agents/examples/tools/__init__.py +27 -0
  127. aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
  128. aip_agents/examples/tools/adk_weather_tool.py +60 -0
  129. aip_agents/examples/tools/data_generator_tool.py +103 -0
  130. aip_agents/examples/tools/data_visualization_tool.py +312 -0
  131. aip_agents/examples/tools/image_artifact_tool.py +136 -0
  132. aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
  133. aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
  134. aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
  135. aip_agents/examples/tools/langchain_weather_tool.py +48 -0
  136. aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
  137. aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
  138. aip_agents/examples/tools/pii_demo_tools.py +189 -0
  139. aip_agents/examples/tools/random_chart_tool.py +142 -0
  140. aip_agents/examples/tools/serper_tool.py +202 -0
  141. aip_agents/examples/tools/stock_tools.py +82 -0
  142. aip_agents/examples/tools/table_generator_tool.py +167 -0
  143. aip_agents/examples/tools/time_tool.py +82 -0
  144. aip_agents/examples/tools/weather_forecast_tool.py +38 -0
  145. aip_agents/executor/agent_executor.py +473 -0
  146. aip_agents/executor/base.py +48 -0
  147. aip_agents/mcp/__init__.py +1 -0
  148. aip_agents/mcp/client/__init__.py +14 -0
  149. aip_agents/mcp/client/base_mcp_client.py +369 -0
  150. aip_agents/mcp/client/connection_manager.py +193 -0
  151. aip_agents/mcp/client/google_adk/__init__.py +11 -0
  152. aip_agents/mcp/client/google_adk/client.py +381 -0
  153. aip_agents/mcp/client/langchain/__init__.py +11 -0
  154. aip_agents/mcp/client/langchain/client.py +265 -0
  155. aip_agents/mcp/client/persistent_session.py +359 -0
  156. aip_agents/mcp/client/session_pool.py +351 -0
  157. aip_agents/mcp/client/transports.py +215 -0
  158. aip_agents/mcp/utils/__init__.py +7 -0
  159. aip_agents/mcp/utils/config_validator.py +139 -0
  160. aip_agents/memory/__init__.py +14 -0
  161. aip_agents/memory/adapters/__init__.py +10 -0
  162. aip_agents/memory/adapters/base_adapter.py +717 -0
  163. aip_agents/memory/adapters/mem0.py +84 -0
  164. aip_agents/memory/base.py +84 -0
  165. aip_agents/memory/constants.py +49 -0
  166. aip_agents/memory/factory.py +86 -0
  167. aip_agents/memory/guidance.py +20 -0
  168. aip_agents/memory/simple_memory.py +47 -0
  169. aip_agents/middleware/__init__.py +17 -0
  170. aip_agents/middleware/base.py +88 -0
  171. aip_agents/middleware/manager.py +128 -0
  172. aip_agents/middleware/todolist.py +274 -0
  173. aip_agents/schema/__init__.py +69 -0
  174. aip_agents/schema/a2a.py +56 -0
  175. aip_agents/schema/agent.py +111 -0
  176. aip_agents/schema/hitl.py +157 -0
  177. aip_agents/schema/langgraph.py +37 -0
  178. aip_agents/schema/model_id.py +97 -0
  179. aip_agents/schema/step_limit.py +108 -0
  180. aip_agents/schema/storage.py +40 -0
  181. aip_agents/sentry/__init__.py +11 -0
  182. aip_agents/sentry/sentry.py +151 -0
  183. aip_agents/storage/__init__.py +41 -0
  184. aip_agents/storage/base.py +85 -0
  185. aip_agents/storage/clients/__init__.py +12 -0
  186. aip_agents/storage/clients/minio_client.py +318 -0
  187. aip_agents/storage/config.py +62 -0
  188. aip_agents/storage/providers/__init__.py +15 -0
  189. aip_agents/storage/providers/base.py +106 -0
  190. aip_agents/storage/providers/memory.py +114 -0
  191. aip_agents/storage/providers/object_storage.py +214 -0
  192. aip_agents/tools/__init__.py +33 -0
  193. aip_agents/tools/bosa_tools.py +105 -0
  194. aip_agents/tools/browser_use/__init__.py +82 -0
  195. aip_agents/tools/browser_use/action_parser.py +103 -0
  196. aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
  197. aip_agents/tools/browser_use/llm_config.py +120 -0
  198. aip_agents/tools/browser_use/minio_storage.py +198 -0
  199. aip_agents/tools/browser_use/schemas.py +119 -0
  200. aip_agents/tools/browser_use/session.py +76 -0
  201. aip_agents/tools/browser_use/session_errors.py +132 -0
  202. aip_agents/tools/browser_use/steel_session_recording.py +317 -0
  203. aip_agents/tools/browser_use/streaming.py +813 -0
  204. aip_agents/tools/browser_use/structured_data_parser.py +257 -0
  205. aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
  206. aip_agents/tools/browser_use/types.py +78 -0
  207. aip_agents/tools/code_sandbox/__init__.py +26 -0
  208. aip_agents/tools/code_sandbox/constant.py +13 -0
  209. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
  210. aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
  211. aip_agents/tools/constants.py +165 -0
  212. aip_agents/tools/document_loader/__init__.py +44 -0
  213. aip_agents/tools/document_loader/base_reader.py +302 -0
  214. aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
  215. aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
  216. aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
  217. aip_agents/tools/document_loader/pdf_splitter.py +169 -0
  218. aip_agents/tools/gl_connector/__init__.py +5 -0
  219. aip_agents/tools/gl_connector/tool.py +351 -0
  220. aip_agents/tools/memory_search/__init__.py +22 -0
  221. aip_agents/tools/memory_search/base.py +200 -0
  222. aip_agents/tools/memory_search/mem0.py +258 -0
  223. aip_agents/tools/memory_search/schema.py +48 -0
  224. aip_agents/tools/memory_search_tool.py +26 -0
  225. aip_agents/tools/time_tool.py +117 -0
  226. aip_agents/tools/tool_config_injector.py +300 -0
  227. aip_agents/tools/web_search/__init__.py +15 -0
  228. aip_agents/tools/web_search/serper_tool.py +187 -0
  229. aip_agents/types/__init__.py +70 -0
  230. aip_agents/types/a2a_events.py +13 -0
  231. aip_agents/utils/__init__.py +79 -0
  232. aip_agents/utils/a2a_connector.py +1757 -0
  233. aip_agents/utils/artifact_helpers.py +502 -0
  234. aip_agents/utils/constants.py +22 -0
  235. aip_agents/utils/datetime/__init__.py +34 -0
  236. aip_agents/utils/datetime/normalization.py +231 -0
  237. aip_agents/utils/datetime/timezone.py +206 -0
  238. aip_agents/utils/env_loader.py +27 -0
  239. aip_agents/utils/event_handler_registry.py +58 -0
  240. aip_agents/utils/file_prompt_utils.py +176 -0
  241. aip_agents/utils/final_response_builder.py +211 -0
  242. aip_agents/utils/formatter_llm_client.py +231 -0
  243. aip_agents/utils/langgraph/__init__.py +19 -0
  244. aip_agents/utils/langgraph/converter.py +128 -0
  245. aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
  246. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
  247. aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
  248. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
  249. aip_agents/utils/langgraph/tool_output_management.py +967 -0
  250. aip_agents/utils/logger.py +195 -0
  251. aip_agents/utils/metadata/__init__.py +27 -0
  252. aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
  253. aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
  254. aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
  255. aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
  256. aip_agents/utils/metadata/activity_narrative/context.py +49 -0
  257. aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
  258. aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
  259. aip_agents/utils/metadata/schemas/__init__.py +16 -0
  260. aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
  261. aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
  262. aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
  263. aip_agents/utils/metadata_helper.py +358 -0
  264. aip_agents/utils/name_preprocessor/__init__.py +17 -0
  265. aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
  266. aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
  267. aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
  268. aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
  269. aip_agents/utils/pii/__init__.py +25 -0
  270. aip_agents/utils/pii/pii_handler.py +397 -0
  271. aip_agents/utils/pii/pii_helper.py +207 -0
  272. aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
  273. aip_agents/utils/reference_helper.py +273 -0
  274. aip_agents/utils/sse_chunk_transformer.py +831 -0
  275. aip_agents/utils/step_limit_manager.py +265 -0
  276. aip_agents/utils/token_usage_helper.py +156 -0
  277. aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
  278. aip_agents_binary-0.5.20.dist-info/RECORD +280 -0
  279. aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
  280. aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2942 @@
+ """Base class for LangGraph-based agent implementations.
+
+ This class provides the core LangGraph machinery including graph compilation,
+ state handling, and I/O mapping for LangGraph agents.
+
+ Authors:
+     Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
+     Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
+     Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
+     Raymond Christopher (raymond.christopher@gdplabs.id)
+ """
+
+ import asyncio
+ import copy
+ import hashlib
+ import json
+ import uuid
+ from abc import abstractmethod
+ from collections.abc import AsyncGenerator, Sequence
+ from concurrent.futures import Future
+ from contextlib import suppress
+ from contextvars import ContextVar
+ from dataclasses import dataclass
+ from typing import Annotated, Any
+
+ from a2a.types import AgentCard
+ from aiostream import stream as astream
+ from gllm_core.event import EventEmitter
+ from gllm_core.event.handler import StreamEventHandler
+ from gllm_core.schema import Chunk
+ from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
+ from langchain_core.tools import BaseTool
+ from langgraph.graph import StateGraph
+ from langgraph.graph.message import add_messages
+ from langgraph.graph.state import CompiledStateGraph
+ from langgraph.types import Checkpointer, StreamWriter
+ from pydantic import ValidationError
+ from typing_extensions import TypedDict
+
+ from aip_agents.agent.base_agent import BaseAgent
+ from aip_agents.agent.system_instruction_context import get_current_date_context
+ from aip_agents.constants import TEXT_PREVIEW_LENGTH
+ from aip_agents.mcp.client import LangchainMCPClient
+ from aip_agents.memory import BaseMemory, MemoryFactory, MemoryMethod
+ from aip_agents.memory.constants import MemoryDefaults
+ from aip_agents.schema.agent import StreamMode
+ from aip_agents.schema.hitl import HitlMetadata
+ from aip_agents.tools.tool_config_injector import (
+     CONFIG_SCHEMA_ATTR,
+     TOOL_CONFIG_SCHEMA_ATTR,
+     inject_config_methods_into_tool,
+ )
+ from aip_agents.types import A2AEvent, A2AStreamEventType
+ from aip_agents.utils import augment_query_with_file_paths, validate_references
+ from aip_agents.utils.langgraph.tool_managers.a2a_tool_manager import A2AToolManager
+ from aip_agents.utils.langgraph.tool_managers.delegation_tool_manager import (
+     DelegationToolManager,
+ )
+ from aip_agents.utils.logger import get_logger
+ from aip_agents.utils.metadata.activity_metadata_helper import create_tool_activity_info
+ from aip_agents.utils.metadata_helper import (
+     DefaultStepMessages,
+     Kind,
+     MetadataFieldKeys,
+     Status,
+     end_step_counter_scope,
+     get_next_step_number,
+     start_step_counter_scope,
+ )
+ from aip_agents.utils.pii import deanonymize_final_response_content
+ from aip_agents.utils.sse_chunk_transformer import SSEChunkTransformer
+ from aip_agents.utils.step_limit_manager import _STEP_LIMIT_CONFIG_CVAR
+ from aip_agents.utils.token_usage_helper import (
+     STEP_USAGE_KEY,
+     TOTAL_USAGE_KEY,
+     USAGE_METADATA_KEY,
+ )
+
+ logger = get_logger(__name__)
+
+ # Context variable to access current thread_id during streaming callbacks
+ _THREAD_ID_CVAR: ContextVar[str | None] = ContextVar("aip_agents_thread_id", default=None)
+
+
+ # Context variable to track operation mode for dependency tracking
+ # "parallel" = include all completed steps (default for backward compatibility)
+ # "sequential" = include only the most recent completed step
+ _OPERATION_MODE_CVAR: ContextVar[str] = ContextVar("aip_agents_operation_mode", default="parallel")
+
+
+ @dataclass
+ class _StreamingContext:
+     """Context object for managing streaming state and configuration."""
+
+     original_query: str
+     graph_input: dict[str, Any]
+     config: dict[str, Any]
+     memory_user_id: str | None
+     current_thread_id: str
+     token: Any
+     enable_token_streaming: bool
+
+     # Streaming state
+     final_event_yielded: bool = False
+     pending_artifacts: list | None = None
+     seen_artifact_hashes: set | None = None
+     processed_message_count: int = 0
+     final_state: dict[str, Any] | None = None
+     last_final_content: str | None = None
+     saved_memory: bool = False
+     is_token_streaming: bool = False
+
+     def __post_init__(self):
+         """Initialize mutable defaults."""
+         if self.pending_artifacts is None:
+             self.pending_artifacts = []
+         if self.seen_artifact_hashes is None:
+             self.seen_artifact_hashes = set()
+         if self.final_state is None:
+             self.final_state = {}
+
+
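The two module-level ContextVars above propagate per-task state (current thread ID, operation mode) through async streaming callbacks without threading it through every signature. A minimal standalone sketch of the pattern; names mirror the module, but this is illustrative, not package code:

from contextvars import ContextVar

_MODE: ContextVar[str] = ContextVar("operation_mode", default="parallel")

def current_mode() -> str:
    # Each asyncio task sees the value set in its own context.
    return _MODE.get()

token = _MODE.set("sequential")  # set() returns a Token for later restoration
try:
    assert current_mode() == "sequential"
finally:
    _MODE.reset(token)           # restore the previous value
assert current_mode() == "parallel"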
+ class BaseLangGraphAgent(BaseAgent):
+     """Base class for LangGraph-based agents with unified tool approach.
+
+     Provides core LangGraph functionality including:
+     - Graph compilation and execution
+     - State schema management
+     - I/O mapping between user inputs and graph states
+     - Event emission support
+     - Tool resolution and handling
+     - A2A communication capabilities via tools
+     - Agent delegation capabilities via tools
+     - MCP server integration via tools
+     - Enhanced output extraction from various state formats
+
+     Tool Management:
+     - regular_tools: Standard LangChain tools provided during initialization
+     - mcp_tools: Tools retrieved from MCP servers
+     - resolved_tools: Combined collection of all tools for graph execution
+
+     Subclasses must implement:
+     - define_graph(): Define the specific graph structure
+     - _prepare_graph_input(): Convert user input to graph state
+     - _format_graph_output(): Convert final graph state to user output
+     """
+
+     def __init__(  # noqa: PLR0913
+         self,
+         name: str,
+         instruction: str,
+         description: str | None = None,
+         model: Any | None = None,
+         tools: Sequence[BaseTool] | None = None,
+         state_schema: type | None = None,
+         thread_id_key: str = "thread_id",
+         event_emitter: EventEmitter | None = None,
+         checkpointer: Checkpointer | None = None,
+         enable_a2a_token_streaming: bool = False,
+         **kwargs: Any,
+     ):
+         """Initialize the BaseLangGraphAgent.
+
+         Args:
+             name: The name of the agent.
+             instruction: The system instruction for the agent.
+             description: Human-readable description of the agent.
+             model: The model to use (lm_invoker, LangChain model, string, etc.).
+             tools: Sequence of regular LangChain tools (not A2A or delegation tools).
+             state_schema: The state schema for the LangGraph. Defaults to basic message state.
+             thread_id_key: Key for thread ID in configuration.
+             event_emitter: Optional event emitter for streaming updates.
+             checkpointer: Optional checkpointer for conversation persistence.
+             enable_a2a_token_streaming: Enable token-level streaming for A2A responses.
+                 - False (default): Stream message-level events only
+                 - True: Stream individual tokens plus message-level events
+             **kwargs: Additional keyword arguments passed to BaseAgent (including tool_configs and memory settings).
+                 Memory settings include:
+                 - memory_backend: Memory backend (e.g., "mem0")
+                 - agent_id: Agent identifier for memory scoping
+                 - memory_namespace: Memory namespace
+                 - save_interaction_to_memory: Whether to save interactions (default True)
+         """
+         super().__init__(
+             name=name,
+             instruction=instruction,
+             description=description,
+             model=model,
+             tools=list(tools) if tools else [],
+             **kwargs,
+         )
+
+         self._add_system_date_context()
+
+         self.state_schema = state_schema
+         self.thread_id_key = thread_id_key
+         self.enable_a2a_token_streaming = enable_a2a_token_streaming
+         self.event_emitter = event_emitter
+         self.checkpointer = checkpointer
+
+         self._mem0_client: Any | None = None
+         self.memory: BaseMemory | None = None
+         self._initialize_memory_from_kwargs(name, kwargs)
+
+         self.a2a_tool_manager = A2AToolManager()
+         self.delegation_tool_manager = DelegationToolManager(parent_agent=self)
+
+         self.regular_tools: list[BaseTool] = self._resolve_and_validate_tools()
+         self.mcp_tools: list[BaseTool] = []
+         self.resolved_tools: list[BaseTool] = self.regular_tools.copy()
+
+         self._compiled_graph = self._build_and_compile_graph()
+
+         self._tool_parent_map_by_thread: dict[str, dict[str, str]] = {}
+         self._completed_tool_steps_by_thread: dict[str, list[str]] = {}
+         self._last_status_step_id_by_thread: dict[str, str] = {}
+         self._coordinator_completed_tool_steps_by_thread: dict[str, list[str]] = {}
+         self._emitted_tool_calls_by_thread: dict[str, set[str]] = {}
+
+     def _create_default_event_emitter(self) -> EventEmitter:
+         """Create default event emitter for token streaming.
+
+         Returns:
+             EventEmitter with StreamEventHandler configured for token streaming.
+         """
+         stream_handler = StreamEventHandler(name=f"{self.name}_A2AStreamHandler")
+         logger.info(f"Agent '{self.name}': Auto-created event emitter for token streaming")
+         return EventEmitter(handlers=[stream_handler])
+
+     def _log_streaming_event_debug(self, source: str, event: dict[str, Any]) -> None:
+         """Log the raw streaming event for debugging purposes.
+
+         Args:
+             source: A short label describing where the event originated.
+             event: The event payload emitted by the streaming pipeline.
+         """
+         try:
+             logger.info("Streaming event (%s): %s", source, event)
+         except Exception as exc:  # noqa: BLE001
+             logger.debug("Failed to log streaming event: %s", exc, exc_info=True)
+
+     def _record_emitted_tool_calls(self, tool_calls_details: list[dict[str, Any]]) -> None:
+         """Track tool call IDs that have already been emitted to avoid duplicates.
+
+         Args:
+             tool_calls_details: Tool call metadata emitted by the tool_call event.
+         """
+         thread_id = _THREAD_ID_CVAR.get()
+         if not thread_id or not tool_calls_details:
+             return
+
+         emitted = self._emitted_tool_calls_by_thread.setdefault(thread_id, set())
+         for details in tool_calls_details:
+             call_id = details.get("id")
+             if isinstance(call_id, str) and call_id:
+                 emitted.add(call_id)
+                 logger.info(
+                     "Registered tool call event: agent=%s thread=%s call_id=%s payload=%s",
+                     self.name,
+                     thread_id,
+                     call_id,
+                     details,
+                 )
+
+     def _discard_emitted_tool_call(self, tool_call_id: str | None) -> None:
+         """Remove a tool call ID from the emitted tracker.
+
+         Args:
+             tool_call_id: Identifier of the tool call to remove from cache.
+         """
+         if not tool_call_id:
+             return
+         thread_id = _THREAD_ID_CVAR.get()
+         if not thread_id:
+             return
+         emitted = self._emitted_tool_calls_by_thread.get(thread_id)
+         if emitted:
+             emitted.discard(tool_call_id)
+             logger.info(
+                 "Cleared recorded tool call: agent=%s thread=%s call_id=%s",
+                 self.name,
+                 thread_id,
+                 tool_call_id,
+             )
+
+     def _get_stream_handler(self) -> StreamEventHandler | None:
+         """Get StreamEventHandler from event_emitter if available.
+
+         Returns:
+             StreamEventHandler instance if found, None otherwise.
+         """
+         if not self.event_emitter or not self.event_emitter.handlers:
+             return None
+
+         for handler in self.event_emitter.handlers:
+             if isinstance(handler, StreamEventHandler):
+                 return handler
+         return None
+
+     def _add_system_date_context(self):
+         """Prepend the current date context to the agent's system instruction.
+
+         The `get_current_date_context()` helper returns a short natural-language phrase
+         describing "today" (e.g., "Today is DD MMM YYYY"). By prepending this
+         snippet the agent gains up-to-date temporal grounding for each run,
+         which is especially important for prompts that reason about recency or compute
+         relative dates.
+         """
+         date_context = get_current_date_context()
+         self.instruction = date_context + "\n\n" + self.instruction
+         logger.info(f"Agent '{self.name}': Prepended current date context to system instruction")
+
+     def set_operation_mode(self, mode: str) -> None:
+         """Set the operation mode for dependency tracking.
+
+         Args:
+             mode: Operation mode - "parallel" (default) or "sequential"
+         """
+         if mode not in ["parallel", "sequential"]:
+             raise ValueError(f"Invalid operation mode: {mode}. Must be 'parallel' or 'sequential'")
+         _OPERATION_MODE_CVAR.set(mode)
+
+     def _default_memory_agent_id(self, name: str) -> str:
+         """Create a stable identifier for memory scoping.
+
+         Args:
+             name: The agent's human-readable name.
+
+         Returns:
+             str: A deterministic ID derived from the class and name, suitable for scoping memory per agent.
+         """
+         base = f"{self.__class__.__name__}:{name}"
+         return f"{MemoryDefaults.AGENT_ID_PREFIX}{hashlib.sha256(base.encode()).hexdigest()}"
+
+     @staticmethod
+     def _parse_bool_value(value: Any) -> bool:
+         """Parse a value to boolean with string handling for "true"/"false".
+
+         Treats string "false" as False, "true" as True.
+         For other values, uses standard bool() conversion.
+
+         Args:
+             value: The value to parse.
+
+         Returns:
+             bool: The parsed boolean value.
+         """
+         if isinstance(value, str):
+             lower_value = value.lower().strip()
+             if lower_value == "false":
+                 return False
+             elif lower_value == "true":
+                 return True
+         return bool(value)
+
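One subtlety worth noting in `_parse_bool_value`: only the literal strings "true" and "false" (case-insensitive, whitespace-stripped) get special handling, so any other non-empty string falls through to `bool()` and parses as True. A quick behavior sketch:

BaseLangGraphAgent._parse_bool_value("false")   # False (explicit string match)
BaseLangGraphAgent._parse_bool_value(" TRUE ")  # True  (stripped, case-insensitive)
BaseLangGraphAgent._parse_bool_value("")        # False (bool("") is False)
BaseLangGraphAgent._parse_bool_value("no")      # True  (non-empty string, bool() is True)
BaseLangGraphAgent._parse_bool_value(0)         # False (standard bool() conversion)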
+     def _memory_enabled(self) -> bool:
+         """Check whether memory is enabled for this agent.
+
+         Returns:
+             bool: True when a memory adapter is set.
+         """
+         return self.memory is not None
+
+     def _has_lm_invoker(self) -> bool:
+         """Check whether lm_invoker is available for this agent.
+
+         Returns:
+             bool: True when lm_invoker attribute exists and is not None.
+         """
+         return self.lm_invoker is not None
+
+     def _memory_search(self, query: str, override_user_id: str | None = None) -> list[dict[str, Any]]:
+         """Search for relevant memories using the configured adapter.
+
+         Args:
+             query: The user query to retrieve relevant memories for.
+             override_user_id: Optional per-call override for the memory scope.
+
+         Returns:
+             list[dict[str, Any]]: Memory hits; empty list on failure or when disabled.
+         """
+         if not (self._memory_enabled() and isinstance(query, str)):
+             return []
+         try:
+             user_id = override_user_id or self.memory_agent_id
+             if hasattr(self.memory, MemoryMethod.SEARCH):
+                 results = self.memory.search(query=query, user_id=user_id, limit=self.memory_retrieval_limit)  # type: ignore[attr-defined]
+                 return results if isinstance(results, list) else []
+             else:
+                 return []
+         except Exception as e:  # noqa: BLE001
+             logger.debug(f"Memory: search ignored error: {e}")
+             return []
+
+     def _memory_save_interaction(self, user_text: str, ai_text: str, memory_user_id: str | None = None) -> None:
+         """Persist the user/assistant pair using the configured adapter (best-effort).
+
+         Args:
+             user_text: The user input text.
+             ai_text: The assistant output text.
+             memory_user_id: Optional per-call memory scope override.
+         """
+         if not (self.save_interaction_to_memory and self._memory_enabled() and user_text and ai_text):
+             logger.debug("Memory: Skipping save_interaction - saving disabled, memory disabled, or empty text")
+             return
+         try:
+             user_id = memory_user_id or self.memory_agent_id
+             logger.info(
+                 f"Memory: Saving interaction for user_id='{user_id}' - "
+                 f"User: '{user_text[:TEXT_PREVIEW_LENGTH]}{'...' if len(user_text) > TEXT_PREVIEW_LENGTH else ''}' "
+                 f"AI: '{ai_text[:TEXT_PREVIEW_LENGTH]}{'...' if len(ai_text) > TEXT_PREVIEW_LENGTH else ''}'"
+             )
+             save_async = getattr(self.memory, "save_interaction_async", None)
+             if callable(save_async):
+                 future = save_async(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)
+                 self._watch_memory_future(future, user_id)
+             elif hasattr(self.memory, MemoryMethod.SAVE_INTERACTION):
+                 self.memory.save_interaction(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)  # type: ignore[attr-defined]
+             else:
+                 logger.warning(
+                     "Memory: save_interaction method NOT available on memory adapter "
+                     f"(type: {type(self.memory).__name__})"
+                 )
+         except Exception as e:  # noqa: BLE001
+             logger.debug(f"Memory: save_interaction ignored error: {e}")
+
+     @staticmethod
+     def _watch_memory_future(future: Any, user_id: str) -> None:
+         """Attach logging to asynchronous memory writes.
+
+         Args:
+             future: The Future object to monitor for completion.
+             user_id: User identifier for logging context.
+         """
+         if not isinstance(future, Future):
+             return
+
+         def _log_completion(done: Future) -> None:
+             """Log memory save completion or failure.
+
+             Args:
+                 done: Future object that has completed.
+             """
+             exc = done.exception()
+             if exc:
+                 logger.warning("Memory: async save failed for user_id='%s': %s", user_id, exc)
+
+         future.add_done_callback(_log_completion)
+
+     def _resolve_and_validate_tools(self) -> list[BaseTool]:
+         """Resolve and validate regular tools for LangGraph usage.
+
+         Also configures tools with injected configuration capabilities
+         from agent-level tool_configs.
+
+         Returns:
+             List of resolved LangChain BaseTool instances.
+         """
+         resolved = []
+         for tool in self.tools:
+             if isinstance(tool, BaseTool):
+                 self._configure_injected_tool(tool)
+                 resolved.append(tool)
+             else:
+                 logger.warning(f"Agent '{self.name}': Tool {tool} is not a LangChain BaseTool, skipping")
+
+         logger.info(f"Agent '{self.name}': Resolved {len(resolved)} regular tools for LangGraph")
+         return resolved
+
+     def _initialize_memory_from_kwargs(self, agent_name: str, kwargs: dict[str, Any]) -> None:
+         """Initialize memory-related settings and adapter.
+
+         Extracts known memory kwargs, sets defaults, and initializes the adapter when enabled.
+         Keeps ``__init__`` concise and improves DX.
+
+         Args:
+             agent_name: The name of the agent, used to derive a default memory id.
+             kwargs: Keyword arguments from the agent constructor; consumed keys are removed.
+                 Supported memory kwargs:
+                 - memory_backend: str - Memory backend to use (e.g., "mem0")
+                 - agent_id: str - Agent identifier for memory scoping
+                 - memory_namespace: str - Memory namespace
+                 - memory_retrieval_limit: int - Max memories to retrieve
+                 - memory_max_chars: int - Max characters per memory
+                 - save_interaction_to_memory: bool (default True) - Whether to save interactions to memory
+         """
+         # Initialize memory configuration settings
+         self.memory_backend: str | None = kwargs.pop("memory_backend", None)
+         self.memory_agent_id: str = str(kwargs.pop("agent_id", self._default_memory_agent_id(agent_name)))
+         self.memory_namespace: str | None = kwargs.pop("memory_namespace", None)
+         self.memory_retrieval_limit: int = int(kwargs.pop("memory_retrieval_limit", MemoryDefaults.RETRIEVAL_LIMIT))
+         self.memory_max_chars: int = int(kwargs.pop("memory_max_chars", MemoryDefaults.MAX_CHARS))
+
+         # Initialize memory interaction saving flag with proper bool conversion
+         save_raw = kwargs.pop("save_interaction_to_memory", True)
+         self.save_interaction_to_memory: bool = self._parse_bool_value(save_raw)
+
+         if self.memory_backend:
+             memory_kwargs = {
+                 "limit": self.memory_retrieval_limit,
+                 "max_chars": self.memory_max_chars,
+                 "agent_id": self.memory_agent_id,
+             }
+             if self.memory_namespace:
+                 memory_kwargs["namespace"] = self.memory_namespace
+
+             self._mem0_client = MemoryFactory.create(
+                 self.memory_backend,
+                 **memory_kwargs,
+             )
+             self.memory = self._mem0_client
+
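Putting the supported memory kwargs together, constructing an agent with memory enabled might look like the sketch below; `MyAgent` is a hypothetical concrete subclass and the values are illustrative:

agent = MyAgent(
    name="research_assistant",
    instruction="Answer questions using saved research notes.",
    memory_backend="mem0",              # routed through MemoryFactory.create(...)
    agent_id="research-assistant-v1",   # overrides the derived sha256 default
    memory_namespace="team-alpha",      # optional namespace scoping
    memory_retrieval_limit=5,           # max memories fetched per query
    save_interaction_to_memory="true",  # strings accepted via _parse_bool_value
)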
+     def _configure_injected_tool(self, tool: BaseTool) -> None:
+         """Configure a tool with automatic configuration injection using agent-level defaults.
+
+         Args:
+             tool: The tool instance to configure.
+         """
+         if self._should_auto_inject_config(tool):
+             self._auto_inject_config_capabilities(tool)
+         self._apply_agent_config_to_tool(tool)
+
+     def _should_auto_inject_config(self, tool: BaseTool) -> bool:
+         """Check if tool needs auto-injection of configuration capabilities.
+
+         Args:
+             tool: The tool instance to check.
+
+         Returns:
+             True if tool needs auto-injection of configuration capabilities, False otherwise.
+         """
+         return (
+             hasattr(tool, TOOL_CONFIG_SCHEMA_ATTR)
+             and getattr(tool, TOOL_CONFIG_SCHEMA_ATTR) is not None
+             and not hasattr(tool, CONFIG_SCHEMA_ATTR)
+         )
+
+     def _auto_inject_config_capabilities(self, tool: BaseTool) -> None:
+         """Inject configuration capabilities into a tool with tool_config_schema.
+
+         Args:
+             tool: The tool instance to configure.
+         """
+         try:
+             tool_config_schema = getattr(tool, TOOL_CONFIG_SCHEMA_ATTR)
+             inject_config_methods_into_tool(tool, tool_config_schema)
+             logger.info(f"Agent '{self.name}': Auto-injected config capabilities into tool '{tool.name}'")
+         except Exception as e:
+             logger.warning(f"Agent '{self.name}': Failed to auto-inject config into tool '{tool.name}': {e}")
+
+     def _apply_agent_config_to_tool(self, tool: BaseTool) -> None:
+         """Apply agent-level configuration to a tool.
+
+         Args:
+             tool: The tool instance to configure.
+         """
+         tool_config_data = self._get_agent_config_for_tool(tool.name)
+
+         if tool_config_data is None:
+             return
+
+         try:
+             tool.set_tool_config(tool_config_data)
+             logger.info(f"Agent '{self.name}': Configured tool '{tool.name}' with agent defaults: {tool_config_data}")
+         except Exception as e:
+             logger.warning(f"Agent '{self.name}': Failed to configure tool '{tool.name}': {e}")
+
+     def _get_agent_config_for_tool(self, tool_name: str) -> dict[str, Any] | None:
+         """Get agent-level configuration data for a specific tool.
+
+         This method intentionally returns only per-tool configuration and does NOT include
+         global agent configuration. Global configuration merging is handled separately
+         in the metadata resolution process during tool execution.
+
+         Args:
+             tool_name: The name of the tool to get configuration for.
+
+         Returns:
+             The configuration data for the tool, or None if no configuration is found.
+         """
+         if not isinstance(self.tool_configs, dict):
+             return None
+
+         return self.tool_configs.get(tool_name)
+
+     def _sanitize_tool_names(self):
+         """Sanitize the names of resolved_tools so they comply with the model provider's naming rules."""
+         for tool in self.resolved_tools:
+             sanitized_name = self.name_preprocessor.sanitize_tool_name(tool.name)
+             tool.name = sanitized_name
+
+     def _build_and_compile_graph(self) -> CompiledStateGraph:
+         """Build and compile the LangGraph while ensuring tool names are valid.
+
+         Returns:
+             Compiled LangGraph ready for execution.
+         """
+         self._sanitize_tool_names()
+         try:
+             if self.state_schema:
+                 graph_builder = StateGraph(self.state_schema)
+             else:
+
+                 class DefaultAgentState(TypedDict):
+                     messages: Annotated[list[BaseMessage], add_messages]
+
+                 graph_builder = StateGraph(DefaultAgentState)
+
+             compiled_graph = self.define_graph(graph_builder)
+             logger.info(
+                 f"Agent '{self.name}': Successfully compiled LangGraph with {len(self.resolved_tools)} total tools"
+             )
+             return compiled_graph
+
+         except Exception as e:
+             logger.error(f"Agent '{self.name}': Failed to build LangGraph: {e}")
+             raise RuntimeError(f"Failed to build LangGraph for agent '{self.name}': {e}") from e
+
+     @abstractmethod
+     def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
+         """Define the specific graph structure for this agent type.
+
+         Subclasses must implement this method to:
+         1. Add nodes to the graph_builder
+         2. Add edges and conditional edges
+         3. Set entry points
+         4. Return the compiled graph
+
+         Args:
+             graph_builder: The StateGraph builder to define nodes and edges on.
+
+         Returns:
+             The compiled graph ready for execution.
+         """
+         raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement define_graph method")
+
+     @abstractmethod
+     def _prepare_graph_input(self, input_data: Any, **kwargs: Any) -> dict[str, Any]:
+         """Convert user input to graph state format.
+
+         Args:
+             input_data: The user's input (query string, structured data, etc.).
+             **kwargs: Additional keyword arguments from the user.
+
+         Returns:
+             Dictionary representing the initial graph state.
+         """
+         raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement _prepare_graph_input method")
+
+     @abstractmethod
+     def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
+         """Convert final graph state to user-friendly output.
+
+         Args:
+             final_state_result: The final state from graph execution.
+
+         Returns:
+             Formatted output for the user.
+         """
+         raise NotImplementedError(f"Agent '{self.name}': Subclasses must implement _format_graph_output method")
+
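Taken together, the three abstract methods form the subclass contract: build the graph, map user input into graph state, and map the final state back out. A minimal sketch of a concrete subclass; illustrative only, the echo node is not part of the package:

from typing import Any

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph


class EchoLangGraphAgent(BaseLangGraphAgent):
    """Hypothetical subclass: a single node that echoes the last user message."""

    def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
        def echo_node(state: dict) -> dict:
            last = state["messages"][-1]
            # The add_messages reducer appends this to the state's message list.
            return {"messages": [AIMessage(content=f"You said: {last.content}")]}

        graph_builder.add_node("echo", echo_node)
        graph_builder.set_entry_point("echo")
        graph_builder.set_finish_point("echo")
        return graph_builder.compile(checkpointer=self.checkpointer)

    def _prepare_graph_input(self, input_data: Any, **kwargs: Any) -> dict[str, Any]:
        return {"messages": [HumanMessage(content=str(input_data))]}

    def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
        return final_state_result["messages"][-1].content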
+     def _extract_metadata_from_kwargs(self, **kwargs: Any) -> dict[str, Any]:
+         """Extract metadata from kwargs for agent implementations.
+
+         Supports both flat and mixed metadata schemas:
+         - Flat dict (legacy): all keys applied to all tools and model calls
+         - Mixed dict (new): top-level keys applied to all tools, 'tool_configs' section per-tool
+
+         Args:
+             **kwargs: Keyword arguments that may contain metadata.
+
+         Returns:
+             dict[str, Any]: The metadata dictionary, or an empty dict if no metadata was provided.
+         """
+         return kwargs.get("metadata", {})
+
+     def _extract_output_from_dict_state(self, dict_state: dict[str, Any]) -> str | None:
+         """Extract output from a dictionary state (migrated from BaseLangChainAgent).
+
+         Args:
+             dict_state: A dictionary containing agent state information.
+
+         Returns:
+             The extracted output string or None if no valid output found.
+         """
+         output_content: str | None = None
+         messages = dict_state.get("messages")
+         if isinstance(messages, list) and messages:
+             last_message = messages[-1]
+             if isinstance(last_message, AIMessage):
+                 candidate_content = getattr(last_message, "content", None)
+                 if not candidate_content:
+                     output_content = ""
+                 else:
+                     output_content = candidate_content
+             elif hasattr(last_message, "content"):
+                 output_content = getattr(last_message, "content", None)
+         if output_content is None:
+             candidate_output_from_key = dict_state.get("output")
+             if isinstance(candidate_output_from_key, str):
+                 output_content = candidate_output_from_key
+         return output_content
+
+     def _extract_output_from_list_state(self, list_state: list[Any]) -> str | None:
+         """Extract output from a list state (migrated from BaseLangChainAgent).
+
+         Args:
+             list_state: A list containing agent state information.
+
+         Returns:
+             The extracted output string or None if no valid output found.
+         """
+         output_content: str | None = None
+         if not list_state:
+             return None
+         last_item = list_state[-1]
+         if isinstance(last_item, AIMessage) and getattr(last_item, "content", None) is not None:
+             output_content = last_item.content
+         elif isinstance(last_item, str):
+             output_content = last_item
+         return output_content
+
+     def _extract_output_from_final_state(self, final_state_result: Any) -> str:
+         """Enhanced output extraction from final state (migrated from BaseLangChainAgent).
+
+         Args:
+             final_state_result: The final state from graph execution.
+
+         Returns:
+             Extracted text content.
+         """
+         output_content: str | None = None
+         if isinstance(final_state_result, dict):
+             output_content = self._extract_output_from_dict_state(final_state_result)
+         elif isinstance(final_state_result, str):
+             output_content = final_state_result
+         elif isinstance(final_state_result, list):
+             output_content = self._extract_output_from_list_state(final_state_result)
+
+         if output_content is None:
+             return "Error: Could not extract output from agent's final state."
+         return output_content
+
+     def register_a2a_agents(self, agent_cards: list[AgentCard]) -> None:
+         """Register A2A communication capabilities using the A2A tool manager.
+
+         Args:
+             agent_cards (list[AgentCard]): List of AgentCard instances for external communication.
+         """
+         if not agent_cards:
+             logger.info(f"Agent '{self.name}': No A2A agents to register")
+             return
+
+         a2a_tools = self.a2a_tool_manager.register_resources(agent_cards)
+         self.resolved_tools.extend(a2a_tools)
+
+         logger.info(f"Agent '{self.name}': Registered {len(agent_cards)} A2A agents as tools")
+         self._rebuild_graph()
+
+     def register_delegation_agents(self, agents: list[BaseAgent]) -> None:
+         """Register internal agent delegation capabilities using the delegation tool manager.
+
+         Args:
+             agents: List of BaseAgent instances for internal task delegation.
+         """
+         if not agents:
+             logger.info(f"Agent '{self.name}': No delegation agents to register")
+             return
+
+         delegation_tools = self.delegation_tool_manager.register_resources(agents)
+         self.resolved_tools.extend(delegation_tools)
+         logger.info(f"Agent '{self.name}': Registered {len(agents)} delegation agents as streaming tools")
+
+         self._rebuild_graph()
+
+     def update_regular_tools(self, new_tools: list[BaseTool], rebuild_graph: bool | None = None) -> None:
+         """Update regular tools (not capabilities).
+
+         Args:
+             new_tools: New list of regular tools to use.
+             rebuild_graph: Whether to rebuild graph. If None, uses auto_rebuild_graph setting.
+         """
+         logger.info(f"Agent '{self.name}': Updating regular tools from {len(self.tools)} to {len(new_tools)}")
+
+         self.tools = list(new_tools)
+         old_resolved_count = len(self.regular_tools)
+         self.regular_tools = self._resolve_and_validate_tools()
+
+         logger.info(
+             f"Agent '{self.name}': Regular tools changed from {old_resolved_count} to {len(self.regular_tools)}"
+         )
+
+         self._rebuild_resolved_tools()
+
+         should_rebuild = rebuild_graph if rebuild_graph is not None else True
+         if should_rebuild:
+             try:
+                 logger.info(f"Agent '{self.name}': Rebuilding graph with updated tools")
+                 self._compiled_graph = self._build_and_compile_graph()
+             except Exception as e:
+                 logger.error(f"Agent '{self.name}': Failed to rebuild graph after tool update: {e}")
+                 raise
+
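As an example, swapping in a new set of regular tools at runtime could look like this sketch, where `agent` is an already-constructed concrete subclass:

from langchain_core.tools import tool


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


# Replaces the regular tools, revalidates them, and (by default) rebuilds the
# graph; A2A, delegation, and MCP tools survive via _rebuild_resolved_tools().
agent.update_regular_tools([add])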
+     def _rebuild_resolved_tools(self) -> None:
+         """Rebuild resolved tools combining regular tools with capability tools."""
+         self.resolved_tools = self.regular_tools.copy()
+
+         if self.a2a_tool_manager:
+             a2a_tools = self.a2a_tool_manager.get_tools()
+             self.resolved_tools.extend(a2a_tools)
+             logger.info(f"Agent '{self.name}': Added {len(a2a_tools)} A2A tools")
+
+         if self.delegation_tool_manager:
+             delegation_tools = self.delegation_tool_manager.get_tools()
+             self.resolved_tools.extend(delegation_tools)
+             logger.info(f"Agent '{self.name}': Added {len(delegation_tools)} delegation tools")
+
+         if self.mcp_tools:
+             self.resolved_tools.extend(self.mcp_tools)
+             logger.info(f"Agent '{self.name}': Added {len(self.mcp_tools)} MCP tools")
+
+         logger.info(f"Agent '{self.name}': Rebuilt resolved tools: {len(self.resolved_tools)} total tools")
+
+     def _rebuild_graph(self) -> None:
+         """Rebuild and recompile the graph using the current set of tools.
+
+         Raises:
+             RuntimeError: If the graph rebuilding or compilation process fails.
+         """
+         try:
+             self._rebuild_resolved_tools()
+             self._compiled_graph = self._build_and_compile_graph()
+             logger.info(f"Agent '{self.name}': Successfully rebuilt graph")
+         except Exception as e:
+             logger.error(f"Agent '{self.name}': Failed to rebuild graph: {e}")
+             raise RuntimeError(f"Failed to rebuild graph for agent '{self.name}': {e}") from e
+
+     def run(self, query: str, **kwargs: Any) -> dict[str, Any]:
+         """Synchronously run the LangGraph agent.
+
+         Args:
+             query: The input query for the agent.
+             **kwargs: Additional keyword arguments.
+
+         Returns:
+             Dictionary containing the agent's response.
+         """
+         try:
+             return asyncio.run(self.arun(query, **kwargs))
+         except RuntimeError as e:
+             raise RuntimeError(f"Agent '{self.name}': Error in sync 'run'. Original: {e}") from e
+
+     async def arun(self, query: str, **kwargs: Any) -> dict[str, Any]:
+         """Asynchronously run the LangGraph agent with lazy MCP initialization.
+
+         Args:
+             query: The input query for the agent.
+             **kwargs: Additional keyword arguments including configurable for LangGraph.
+
+         Returns:
+             Dictionary containing the agent's response and full final state.
+         """
+         await self._ensure_mcp_tools_initialized()
+         return await self._arun(query, **kwargs)
+
+     async def _arun(self, query: str, **kwargs: Any) -> dict[str, Any]:
+         """Internal implementation of arun without MCP handling.
+
+         Args:
+             query: The input query for the agent.
+             **kwargs: Additional keyword arguments including configurable for LangGraph.
+
+         Returns:
+             Dictionary containing the agent's response and full final state.
+         """
+         memory_user_id: str | None = kwargs.get("memory_user_id")
+
+         # Create config first to ensure thread_id is generated
+         config = self._create_graph_config(**kwargs)
+         thread_id = self._get_thread_id_from_config(config)
+
+         graph_input = self._prepare_graph_input(query, thread_id=thread_id, **kwargs)
+
+         try:
+             final_state_result = await self._compiled_graph.ainvoke(graph_input, config=config)
+             formatted_output = self._format_graph_output(final_state_result)
+
+             try:
+                 self._memory_save_interaction(user_text=query, ai_text=formatted_output, memory_user_id=memory_user_id)
+             except Exception:
+                 pass
+
+             return {"output": formatted_output, "full_final_state": final_state_result}
+
+         except Exception as e:
+             logger.error(f"Agent '{self.name}': Error during graph execution: {e}")
+             raise RuntimeError(f"Agent '{self.name}': Graph execution failed: {e}") from e
+
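End to end, a call that pins the conversation thread might look like the following sketch, reusing the hypothetical `EchoLangGraphAgent` from above:

import asyncio


async def main() -> None:
    agent = EchoLangGraphAgent(name="echo", instruction="Echo the user.")
    # Reusing a thread_id across calls lets a configured checkpointer
    # persist and restore conversation state between invocations.
    result = await agent.arun("hello", configurable={"thread_id": "thread-42"})
    print(result["output"])            # formatted via _format_graph_output
    print(result["full_final_state"])  # the raw final graph state


asyncio.run(main())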
+     async def _stream_with_lm_invoker(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
+         """Handle streaming for LM Invoker using StreamEventHandler.
+
+         Args:
+             query: The input query for the agent.
+             **kwargs: Additional keyword arguments.
+
+         Yields:
+             Chunks of output (strings or dicts) from the streaming response.
+         """
+         stream_handler = StreamEventHandler(name=f"{self.name}_StreamHandler")
+         event_emitter = EventEmitter(handlers=[stream_handler])
+
+         async def run_and_cleanup():
+             """Runs the agent and ensures event emitter cleanup."""
+             try:
+                 await self.arun(
+                     query=query,
+                     event_emitter=event_emitter,
+                     **kwargs,
+                 )
+             finally:
+                 await event_emitter.close()
+
+         execution_task = asyncio.create_task(run_and_cleanup())
+
+         try:
+             async for event in stream_handler.stream():
+                 chunk_data = json.loads(event)
+                 chunk_value = chunk_data.get("value", "")
+                 if not chunk_value:
+                     continue
+                 if isinstance(chunk_value, (str, dict)):
+                     yield chunk_value
+
+             await execution_task
+
+         except asyncio.CancelledError:
+             execution_task.cancel()
+             await event_emitter.close()
+             with suppress(asyncio.CancelledError):
+                 await execution_task
+             raise
+         except Exception as e:
+             execution_task.cancel()
+             await event_emitter.close()
+             with suppress(asyncio.CancelledError):
+                 await execution_task
+             logger.error(f"Agent '{self.name}': Error during LM Invoker streaming: {e}")
+             yield {"error": f"Streaming failed: {e}"}
+
+     def _create_graph_config(self, **kwargs: Any) -> dict[str, Any]:
+         """Create standardized graph configuration with thread ID handling.
+
+         Guarantees a thread identifier is present in the returned config. The key used
+         is `self.thread_id_key` when set, otherwise the default key `"thread_id"`.
+
+         Args:
+             **kwargs: Additional keyword arguments including configurable, metadata, and pii_mapping.
+
+         Returns:
+             Dictionary containing the graph configuration with a guaranteed thread ID
+             and metadata (including pii_mapping) if provided.
+         """
+         configurable = kwargs.get("configurable", {}).copy()
+
+         key = self.thread_id_key or "thread_id"
+         if key not in configurable:
+             configurable[key] = str(uuid.uuid4())
+             logger.info(f"Agent '{self.name}': Generated new thread ID: {configurable[key]}")
+
+         config: dict[str, Any] = {"configurable": configurable}
+
+         # Include metadata in config to preserve pii_mapping and other metadata
+         # This ensures parity between direct SSE streaming and A2A executor paths
+         metadata = kwargs.get("metadata")
+         pii_mapping = kwargs.get("pii_mapping")
+
+         if metadata or pii_mapping:
+             config_metadata: dict[str, Any] = dict(metadata) if metadata else {}
+             if pii_mapping and "pii_mapping" not in config_metadata:
+                 config_metadata["pii_mapping"] = pii_mapping
+             config["metadata"] = config_metadata
+
+         return config
+
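For a call that already supplies a thread and a PII mapping, the resulting config would take roughly this shape (illustrative values):

config = agent._create_graph_config(
    configurable={"thread_id": "thread-42"},
    pii_mapping={"<EMAIL_1>": "user@example.com"},
)
# config == {
#     "configurable": {"thread_id": "thread-42"},
#     "metadata": {"pii_mapping": {"<EMAIL_1>": "user@example.com"}},
# }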
985
+ def _get_thread_id_from_config(self, config: dict[str, Any]) -> str | None:
986
+ """Extract thread_id from graph configuration.
987
+
988
+ Args:
989
+ config: Graph configuration dict with 'configurable' key.
990
+
991
+ Returns:
992
+ The thread_id value or None if not found.
993
+ """
994
+ configurable = config.get("configurable", {})
995
+ key = self.thread_id_key or "thread_id"
996
+ return configurable.get(key)
997
+
998
+ def _process_langgraph_event(self, event: dict[str, Any]) -> str | dict[str, Any] | None:
999
+ """Process a single LangGraph streaming event.
1000
+
1001
+ Args:
1002
+ event: Event from LangGraph's astream_events.
1003
+
1004
+ Returns:
1005
+ Processed output or None if event should be skipped.
1006
+ """
1007
+ event_type = event.get("event")
1008
+ event_data = event.get("data")
1009
+
1010
+ if event_type == "on_chat_model_stream" and event_data:
1011
+ chunk = event_data.get("chunk")
1012
+ if chunk and hasattr(chunk, "content") and chunk.content:
1013
+ return chunk.content
1014
+
1015
+ elif event_type == "on_tool_end" and event_data:
1016
+ output = event_data.get("output")
1017
+ if output:
1018
+ return {"tool_output": str(output)}
1019
+
1020
+ return None
1021
+
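
Editor's note: two representative astream_events payloads and what the mapping above yields (shapes simplified — real events carry more fields; `agent` is assumed to be an instance of this class):

from types import SimpleNamespace

# "on_chat_model_stream" -> the chunk's content string is yielded as-is.
token_event = {"event": "on_chat_model_stream",
               "data": {"chunk": SimpleNamespace(content="Hel")}}
# "on_tool_end" -> the tool output is wrapped in a dict.
tool_event = {"event": "on_tool_end", "data": {"output": "42"}}

assert agent._process_langgraph_event(token_event) == "Hel"
assert agent._process_langgraph_event(tool_event) == {"tool_output": "42"}
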
1022
+ def _should_yield_a2a_event(self, event_data: A2AEvent) -> bool:
1023
+ """Check if A2A event should be yielded based on event type.
1024
+
1025
+ Args:
1026
+ event_data: A2AEvent with semantic type information.
1027
+
1028
+ Returns:
1029
+ True if event should be yielded, False otherwise.
1030
+ """
1031
+ event_type = event_data.get("event_type")
1032
+
1033
+ if event_type in {
1034
+ A2AStreamEventType.TOOL_CALL,
1035
+ A2AStreamEventType.TOOL_RESULT,
1036
+ A2AStreamEventType.CONTENT_CHUNK,
1037
+ A2AStreamEventType.FINAL_RESPONSE,
1038
+ A2AStreamEventType.ERROR,
1039
+ }:
1040
+ return True
1041
+
1042
+ if event_type == A2AStreamEventType.STATUS_UPDATE:
1043
+ content = event_data.get("content", "")
1044
+ return bool(content.strip())
1045
+
1046
+ return True
1047
+
1048
+ async def _stream_with_langgraph(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
1049
+ """Handle streaming for LangChain models using LangGraph's native streaming.
1050
+
1051
+ Args:
1052
+ query: The input query for the agent.
1053
+ **kwargs: Additional keyword arguments.
1054
+
1055
+ Yields:
1056
+ Chunks of output (strings or dicts) from the streaming response.
1057
+ """
1058
+ # Create config first to ensure thread_id is generated
1059
+ config = self._create_graph_config(**kwargs)
1060
+ thread_id = self._get_thread_id_from_config(config)
1061
+
1062
+ graph_input = self._prepare_graph_input(query, thread_id=thread_id, **kwargs)
1063
+
1064
+ try:
1065
+ async for event in self._compiled_graph.astream_events(graph_input, config=config, version="v2"):
1066
+ processed_output = self._process_langgraph_event(event)
1067
+ if processed_output is not None:
1068
+ yield processed_output
1069
+
1070
+ except Exception as e:
1071
+ logger.error(f"Agent '{self.name}': Error during graph streaming: {e}")
1072
+ yield {"error": f"Streaming failed: {e}"}
1073
+
1074
+ async def arun_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
1075
+ """Asynchronously stream the LangGraph agent's response.
1076
+
1077
+ If MCP configuration exists, connects to the MCP server and registers tools before streaming.
1078
+ This method properly handles both LM Invoker and LangChain model streaming:
1079
+ - For LM Invoker: Uses StreamEventHandler to capture streaming events
1080
+ - For LangChain models: Uses LangGraph's native streaming implementation
1081
+
1082
+ Args:
1083
+ query: The input query for the agent.
1084
+ **kwargs: Additional keyword arguments.
1085
+
1086
+ Yields:
1087
+ Chunks of output (strings or dicts) from the streaming response.
1088
+ """
1089
+ await self._ensure_mcp_tools_initialized()
1090
+ async for chunk in self._arun_stream(query, **kwargs):
1091
+ yield chunk
1092
+
1093
+ async def _arun_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
1094
+ """Internal implementation of arun_stream without MCP handling.
1095
+
1096
+ This method properly handles both LM Invoker and LangChain model streaming:
1097
+ - For LM Invoker: Uses StreamEventHandler to capture streaming events
1098
+ - For LangChain models: Uses LangGraph's native streaming implementation
1099
+
1100
+ Args:
1101
+ query: The input query for the agent.
1102
+ **kwargs: Additional keyword arguments.
1103
+
1104
+ Yields:
1105
+ Chunks of output (strings or dicts) from the streaming response.
1106
+ """
1107
+ if self._has_lm_invoker():
1108
+ async for chunk in self._stream_with_lm_invoker(query, **kwargs):
1109
+ yield chunk
1110
+ else:
1111
+ async for chunk in self._stream_with_langgraph(query, **kwargs):
1112
+ yield chunk
1113
+
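
Editor's note: a minimal consumer sketch for the public streaming API, assuming an already-constructed agent instance; string chunks are streamed text while dict chunks carry tool output or errors:

import asyncio

async def main(agent):
    async for chunk in agent.arun_stream("What is the weather in Jakarta?"):
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)    # streamed text
        elif "error" in chunk:
            print(f"\n[stream error] {chunk['error']}")
        else:
            print(f"\n[tool] {chunk}")          # e.g. {"tool_output": ...}

# asyncio.run(main(agent))  # agent construction omitted here
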
1114
+ def _initialize_mcp_client(self) -> None:
1115
+ """Initialize/recreate MCP client with current config safely disposing previous.
1116
+
1117
+ This method creates a new LangchainMCPClient if MCP configuration exists,
1118
+ and safely disposes of any existing client before setting the new one.
1119
+ """
1120
+ new_client = LangchainMCPClient(self.mcp_config) if self.mcp_config else None
1121
+ self._set_mcp_client_safely(new_client)
1122
+
1123
+ async def _register_mcp_tools(self) -> None:
1124
+ """Initialize MCP tools once during agent setup using persistent sessions.
1125
+
1126
+ This method connects to MCP servers, retrieves available tools, and integrates
1127
+ them into the agent's tool collection. It includes timeout handling to prevent
1128
+ hanging operations.
1129
+
1130
+ Raises:
1131
+ RuntimeError: If MCP initialization times out after 30 seconds.
1132
+ Exception: If MCP tool initialization fails for other reasons.
1133
+ """
1134
+ try:
1135
+ logger.info(f"Agent '{self.name}': Initializing MCP tools with persistent sessions.")
1136
+
1137
+ # Add timeout for initialization to prevent hanging
1138
+ await asyncio.wait_for(self.mcp_client.initialize(), timeout=30.0)
1139
+
1140
+ mcp_tools = await self.mcp_client.get_tools()
1141
+
1142
+ if not mcp_tools:
1143
+ logger.warning(f"Agent '{self.name}': No MCP tools retrieved from configured servers.")
1144
+ return
1145
+
1146
+ self.mcp_tools.extend(mcp_tools)
1147
+ logger.info(f"Agent '{self.name}': Added {len(mcp_tools)} persistent MCP tools to graph.")
1148
+ self._rebuild_graph()
1149
+
1150
+ except TimeoutError as err:
1151
+ logger.error(f"Agent '{self.name}': MCP initialization timed out")
1152
+ raise RuntimeError(f"Agent '{self.name}': MCP initialization timed out after 30 seconds") from err
1153
+ except Exception as e:
1154
+ logger.error(f"Agent '{self.name}': Failed to initialize persistent MCP tools: {e}", exc_info=True)
1155
+ raise
1156
+
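
Editor's note: the timeout guard above is the standard asyncio.wait_for pattern; a standalone sketch of the same idea (the constant mirrors the 30-second figure hard-coded above, and `client` is a hypothetical object with an async initialize()):

import asyncio

MCP_INIT_TIMEOUT_S = 30.0  # mirrors the hard-coded timeout above

async def init_with_timeout(client) -> None:
    try:
        await asyncio.wait_for(client.initialize(), timeout=MCP_INIT_TIMEOUT_S)
    except TimeoutError as err:  # asyncio.TimeoutError aliases builtin TimeoutError on 3.11+
        raise RuntimeError(
            f"MCP initialization timed out after {MCP_INIT_TIMEOUT_S:.0f} seconds"
        ) from err
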
1157
+ async def cleanup(self) -> None:
1158
+ """Cleanup MCP resources including persistent sessions.
1159
+
1160
+ This method performs best-effort cleanup of MCP client resources.
1161
+ Errors during cleanup are logged but do not raise exceptions to ensure
1162
+ the cleanup process completes gracefully.
1163
+ """
1164
+ if hasattr(self, "mcp_client") and self.mcp_client:
1165
+ try:
1166
+ await self.mcp_client.cleanup()
1167
+ logger.debug(f"Agent '{self.name}': MCP client cleanup completed")
1168
+ except Exception as e:
1169
+ logger.warning(f"Agent '{self.name}': Error during MCP client cleanup: {e}")
1170
+ # Don't re-raise - cleanup should be best-effort
1171
+
1172
+ async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
1173
+ """Asynchronously streams the agent's response in A2A format.
1174
+
1175
+ Args:
1176
+ query: The input query for the agent.
1177
+ **kwargs: Additional keyword arguments.
1178
+
1179
+ Yields:
1180
+ Dictionaries with "status" and "content" keys.
1181
+ Possible statuses: "working", "completed", "failed", "canceled".
1182
+ """
1183
+ await self._ensure_mcp_tools_initialized()
1184
+ async for chunk in self._arun_a2a_stream(query, **kwargs):
1185
+ yield chunk
1186
+
1187
+ async def arun_sse_stream(
1188
+ self,
1189
+ query: str,
1190
+ task_id: str | None = None,
1191
+ context_id: str | None = None,
1192
+ **kwargs: Any,
1193
+ ) -> AsyncGenerator[dict[str, Any], None]:
1194
+ """Stream agent response as SSE-compatible chunks.
1195
+
1196
+ This method wraps arun_a2a_stream and transforms output to the normalized
1197
+ dict format matching A2AConnector.astream_to_agent output, enabling direct
1198
+ streaming without A2A server overhead.
1199
+
1200
+ Args:
1201
+ query: The input query for the agent.
1202
+ task_id: Optional task identifier for the stream.
1203
+ context_id: Optional context identifier for the stream.
1204
+ **kwargs: Additional arguments passed to arun_a2a_stream.
1205
+
1206
+ Yields:
1207
+ SSEChunk dicts with normalized structure:
1208
+ - status: "success" | "error"
1209
+ - task_state: "working" | "completed" | "failed" | "canceled"
1210
+ - content: Text content or None
1211
+ - event_type: Always string (never enum)
1212
+ - final: True for terminal events
1213
+ - metadata: Normalized metadata dict
1214
+ - artifacts: Only present when non-empty
1215
+ """
1216
+ if task_id is None:
1217
+ task_id = str(uuid.uuid4())
1218
+ if context_id is None:
1219
+ context_id = str(uuid.uuid4())
1220
+
1221
+ # Extract pii_mapping from kwargs to pass to transformer (matching A2A executor behavior)
1222
+ pii_mapping = kwargs.get("pii_mapping")
1223
+ transformer = SSEChunkTransformer(task_id=task_id, context_id=context_id, pii_mapping=pii_mapping)
1224
+ try:
1225
+ async for chunk in transformer.transform_stream(self.arun_a2a_stream(query, **kwargs)):
1226
+ yield chunk
1227
+ except Exception as e:
1228
+ logger.error(f"Agent '{self.name}': Error in arun_sse_stream: {e}", exc_info=True)
1229
+ yield SSEChunkTransformer._create_error_chunk(f"Error during streaming: {e}")
1230
+
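
Editor's note: a sketch of consuming the normalized SSE chunks, e.g. from a web endpoint. The framework wiring is illustrative only and not part of this module:

import json

async def sse_body(agent, query: str):
    """Yield SSE 'data:' frames from the normalized chunk stream."""
    async for chunk in agent.arun_sse_stream(query):
        # chunk keys: status, task_state, content, event_type, final, metadata
        yield f"data: {json.dumps(chunk)}\n\n"
        if chunk.get("final"):
            break
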
1231
+ def _get_tool_processing_content(self, tool_names: list[str]) -> str:
1232
+ """Generate appropriate content prefix for tool processing messages.
1233
+
1234
+ Args:
1235
+ tool_names: List of tool names to process.
1236
+
1237
+ Returns:
1238
+ Formatted content string with appropriate prefix.
1239
+ """
1240
+ unique_tool_names = list(dict.fromkeys(tool_names))
1241
+ has_delegation_tools = any(name.startswith("delegate_to") for name in unique_tool_names)
1242
+ content_prefix = "Processing with sub-agents:" if has_delegation_tools else "Processing with tools:"
1243
+ return f"{content_prefix} {', '.join(unique_tool_names)}"
1244
+
1245
+ def _get_tool_completion_content(self, tool_names: list[str]) -> str:
1246
+ """Generate completion message for finished tool executions.
1247
+
1248
+ Args:
1249
+ tool_names: List of tool names to summarize.
1250
+
1251
+ Returns:
1252
+ Content string indicating completion.
1253
+ """
1254
+ unique_tool_names = list(dict.fromkeys(tool_names))
1255
+ has_delegation_tools = any(name.startswith("delegate_to") for name in unique_tool_names)
1256
+ content_prefix = "Completed sub-agents:" if has_delegation_tools else "Completed tools:"
1257
+ return f"{content_prefix} {', '.join(unique_tool_names)}"
1258
+
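
Editor's note: the order-preserving dedup above relies on dict.fromkeys; a worked example including the delegation prefix check:

names = ["search", "delegate_to_writer", "search"]
unique = list(dict.fromkeys(names))  # ['search', 'delegate_to_writer']
prefix = ("Processing with sub-agents:"
          if any(n.startswith("delegate_to") for n in unique)
          else "Processing with tools:")
assert f"{prefix} {', '.join(unique)}" == "Processing with sub-agents: search, delegate_to_writer"
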
1259
+ def _parse_a2a_stream_message(
1260
+ self, message: BaseMessage, state: dict[str, Any] | None = None
1261
+ ) -> tuple[A2AEvent | None, bool]:
1262
+ """Parse LangChain messages into semantically meaningful A2A events.
1263
+
1264
+ This method converts LangChain message types (AIMessage, ToolMessage) into
1265
+ structured A2AEvent objects that preserve semantic information and eliminate
1266
+ the need for string parsing downstream.
1267
+
1268
+ Args:
1269
+ message: The LangChain message to parse (AIMessage, ToolMessage, etc.).
1270
+ state: Optional state dictionary containing pii_mapping and other data.
1271
+
1272
+ Returns:
1273
+ A tuple containing:
1274
+ - A2AEvent | None: The parsed A2A event, or None if message should be skipped.
1275
+ - bool: True if this is a final event that should terminate the stream.
1276
+ """
1277
+ if isinstance(message, AIMessage) and message.tool_calls:
1278
+ return self._create_tool_call_event(message), False
1279
+
1280
+ elif isinstance(message, ToolMessage):
1281
+ return self._create_tool_result_event(message), False
1282
+
1283
+ elif isinstance(message, AIMessage) and message.content:
1284
+ return self._create_ai_message_event(message, state)
1285
+
1286
+ return None, False
1287
+
1288
+ def _link_tool_call_to_previous_status(self, event: A2AEvent) -> None:
1289
+ """Link the tool call event to completed tool steps or the most recent status step.
1290
+
1291
+ Supports both parallel and sequential operation modes:
1292
+ - "parallel": Links to ALL completed tool steps (default for backward compatibility)
1293
+ - "sequential": Links to only the most recent completed tool step
1294
+
1295
+ Args:
1296
+ event: The A2AEvent to link to the previous step(s).
1297
+ """
1298
+ try:
1299
+ thread_id = _THREAD_ID_CVAR.get()
1300
+ if thread_id:
1301
+ metadata = event.get("metadata", {})
1302
+ existing_step_ids = metadata.get("previous_step_ids") or []
1303
+
1304
+ if existing_step_ids:
1305
+ return
1306
+
1307
+ operation_mode = _OPERATION_MODE_CVAR.get() or "parallel"
1308
+
1309
+ coord_completed_steps = self._coordinator_completed_tool_steps_by_thread.get(thread_id, [])
1310
+ completed_steps = coord_completed_steps or self._completed_tool_steps_by_thread.get(thread_id, [])
1311
+ if completed_steps:
1312
+ if operation_mode == "sequential":
1313
+ metadata["previous_step_ids"] = [completed_steps[-1]]
1314
+ else:
1315
+ metadata["previous_step_ids"] = completed_steps
1316
+ event["metadata"] = metadata
1317
+ return
1318
+
1319
+ last_status_id = self._last_status_step_id_by_thread.get(thread_id)
1320
+ if last_status_id:
1321
+ metadata["previous_step_ids"] = [last_status_id]
1322
+ event["metadata"] = metadata
1323
+ except Exception as e:
1324
+ logger.warning("Failed linking tool call to previous step: %s", e, exc_info=True)
1325
+
1326
+ def _register_tool_call_parent_steps(self, event: A2AEvent, tool_calls_details: list[dict]) -> None:
1327
+ """Register parent step IDs for each tool call ID.
1328
+
1329
+ Args:
1330
+ event: The A2AEvent containing the parent step.
1331
+ tool_calls_details: List of tool call details.
1332
+ """
1333
+ try:
1334
+ thread_id = _THREAD_ID_CVAR.get()
1335
+ if thread_id:
1336
+ parent_step_id = event["metadata"].get("step_id")
1337
+ if parent_step_id:
1338
+ parent_map = self._tool_parent_map_by_thread.setdefault(thread_id, {})
1339
+ for tool_call in tool_calls_details:
1340
+ tool_call_id = tool_call.get("id")
1341
+ if tool_call_id:
1342
+ parent_map[str(tool_call_id)] = str(parent_step_id)
1343
+ except Exception as e:
1344
+ logger.warning("Registering tool call parent steps failed: %s", e, exc_info=True)
1345
+
1346
+ def _create_tool_call_event(self, message: AIMessage) -> A2AEvent:
1347
+ """Create an A2AEvent for tool invocation from AIMessage.
1348
+
1349
+ Args:
1350
+ message: AIMessage containing tool calls.
1351
+
1352
+ Returns:
1353
+ A2AEvent with TOOL_CALL event type and structured tool information.
1354
+ """
1355
+ tool_calls_details = [
1356
+ {
1357
+ "id": tool_call.get("id"),
1358
+ "name": tool_call["name"],
1359
+ "args": tool_call["args"],
1360
+ }
1361
+ for tool_call in message.tool_calls
1362
+ ]
1363
+ tool_names = [details["name"] for details in tool_calls_details]
1364
+
1365
+ event = self._create_a2a_event(
1366
+ event_type=A2AStreamEventType.TOOL_CALL,
1367
+ content=self._get_tool_processing_content(tool_names),
1368
+ tool_info={"tool_calls": tool_calls_details, "status": "running"},
1369
+ metadata={"status": Status.RUNNING},
1370
+ is_final=False,
1371
+ step_usage=message.usage_metadata,
1372
+ )
1373
+
1374
+ self._record_emitted_tool_calls(tool_calls_details)
1375
+
1376
+ self._link_tool_call_to_previous_status(event)
1377
+ self._register_tool_call_parent_steps(event, tool_calls_details)
1378
+
1379
+ return event
1380
+
1381
+ def _get_sub_agent_previous_steps(self, message: ToolMessage) -> list[str] | None:
1382
+ """Extract previous step IDs from sub-agent response metadata.
1383
+
1384
+ Args:
1385
+ message: ToolMessage containing response metadata.
1386
+
1387
+ Returns:
1388
+ List of previous step IDs or None if not available.
1389
+ """
1390
+ try:
1391
+ if not hasattr(message, "response_metadata") or not isinstance(message.response_metadata, dict):
1392
+ return None
1393
+
1394
+ sub_prev = message.response_metadata.get("previous_step_ids")
1395
+ if isinstance(sub_prev, list) and sub_prev:
1396
+ return [str(x) for x in sub_prev if isinstance(x, str | int)]
1397
+ return None
1398
+ except Exception as e:
1399
+ logger.warning("Failed extracting sub-agent previous steps: %s", e, exc_info=True)
1400
+ return None
1401
+
1402
+ def _determine_previous_step_ids(self, message: ToolMessage, sub_prev: list[str] | None) -> list[str]:
1403
+ """Determine which previous step IDs to use for the event.
1404
+
1405
+ Args:
1406
+ message: ToolMessage for the tool call.
1407
+ sub_prev: Previous step IDs from sub-agent, if available.
1408
+
1409
+ Returns:
1410
+ List of previous step IDs to use.
1411
+ """
1412
+ if sub_prev:
1413
+ return sub_prev
1414
+
1415
+ try:
1416
+ thread_id = _THREAD_ID_CVAR.get()
1417
+ if thread_id:
1418
+ parent_map = self._tool_parent_map_by_thread.get(thread_id, {})
1419
+ parent_step = parent_map.get(str(message.tool_call_id))
1420
+ if parent_step:
1421
+ return [parent_step]
1422
+ except Exception as e:
1423
+ logger.warning("Determining previous step IDs failed: %s", e, exc_info=True)
1424
+
1425
+ return []
1426
+
1427
+ def _record_tool_completion(self, message: ToolMessage, event: A2AEvent) -> None:
1428
+ """Record tool completion for final event dependency tracking.
1429
+
1430
+ Args:
1431
+ message: ToolMessage for the completed tool.
1432
+ event: The A2AEvent for the tool result.
1433
+ """
1434
+ try:
1435
+ thread_id = _THREAD_ID_CVAR.get()
1436
+ if not thread_id:
1437
+ return
1438
+
1439
+ completed_list = self._completed_tool_steps_by_thread.setdefault(thread_id, [])
1440
+ coord_completed_list = self._coordinator_completed_tool_steps_by_thread.setdefault(thread_id, [])
1441
+
1442
+ event_sid = (event.get("metadata") or {}).get("step_id")
1443
+ if isinstance(event_sid, str) and event_sid:
1444
+ completed_list.append(event_sid)
1445
+ coord_completed_list.append(event_sid)
1446
+
1447
+ sub_prev = self._get_sub_agent_previous_steps(message) or []
1448
+ completed_list.extend(step_id for step_id in sub_prev if isinstance(step_id, str) and step_id)
1449
+
1450
+ self._completed_tool_steps_by_thread[thread_id] = list(dict.fromkeys(completed_list))
1451
+ self._coordinator_completed_tool_steps_by_thread[thread_id] = list(dict.fromkeys(coord_completed_list))
1452
+ except Exception as e:
1453
+ logger.warning("Recording tool completion failed: %s", e, exc_info=True)
1454
+
1455
+ def _create_tool_result_event(self, message: ToolMessage) -> A2AEvent:
1456
+ """Create an A2AEvent for tool completion from ToolMessage.
1457
+
1458
+ Args:
1459
+ message: ToolMessage containing tool execution results.
1460
+
1461
+ Returns:
1462
+ A2AEvent with TOOL_RESULT event type and execution details.
1463
+ """
1464
+ tool_info = self._extract_tool_info_from_message(message)
1465
+ previous_ids = self._determine_previous_step_ids(
1466
+ message,
1467
+ self._get_sub_agent_previous_steps(message),
1468
+ )
1469
+
1470
+ event = self._create_a2a_event(
1471
+ event_type=A2AStreamEventType.TOOL_RESULT,
1472
+ content=self._build_tool_event_content(tool_info["name"], tool_info["output"], message),
1473
+ tool_info={
1474
+ "id": message.tool_call_id,
1475
+ "name": tool_info["name"],
1476
+ "args": tool_info["args"],
1477
+ "output": tool_info["output"],
1478
+ "execution_time": tool_info["execution_time"],
1479
+ },
1480
+ metadata=self._build_tool_event_metadata(tool_info["execution_time"], previous_ids),
1481
+ is_final=False,
1482
+ step_usage=message.response_metadata.get(USAGE_METADATA_KEY),
1483
+ )
1484
+
1485
+ self._propagate_hitl_metadata(message, event)
1486
+ self._record_tool_completion(message, event)
1487
+ self._discard_emitted_tool_call(getattr(message, "tool_call_id", None))
1488
+
1489
+ return event
1490
+
1491
+ def _extract_tool_info_from_message(self, message: ToolMessage) -> dict[str, Any]:
1492
+ """Extract tool details from a ToolMessage.
1493
+
1494
+ Args:
1495
+ message: The ToolMessage to extract information from.
1496
+
1497
+ Returns:
1498
+ Dictionary containing tool name, args, output, and execution time.
1499
+ """
1500
+ tool_call_info = getattr(message, "tool_calls", {})
1501
+ tool_name = getattr(message, "name", None) or tool_call_info.get("name", "unknown")
1502
+ return {
1503
+ "name": tool_name,
1504
+ "args": tool_call_info.get("args", {}),
1505
+ "output": tool_call_info.get("output", message.content),
1506
+ "execution_time": tool_call_info.get("time"),
1507
+ }
1508
+
1509
+ def _build_tool_event_content(self, tool_name: str, tool_output: Any, message: ToolMessage) -> str:
1510
+ """Determine event content for a tool result.
1511
+
1512
+ Args:
1513
+ tool_name: Name of the tool that was executed.
1514
+ tool_output: The output returned by the tool.
1515
+ message: The ToolMessage containing response metadata and tool call information.
1516
+
1517
+ Returns:
1518
+ String content for the tool result event.
1519
+ """
1520
+ response_metadata = getattr(message, "response_metadata", None) or {}
1521
+ hitl_meta = response_metadata.get(MetadataFieldKeys.HITL) if isinstance(response_metadata, dict) else None
1522
+
1523
+ if hitl_meta and hitl_meta.get("required"):
1524
+ return str(tool_output) if tool_output else self._get_tool_processing_content([tool_name])
1525
+
1526
+ return self._get_tool_completion_content([tool_name])
1527
+
1528
+ def _build_tool_event_metadata(
1529
+ self,
1530
+ execution_time: Any,
1531
+ previous_ids: list[str] | None,
1532
+ ) -> dict[str, Any]:
1533
+ """Build metadata payload for tool result events.
1534
+
1535
+ Args:
1536
+ execution_time: Time taken to execute the tool.
1537
+ previous_ids: Optional list of previous step IDs this tool depends on.
1538
+
1539
+ Returns:
1540
+ Dictionary containing status, execution time, and previous step IDs.
1541
+ """
1542
+ return {
1543
+ "status": Status.FINISHED,
1544
+ "time": execution_time,
1545
+ "previous_step_ids": previous_ids,
1546
+ }
1547
+
1548
+ def _propagate_hitl_metadata(self, message: ToolMessage, event: A2AEvent) -> None:
1549
+ """Copy HITL metadata from ToolMessage into the event if available.
1550
+
1551
+ Args:
1552
+ message: The ToolMessage containing response metadata with HITL information.
1553
+ event: The A2AEvent to update with HITL metadata if present.
1554
+ """
1555
+ response_metadata = getattr(message, "response_metadata", None)
1556
+ if not isinstance(response_metadata, dict):
1557
+ return
1558
+
1559
+ hitl_meta = response_metadata.get(MetadataFieldKeys.HITL)
1560
+ if hitl_meta is None:
1561
+ return
1562
+
1563
+ try:
1564
+ hitl_model = HitlMetadata.model_validate(hitl_meta)
1565
+ except ValidationError as exc:
1566
+ raise ValueError("Invalid HITL metadata payload encountered") from exc
1567
+
1568
+ metadata = event.get("metadata")
1569
+ if isinstance(metadata, dict):
1570
+ try:
1571
+ metadata[MetadataFieldKeys.HITL] = hitl_model.as_payload()
1572
+ except Exception as exc: # noqa: BLE001
1573
+ logger.warning("Failed to propagate HITL metadata to event: %s", exc)
1574
+
1575
+ def _create_ai_message_event(
1576
+ self, message: AIMessage, state: dict[str, Any] | None = None
1577
+ ) -> tuple[A2AEvent, bool]:
1578
+ """Create an A2AEvent for AI-generated content from AIMessage.
1579
+
1580
+ Args:
1581
+ message: AIMessage containing AI-generated content.
1582
+ state: Optional state dictionary containing pii_mapping and other data.
1583
+
1584
+ Returns:
1585
+ A tuple containing:
1586
+ - A2AEvent: Either CONTENT_CHUNK or FINAL_RESPONSE event.
1587
+ - bool: True if this is a final response, False for streaming content.
1588
+ """
1589
+ is_final_response = self._is_final_response(message)
1590
+ metadata = self._build_metadata_for_final_response(is_final_response, state)
1591
+ content = deanonymize_final_response_content(
1592
+ content=message.content,
1593
+ is_final_response=is_final_response,
1594
+ metadata=metadata,
1595
+ )
1596
+ event = self._create_a2a_event(
1597
+ event_type=A2AStreamEventType.FINAL_RESPONSE if is_final_response else A2AStreamEventType.CONTENT_CHUNK,
1598
+ content=content,
1599
+ tool_info=None,
1600
+ metadata=metadata,
1601
+ is_final=is_final_response,
1602
+ step_usage=message.usage_metadata,
1603
+ )
1604
+ return event, is_final_response
1605
+
1606
+ def _is_final_response(self, message: AIMessage) -> bool:
1607
+ """Check if the message represents a final response.
1608
+
1609
+ Args:
1610
+ message: AIMessage to check.
1611
+
1612
+ Returns:
1613
+ True if this is a final response, False otherwise.
1614
+ """
1615
+ return bool(message.response_metadata) and message.response_metadata.get("finish_reason") == "stop"
1616
+
1617
+ def _build_metadata_for_final_response(
1618
+ self, is_final_response: bool, state: dict[str, Any] | None = None
1619
+ ) -> dict[str, Any]:
1620
+ """Build metadata for final response including previous_step_ids and pii_mapping.
1621
+
1622
+ Args:
1623
+ is_final_response: Whether this is a final response.
1624
+ state: Optional state dictionary containing pii_mapping and other data.
1625
+
1626
+ Returns:
1627
+ Metadata dictionary with previous_step_ids and pii_mapping if applicable.
1628
+ """
1629
+ metadata: dict[str, Any] = {}
1630
+
1631
+ if not is_final_response:
1632
+ return metadata
1633
+
1634
+ try:
1635
+ previous_step_ids = self._get_previous_step_ids()
1636
+ if previous_step_ids:
1637
+ metadata["previous_step_ids"] = previous_step_ids
1638
+ except Exception as e:
1639
+ logger.warning("Attaching previous_step_ids to final response failed: %s", e, exc_info=True)
1640
+
1641
+ # Add PII mapping if present in state or nested metadata
1642
+ if state:
1643
+ pii_mapping = state.get("pii_mapping") or state.get("metadata", {}).get("pii_mapping")
1644
+ if pii_mapping:
1645
+ metadata[MetadataFieldKeys.PII_MAPPING] = pii_mapping
1646
+
1647
+ return metadata
1648
+
1649
+ def _get_previous_step_ids(self) -> list[str] | None:
1650
+ """Get the list of previous step IDs based on thread context and operation mode.
1651
+
1652
+ Returns:
1653
+ List of step IDs or None if no thread context or steps found.
1654
+ """
1655
+ thread_id = _THREAD_ID_CVAR.get()
1656
+ if not thread_id:
1657
+ return None
1658
+
1659
+ operation_mode = _OPERATION_MODE_CVAR.get() or "parallel"
1660
+
1661
+ coord_ids = self._coordinator_completed_tool_steps_by_thread.get(thread_id, [])
1662
+ if coord_ids:
1663
+ return self._filter_step_ids_by_mode(coord_ids, operation_mode)
1664
+
1665
+ completed_ids = self._completed_tool_steps_by_thread.get(thread_id, [])
1666
+ if completed_ids:
1667
+ return self._filter_step_ids_by_mode(completed_ids, operation_mode)
1668
+
1669
+ return None
1670
+
1671
+ def _filter_step_ids_by_mode(self, step_ids: list[str], operation_mode: str) -> list[str]:
1672
+ """Filter step IDs based on operation mode.
1673
+
1674
+ Args:
1675
+ step_ids: List of step IDs to filter.
1676
+ operation_mode: Either "sequential" or "parallel".
1677
+
1678
+ Returns:
1679
+ Filtered list of step IDs.
1680
+ """
1681
+ if operation_mode == "sequential":
1682
+ return [step_ids[-1]] if step_ids else []
1683
+ else:
1684
+ return step_ids
1685
+
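
Editor's note: concretely, for the two operation modes (assuming `agent` is an instance of this class):

steps = ["step-1", "step-2", "step-3"]
assert agent._filter_step_ids_by_mode(steps, "sequential") == ["step-3"]
assert agent._filter_step_ids_by_mode(steps, "parallel") == steps
assert agent._filter_step_ids_by_mode([], "sequential") == []
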
1686
+ def _process_artifacts(
1687
+ self,
1688
+ item: dict[str, Any],
1689
+ pending_artifacts: list,
1690
+ seen_artifact_hashes: set,
1691
+ ) -> None:
1692
+ """Process artifacts from a graph stream item.
1693
+
1694
+ Args:
1695
+ item: The event item from the graph stream.
1696
+ pending_artifacts: A list of artifacts waiting to be attached to a message.
1697
+ seen_artifact_hashes: A set of hashes of artifacts already processed.
1698
+ """
1699
+ if "artifacts" not in item or not item["artifacts"]:
1700
+ return
1701
+
1702
+ logger.info(f"Agent '{self.name}': Artifacts: {len(item['artifacts'])}")
1703
+ for artifact in item["artifacts"]:
1704
+ artifact_data = str(artifact.get("data", ""))
1705
+ artifact_name = artifact.get("name", "")
1706
+ artifact_hash = hashlib.sha256(f"{artifact_data}:{artifact_name}".encode()).hexdigest()
1707
+
1708
+ if artifact_hash not in seen_artifact_hashes:
1709
+ pending_artifacts.append(artifact)
1710
+ seen_artifact_hashes.add(artifact_hash)
1711
+
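
Editor's note: the dedup key is a SHA-256 digest over "data:name"; a standalone sketch of the same idea with hypothetical artifact dicts:

import hashlib

def artifact_key(artifact: dict) -> str:
    data = str(artifact.get("data", ""))
    name = artifact.get("name", "")
    return hashlib.sha256(f"{data}:{name}".encode()).hexdigest()

seen: set[str] = set()
pending: list[dict] = []
for a in [{"name": "report.csv", "data": "a,b"}, {"name": "report.csv", "data": "a,b"}]:
    if (k := artifact_key(a)) not in seen:
        seen.add(k)
        pending.append(a)
assert len(pending) == 1  # the duplicate artifact is dropped
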
1712
+ def _process_a2a_stream_item(
1713
+ self,
1714
+ item: dict[str, Any],
1715
+ pending_artifacts: list,
1716
+ seen_artifact_hashes: set,
1717
+ processed_message_count: int,
1718
+ ) -> tuple[list[A2AEvent], bool, int]:
1719
+ """Process a single item from the graph stream, handling artifacts and messages.
1720
+
1721
+ This method processes LangGraph stream items and converts them into A2AEvent objects
1722
+ with proper metadata merging, artifact attachment, and reference handling.
1723
+
1724
+ Args:
1725
+ item: The event item from the graph stream containing messages and metadata.
1726
+ pending_artifacts: List of artifacts waiting to be attached to messages.
1727
+ seen_artifact_hashes: Set of hashes of artifacts already processed.
1728
+ processed_message_count: Number of messages already processed from the stream.
1729
+
1730
+ Returns:
1731
+ A tuple containing:
1732
+ - list[A2AEvent]: List of A2A events to yield to the executor.
1733
+ - bool: True if a final event was encountered.
1734
+ - int: Updated count of processed messages.
1735
+ """
1736
+ events_to_yield = []
1737
+ is_final_event = False
1738
+
1739
+ self._process_artifacts(item, pending_artifacts, seen_artifact_hashes)
1740
+ references = item.get("references", [])
1741
+
1742
+ if "messages" not in item or not item["messages"]:
1743
+ return [], False, processed_message_count
1744
+
1745
+ new_messages = item["messages"][processed_message_count:]
1746
+ updated_message_count = len(item["messages"])
1747
+ for message in new_messages:
1748
+ event_data, is_final = self._parse_a2a_stream_message(message, item)
1749
+
1750
+ if event_data and self._should_yield_a2a_event(event_data):
1751
+ self._enhance_event_with_context(event_data, item, pending_artifacts, references, is_final)
1752
+ events_to_yield.append(event_data)
1753
+
1754
+ if is_final:
1755
+ is_final_event = True
1756
+
1757
+ return events_to_yield, is_final_event, updated_message_count
1758
+
1759
+ def _enhance_event_with_context(
1760
+ self,
1761
+ event_data: A2AEvent,
1762
+ stream_item: dict[str, Any],
1763
+ pending_artifacts: list,
1764
+ references: list[Any],
1765
+ is_final: bool,
1766
+ ) -> None:
1767
+ """Enhance A2AEvent with context from the stream item.
1768
+
1769
+ This method adds metadata, artifacts, and references to the A2AEvent
1770
+ based on the current stream item context.
1771
+
1772
+ Args:
1773
+ event_data: The A2AEvent to enhance.
1774
+ stream_item: The stream item containing context information.
1775
+ pending_artifacts: List of artifacts to attach to the event.
1776
+ references: List of references to attach to final events.
1777
+ is_final: Whether this is a final event.
1778
+ """
1779
+ self._merge_event_metadata(event_data, stream_item)
1780
+ self._attach_pending_artifacts(event_data, pending_artifacts)
1781
+
1782
+ if is_final and references:
1783
+ self._attach_references_to_final_event(event_data, references)
1784
+
1785
+ if is_final and stream_item.get(TOTAL_USAGE_KEY):
1786
+ event_data[TOTAL_USAGE_KEY] = stream_item[TOTAL_USAGE_KEY]
1787
+
1788
+ def _merge_previous_step_ids(
1789
+ self,
1790
+ state_prev: list[str | int | None] | None,
1791
+ event_prev: list[str | int | None] | None,
1792
+ ) -> list[str | int] | None:
1793
+ """Merge previous_step_ids from state and event metadata.
1794
+
1795
+ Args:
1796
+ state_prev: Previous step IDs from state metadata.
1797
+ event_prev: Previous step IDs from event metadata.
1798
+
1799
+ Returns:
1800
+ Combined list of previous step IDs, or None if no lists to merge.
1801
+ """
1802
+ if (state_prev is None and event_prev is None) or (
1803
+ not isinstance(state_prev, list)
1804
+ and state_prev is not None
1805
+ and not isinstance(event_prev, list)
1806
+ and event_prev is not None
1807
+ ):
1808
+ return None
1809
+
1810
+ state_list = state_prev if isinstance(state_prev, list) else []
1811
+ event_list = event_prev if isinstance(event_prev, list) else []
1812
+
1813
+ combined: list[Any] = []
1814
+
1815
+ for step_id in event_list:
1816
+ if step_id is not None and step_id not in combined:
1817
+ combined.append(step_id)
1818
+
1819
+ for step_id in state_list:
1820
+ if step_id is not None and step_id not in combined:
1821
+ combined.append(step_id)
1822
+
1823
+ return combined
1824
+
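
Editor's note: event-side IDs keep their order, state-side IDs are appended, and None plus duplicates are dropped. A worked example (assuming `agent` is an instance of this class):

merged = agent._merge_previous_step_ids(
    ["s1", "s2", None],  # state_prev
    ["e1", "s2"],        # event_prev
)
assert merged == ["e1", "s2", "s1"]  # event order first, then new state IDs
assert agent._merge_previous_step_ids(None, None) is None
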
1825
+ def _merge_event_metadata(self, event_data: A2AEvent, stream_item: dict[str, Any]) -> None:
1826
+ """Merge metadata from stream item into the A2AEvent.
1827
+
1828
+ Args:
1829
+ event_data: The A2AEvent to update with merged metadata.
1830
+ stream_item: The stream item containing state metadata.
1831
+ """
1832
+ state_metadata = stream_item.get("metadata", {})
1833
+ existing_metadata = event_data.get("metadata", {})
1834
+ if isinstance(existing_metadata, dict) and isinstance(state_metadata, dict):
1835
+ merged_metadata = {**state_metadata, **existing_metadata}
1836
+
1837
+ state_prev = state_metadata.get("previous_step_ids") or []
1838
+ event_prev = existing_metadata.get("previous_step_ids") or []
1839
+ combined_ids = self._merge_previous_step_ids(state_prev, event_prev)
1840
+ if combined_ids is not None:
1841
+ merged_metadata["previous_step_ids"] = combined_ids
1842
+ else:
1843
+ merged_metadata = state_metadata or existing_metadata
1844
+
1845
+ event_data["metadata"] = merged_metadata
1846
+
1847
+ def _attach_pending_artifacts(self, event_data: A2AEvent, pending_artifacts: list) -> None:
1848
+ """Attach pending artifacts to the A2AEvent and clear the pending list.
1849
+
1850
+ Args:
1851
+ event_data: The A2AEvent to attach artifacts to.
1852
+ pending_artifacts: List of artifacts to attach and clear.
1853
+ """
1854
+ if pending_artifacts:
1855
+ event_data["artifacts"] = pending_artifacts.copy()
1856
+ pending_artifacts.clear()
1857
+
1858
+ def _attach_references_to_final_event(self, event_data: A2AEvent, references: list[Any]) -> None:
1859
+ """Attach references to final events.
1860
+
1861
+ Args:
1862
+ event_data: The final A2AEvent to attach references to.
1863
+ references: List of references to attach.
1864
+ """
1865
+ if references:
1866
+ event_data["references"] = references
1867
+
1868
+ def _setup_thread_context(self, config: dict[str, Any]) -> tuple[str | None, Any]:
1869
+ """Set up thread context for step linkage during streaming.
1870
+
1871
+ Args:
1872
+ config: Graph configuration
1873
+
1874
+ Returns:
1875
+ Tuple of (thread_id, context_token)
1876
+ """
1877
+ current_thread_id: str | None = None
1878
+ try:
1879
+ configurable = config.get("configurable", {}) # type: ignore[assignment]
1880
+ thread_key = self.thread_id_key or "thread_id"
1881
+ raw_thread_id = configurable.get(thread_key)
+ current_thread_id = str(raw_thread_id) if raw_thread_id else None
1882
+ except Exception:
1883
+ current_thread_id = None
1884
+
1885
+ token = None
1886
+
1887
+ try:
1888
+ start_step_counter_scope()
1889
+ except Exception as exc:
1890
+ logger.debug("Starting step counter scope failed: %s", exc)
1891
+
1892
+ if current_thread_id:
1893
+ token = _THREAD_ID_CVAR.set(current_thread_id)
1894
+ self._tool_parent_map_by_thread[current_thread_id] = {}
1895
+ self._completed_tool_steps_by_thread[current_thread_id] = []
1896
+ self._emitted_tool_calls_by_thread[current_thread_id] = set()
1897
+
1898
+ return current_thread_id, token
1899
+
1900
+ def _cleanup_thread_context(self, current_thread_id: str | None, token: Any) -> None:
1901
+ """Clean up thread context and reset context variables.
1902
+
1903
+ Args:
1904
+ current_thread_id: The thread ID to clean up
1905
+ token: The context token to reset
1906
+ """
1907
+ try:
1908
+ end_step_counter_scope()
1909
+ except Exception as exc:
1910
+ logger.debug("Ending step counter scope failed: %s", exc)
1911
+
1912
+ if current_thread_id:
1913
+ self._tool_parent_map_by_thread.pop(current_thread_id, None)
1914
+ self._completed_tool_steps_by_thread.pop(current_thread_id, None)
1915
+ self._last_status_step_id_by_thread.pop(current_thread_id, None)
1916
+ self._emitted_tool_calls_by_thread.pop(current_thread_id, None)
1917
+
1918
+ if token is not None:
1919
+ try:
1920
+ _THREAD_ID_CVAR.reset(token)
1921
+ except ValueError as e:
1922
+ logger.debug("Context variable token from different context, skipping reset: %s", e)
1923
+ except Exception as e:
1924
+ logger.error("Resetting _THREAD_ID_CVAR failed: %s", e, exc_info=True)
1925
+ try:
1926
+ _STEP_LIMIT_CONFIG_CVAR.set(None)
1927
+ except Exception:
1928
+ logger.debug("Failed to reset step limit config context; continuing cleanup.")
1929
+
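
Editor's note: the set/reset token handling above is the standard contextvars pattern; a minimal standalone sketch of the setup/cleanup pairing, including the skip on a token from a different context:

import contextvars

_THREAD_ID: contextvars.ContextVar[str | None] = contextvars.ContextVar("thread_id", default=None)

def scoped_thread(thread_id: str) -> None:
    token = _THREAD_ID.set(thread_id)   # setup: returns a reset token
    try:
        assert _THREAD_ID.get() == thread_id
    finally:
        try:
            _THREAD_ID.reset(token)     # cleanup: restore the prior value
        except ValueError:
            pass  # token was created in a different context; skip, as above

scoped_thread("thread-123")
assert _THREAD_ID.get() is None
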
1930
+ def _handle_stream_item(
1931
+ self, item: tuple, pending_artifacts: list, seen_artifact_hashes: set, processed_message_count: int
1932
+ ) -> tuple[list[A2AEvent], bool, int]:
1933
+ """Handle a single stream item.
1934
+
1935
+ Args:
1936
+ item: Stream item tuple (mode, data)
1937
+ pending_artifacts: List of pending artifacts
1938
+ seen_artifact_hashes: Set of seen artifact hashes
1939
+ processed_message_count: Current message count
1940
+
1941
+ Returns:
1942
+ Tuple of (events_to_yield, is_final, updated_message_count)
1943
+ """
1944
+ mode, data = item
1945
+
1946
+ if mode == StreamMode.CUSTOM:
1947
+ delegation_event: A2AEvent = data
1948
+ if self._should_yield_a2a_event(delegation_event):
1949
+ return [delegation_event], False, processed_message_count
1950
+ return [], False, processed_message_count
1951
+ elif mode == StreamMode.VALUES:
1952
+ stream_data = data
1953
+ else:
1954
+ return [], False, processed_message_count
1955
+
1956
+ events, is_final, updated_message_count = self._process_a2a_stream_item(
1957
+ stream_data, pending_artifacts, seen_artifact_hashes, processed_message_count
1958
+ )
1959
+ return events, is_final, updated_message_count
1960
+
1961
+ async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
1962
+ """Internal implementation of arun_a2a_stream without MCP handling.
1963
+
1964
+ Args:
1965
+ query: The input query for the agent.
1966
+ **kwargs: Additional keyword arguments.
1967
+
1968
+ Yields:
1969
+ Dictionaries with "status" and "content" keys for status events.
1970
+ Status events may include "artifacts" field when tools generate artifacts.
1971
+ Possible statuses: "working", "completed", "failed", "canceled".
1972
+ """
1973
+ context = self._initialize_streaming_context(query, **kwargs)
1974
+
1975
+ try:
1976
+ async for event in self._handle_streaming_process(context):
1977
+ yield event
1978
+
1979
+ self._persist_memory_if_needed(context)
1980
+
1981
+ async for event in self._ensure_final_completion(context):
1982
+ yield event
1983
+
1984
+ except Exception as e:
1985
+ async for event in self._handle_streaming_error(context, e):
1986
+ yield event
1987
+ finally:
1988
+ self._cleanup_thread_context(context.current_thread_id, context.token)
1989
+
1990
+ def _initialize_streaming_context(self, query: str, **kwargs: Any) -> "_StreamingContext":
1991
+ """Initialize the streaming context with all necessary setup.
1992
+
1993
+ Args:
1994
+ query: The user's input query to process.
1995
+ **kwargs: Additional keyword arguments including optional metadata and configuration.
1996
+
1997
+ Returns:
1998
+ Configured _StreamingContext object ready for streaming execution.
1999
+ """
2000
+ files = kwargs.pop("files", [])
2001
+ if files is None:
2002
+ files = []
2003
+
2004
+ memory_user_id: str | None = kwargs.get("memory_user_id")
2005
+
2006
+ # Create config first to ensure thread_id is generated
2007
+ config = self._create_graph_config(**kwargs)
2008
+ thread_id = self._get_thread_id_from_config(config)
2009
+
2010
+ augmented_query = augment_query_with_file_paths(query=query, files=files)
2011
+ graph_input = self._prepare_graph_input(augmented_query, thread_id=thread_id, **kwargs)
2012
+
2013
+ current_thread_id, token = self._setup_thread_context(config)
2014
+
2015
+ if self.enable_a2a_token_streaming and self.model:
2016
+ self.model.disable_streaming = False
2017
+
2018
+ return _StreamingContext(
2019
+ original_query=query,
2020
+ graph_input=graph_input,
2021
+ config=config,
2022
+ memory_user_id=memory_user_id,
2023
+ current_thread_id=current_thread_id,
2024
+ token=token,
2025
+ enable_token_streaming=self.enable_a2a_token_streaming,
2026
+ )
2027
+
2028
+ async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
2029
+ """Handle the main streaming process including initial status and event processing.
2030
+
2031
+ Args:
2032
+ context: The streaming context containing query, config, and thread information.
2033
+
2034
+ Yields:
2035
+ Streaming events including initial status and processed streaming items.
2036
+ """
2037
+ initial_status_event = self._create_initial_status_event()
2038
+ self._log_streaming_event_debug("initial_status", initial_status_event)
2039
+ yield initial_status_event
2040
+
2041
+ async for event in self._process_streaming_items(context):
2042
+ self._log_streaming_event_debug("process_stream_item", event)
2043
+ yield event
2044
+
2045
+ def _create_initial_status_event(self) -> dict[str, Any]:
2046
+ """Create and setup the initial status event."""
2047
+ initial_status_event = self._create_a2a_event(
2048
+ event_type=A2AStreamEventType.STATUS_UPDATE, content=DefaultStepMessages.EN.value
2049
+ )
2050
+
2051
+ try:
2052
+ thread_id = _THREAD_ID_CVAR.get()
2053
+ if thread_id:
2054
+ step_id = initial_status_event.get("metadata", {}).get("step_id")
2055
+ if step_id:
2056
+ self._last_status_step_id_by_thread[thread_id] = str(step_id)
2057
+ except Exception:
2058
+ pass
2059
+
2060
+ return initial_status_event
2061
+
2062
+ async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
2063
+ """Process individual streaming items from the LangGraph execution.
2064
+
2065
+ Handles the core streaming logic by iterating through items produced by
2066
+ the compiled LangGraph, processing both VALUES and CUSTOM stream modes,
2067
+ and managing final event generation.
2068
+
2069
+ Args:
2070
+ context: The streaming context containing graph input, configuration,
2071
+ and state tracking information.
2072
+
2073
+ Yields:
2074
+ dict[str, Any]: A2A events generated from the stream processing,
2075
+ including status updates, final responses, and completion events.
2076
+ """
2077
+ if context.enable_token_streaming:
2078
+ if self.event_emitter is None:
2079
+ self.event_emitter = self._create_default_event_emitter()
2080
+ elif not self._get_stream_handler():
2081
+ logger.warning(
2082
+ "Agent '%s': No StreamEventHandler found in event_emitter. "
2083
+ "Reinitializing event_emitter using default emitter.",
2084
+ self.name,
2085
+ )
2086
+ self.event_emitter = self._create_default_event_emitter()
2087
+
2088
+ async for event in self._process_a2a_streaming_with_tokens(context):
2089
+ yield event
2090
+ else:
2091
+ enhanced_input = context.graph_input
2092
+ async for event in self._create_graph_stream_events(enhanced_input, context):
2093
+ yield event
2094
+
2095
+ async def _process_a2a_streaming_with_tokens(
2096
+ self, context: "_StreamingContext"
2097
+ ) -> AsyncGenerator[dict[str, Any], None]:
2098
+ """Process A2A streaming with token streaming support using aiostream.
2099
+
2100
+ Supports both LM Invoker and LangChain models by detecting the appropriate
2101
+ token source and merging with graph events.
2102
+
2103
+ Uses aiostream to merge token streaming and graph execution streams,
2104
+ yielding events in real-time order as they arrive.
2105
+
2106
+ Args:
2107
+ context: The streaming context containing graph input, configuration,
2108
+ and state tracking information.
2109
+
2110
+ Yields:
2111
+ dict[str, Any]: A2A events generated from the stream processing,
2112
+ including status updates, final responses, and completion events.
2113
+
2114
+ Raises:
2115
+ RuntimeError: If token streaming is requested but event_emitter is not available.
2116
+ """
2117
+ if not self.event_emitter:
2118
+ raise RuntimeError(f"Agent '{self.name}': Event emitter required for token streaming")
2119
+ if astream is None:
2120
+ raise RuntimeError(
2121
+ "aiostream is required for token streaming support. "
2122
+ "Install the 'aiostream' dependency or disable token streaming."
2123
+ )
2124
+
2125
+ try:
2126
+ if self._has_lm_invoker():
2127
+ token_stream, enhanced_input = self._create_token_stream(context)
2128
+ graph_stream = self._create_graph_stream_events(enhanced_input, context)
2129
+
2130
+ merged = astream.merge(token_stream, graph_stream)
2131
+ async with merged.stream() as merged_stream:
2132
+ async for event in merged_stream:
2133
+ yield event
2134
+ else:
2135
+ _, enhanced_input = self._create_token_stream(context)
2136
+ async for event in self._create_graph_stream_events(enhanced_input, context):
2137
+ yield event
2138
+
2139
+ except Exception as e:
2140
+ if self.event_emitter is not None:
2141
+ await self.event_emitter.close()
2142
+ logger.error(f"Agent '{self.name}': Error during A2A token streaming: {e}")
2143
+ raise
2144
+
2145
+ async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[dict[str, Any], None]:
2146
+ """Generate A2A events from LM Invoker token stream.
2147
+
2148
+ Uses StreamEventHandler to capture tokens emitted by LM Invoker.
2149
+
2150
+ Yields:
2151
+ A2A events generated from LM Invoker token stream.
2152
+
2153
+ Raises:
2154
+ RuntimeError: If no StreamEventHandler is found in event_emitter.
2155
+ """
2156
+ stream_handler = self._get_stream_handler()
2157
+
2158
+ try:
2159
+ async for event in stream_handler.stream():
2160
+ if event is None:
2161
+ break
2162
+
2163
+ token_event = self._convert_raw_token_to_a2a_event(event)
2164
+ if token_event:
2165
+ yield token_event
2166
+ except Exception as e:
2167
+ logger.error(f"Agent '{self.name}': LM Invoker token stream error: {e}")
2168
+
2169
+ def _create_token_stream(
2170
+ self,
2171
+ context: "_StreamingContext",
2172
+ ) -> tuple[AsyncGenerator[dict[str, Any], None] | None, dict[str, Any]]:
2173
+ """Create appropriate token stream and enhanced input for the active model backend.
2174
+
2175
+ Args:
2176
+ context: Streaming context containing graph input and configuration.
2177
+
2178
+ Returns:
2179
+ Tuple of (token_stream, enhanced_input). For LM Invoker backends, token_stream
2180
+ yields A2A token events and enhanced_input includes the event emitter; for
2181
+ LangChain backends, token_stream is None (tokens arrive via the "messages" stream mode).
2182
+ """
2183
+ if self._has_lm_invoker():
2184
+ token_stream = self._create_lm_invoker_token_stream()
2185
+ enhanced_input = {**context.graph_input, "event_emitter": self.event_emitter}
2186
+ else:
2187
+ token_stream = None
2188
+ enhanced_input = context.graph_input
2189
+
2190
+ return token_stream, enhanced_input
2191
+
2192
+ async def _create_graph_stream_events(
2193
+ self, enhanced_input: dict[str, Any], context: "_StreamingContext"
2194
+ ) -> AsyncGenerator[dict[str, Any], None]:
2195
+ """Generate A2A events from graph execution.
2196
+
2197
+ Args:
2198
+ enhanced_input: The enhanced input for the graph execution.
2199
+ context: The streaming context containing state tracking information.
2200
+
2201
+ Yields:
2202
+ A2A events generated from graph execution.
2203
+ """
2204
+ try:
2205
+ stream_modes = self._get_stream_modes(context)
2206
+ graph_execution = self._compiled_graph.astream(
2207
+ enhanced_input, config=context.config, stream_mode=stream_modes
2208
+ )
2209
+
2210
+ async for item in graph_execution:
2211
+ stream_mode, stream_data = item
2212
+
2213
+ if stream_mode == StreamMode.MESSAGES:
2214
+ async for token_event in self._process_message_stream_item(stream_data):
2215
+ yield token_event
2216
+ continue
2217
+
2218
+ async for event in self._process_graph_stream_item(item, stream_mode, stream_data, context):
2219
+ yield event
2220
+ except Exception as e:
2221
+ logger.error(f"Agent '{self.name}': Graph processing error: {e}")
2222
+ raise
2223
+
2224
+ def _get_stream_modes(self, context: "_StreamingContext") -> list[str]:
2225
+ """Determine stream modes based on token streaming configuration.
2226
+
2227
+ Args:
2228
+ context: Streaming context containing token streaming configuration.
2229
+
2230
+ Returns:
2231
+ List of stream modes to use for graph execution.
2232
+ """
2233
+ stream_modes = [StreamMode.VALUES, StreamMode.CUSTOM]
2234
+
2235
+ if context.enable_token_streaming and not self._has_lm_invoker():
2236
+ stream_modes.append(StreamMode.MESSAGES)
2237
+
2238
+ return stream_modes
2239
+
2240
+ async def _process_graph_stream_item(
2241
+ self,
2242
+ item: tuple[str, Any],
2243
+ stream_mode: str,
2244
+ stream_data: Any,
2245
+ context: "_StreamingContext",
2246
+ ) -> AsyncGenerator[dict[str, Any], None]:
2247
+ """Process a single graph stream item and yield A2A events.
2248
+
2249
+ Args:
2250
+ item: The stream item tuple (mode, data).
2251
+ stream_mode: The stream mode of this item.
2252
+ stream_data: The data from the stream item.
2253
+ context: Streaming context for state tracking.
2254
+
2255
+ Yields:
2256
+ A2A events generated from the stream item.
2257
+ """
2258
+ if stream_mode == StreamMode.VALUES:
+ context.final_state = copy.copy(stream_data)
2259
+
2260
+ events, is_final, context.processed_message_count = self._handle_stream_item(
2261
+ item, context.pending_artifacts, context.seen_artifact_hashes, context.processed_message_count
2262
+ )
2263
+
2264
+ if is_final:
2265
+ context.final_event_yielded = True
2266
+
2267
+ for event in events:
2268
+ self._capture_final_content_if_needed(context, event)
2269
+ processed_event = self._update_final_response_for_streaming(context, event)
2270
+ yield processed_event
2271
+
2272
+ async def _process_message_stream_item(
2273
+ self, message_data: tuple[Any, dict[str, Any]]
2274
+ ) -> AsyncGenerator[dict[str, Any], None]:
2275
+ """Process message stream items to extract token events.
2276
+
2277
+ The "messages" stream mode yields tuples of (AIMessageChunk, metadata).
2278
+ This method extracts token content from AIMessageChunk and converts it
2279
+ to A2A CONTENT_CHUNK events with TOKEN kind.
2280
+
2281
+ Args:
2282
+ message_data: Tuple of (message_chunk, metadata) from messages stream
2283
+
2284
+ Yields:
2285
+ A2A CONTENT_CHUNK events with TOKEN kind
2286
+ """
2287
+ try:
2288
+ message_chunk, _ = message_data
2289
+
2290
+ # Skip chunks whose response_metadata contains a finish_reason,
2291
+ # since those are responses from a sub-agent.
2292
+ if hasattr(message_chunk, "response_metadata") and message_chunk.response_metadata:
2293
+ if "finish_reason" in message_chunk.response_metadata:
2294
+ return
2295
+
2296
+ is_tool_call_event = hasattr(message_chunk, "tool_calls") and message_chunk.tool_calls
2297
+ has_content_event = hasattr(message_chunk, "content") and message_chunk.content
2298
+
2299
+ if has_content_event and not is_tool_call_event:
2300
+ token_content = message_chunk.content
2301
+ token_event = self._create_a2a_event(
2302
+ event_type=A2AStreamEventType.CONTENT_CHUNK,
2303
+ content=token_content,
2304
+ metadata={MetadataFieldKeys.KIND: Kind.TOKEN},
2305
+ )
2306
+ yield token_event
2307
+
2308
+ except Exception as e:
2309
+ logger.error(f"Agent '{self.name}': Error processing message stream item: {e}")
2310
+
2311
+ def _update_final_response_for_streaming(
2312
+ self, context: "_StreamingContext", event: dict[str, Any]
2313
+ ) -> dict[str, Any]:
2314
+ """Update final response events with appropriate streaming configuration.
2315
+
2316
+ For FINAL_RESPONSE events, this method updates the metadata and optionally clears
2317
+ the content when token streaming is active to prevent sending duplicate content.
2318
+
2319
+ Args:
2320
+ context: The streaming context containing streaming configuration
2321
+ event: The event dictionary to process
2322
+
2323
+ Returns:
2324
+ The processed event dictionary with updated metadata and content
2325
+ """
2326
+ if event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
2327
+ event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = False
2328
+ if context.enable_token_streaming:
2329
+ event["content"] = ""
2330
+ event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = True
2331
+ return event
2332
+
2333
+ def _convert_raw_token_to_a2a_event(self, raw_event: str) -> dict[str, Any] | None:
2334
+ """Parse raw token event into A2A event.
2335
+
2336
+ Args:
2337
+ raw_event: The raw JSON-encoded token event string from the stream handler.
2338
+
2339
+ Returns:
2340
+ A CONTENT_CHUNK A2A event carrying the token content, or None if the
2341
+ payload is empty or cannot be parsed.
2342
+ """
2343
+ try:
2344
+ event_data = json.loads(raw_event)
2345
+ content = event_data.get("value", "")
2346
+ if content:
2347
+ return self._create_a2a_event(
2348
+ event_type=A2AStreamEventType.CONTENT_CHUNK,
2349
+ content=content,
2350
+ metadata={MetadataFieldKeys.KIND: Kind.TOKEN},
2351
+ )
2352
+ except Exception as e:
2353
+ logger.debug(f"Agent '{self.name}': Error parsing token event: {e}")
2354
+ return None
2355
+
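
Editor's note: a raw handler event like the JSON below becomes a token CONTENT_CHUNK, while empty or malformed payloads yield None. The event shape is assumed from the parsing above, and `agent` is an instance of this class:

raw = '{"value": "Hel"}'
event = agent._convert_raw_token_to_a2a_event(raw)
# the returned A2A event is assumed to expose its text under "content"
assert event is not None and event["content"] == "Hel"

assert agent._convert_raw_token_to_a2a_event('{"value": ""}') is None
assert agent._convert_raw_token_to_a2a_event("not-json") is None
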
2356
+ def _capture_final_content_if_needed(self, context: "_StreamingContext", event: dict[str, Any]) -> None:
2357
+ """Capture final content from A2A events for memory persistence.
2358
+
2359
+ Monitors A2A events for final response content and triggers early memory
2360
+ persistence to ensure conversation content is saved even if consumers
2361
+ stop reading the stream after receiving the final response.
2362
+
2363
+ Args:
2364
+ context: The streaming context containing memory state and user
2365
+ identification information.
2366
+ event: The A2A event dictionary that may contain final response content.
2367
+ """
2368
+ try:
2369
+ if isinstance(event, dict) and event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
2370
+ context.last_final_content = event.get("content")
2371
+ should_save_early = (
2372
+ self._memory_enabled()
2373
+ and (not context.saved_memory)
2374
+ and isinstance(context.last_final_content, str)
2375
+ and context.last_final_content
2376
+ )
2377
+ if should_save_early:
2378
+ try:
2379
+ logger.info(
2380
+ "Agent '%s': A2A persisting memory early (len=%d) for user_id='%s'",
2381
+ self.name,
2382
+ len(context.last_final_content),
2383
+ context.memory_user_id or self.memory_agent_id,
2384
+ )
2385
+ except Exception:
2386
+ pass
2387
+ try:
2388
+ self._memory_save_interaction(
2389
+ user_text=context.original_query,
2390
+ ai_text=context.last_final_content,
2391
+ memory_user_id=context.memory_user_id,
2392
+ )
2393
+ context.saved_memory = True
2394
+ except Exception:
2395
+ pass
2396
+ except Exception:
2397
+ pass
2398
+
2399
+ def _persist_memory_if_needed(self, context: "_StreamingContext") -> None:
2400
+ """Persist memory using the final state output (best-effort).
2401
+
2402
+ Attempts to save the conversation to memory using the best available
2403
+ content source, first trying captured final content, then falling back
2404
+ to extracting content from the final state.
2405
+
2406
+ Args:
2407
+ context: The streaming context containing the final state, captured
2408
+ content, and memory persistence state.
2409
+ """
2410
+ try:
2411
+ if context.last_final_content is not None:
2412
+ final_text = context.last_final_content
2413
+ elif isinstance(context.final_state, dict):
2414
+ final_text = self._extract_output_from_final_state(context.final_state)
2415
+ else:
2416
+ final_text = ""
2417
+ if (not context.saved_memory) and isinstance(final_text, str) and final_text:
2418
+ try:
2419
+ logger.info(
2420
+ "Agent '%s': A2A persisting memory after stream (len=%d) for user_id='%s'",
2421
+ self.name,
2422
+ len(final_text),
2423
+ context.memory_user_id or self.memory_agent_id,
2424
+ )
2425
+ except Exception:
2426
+ pass
2427
+ self._memory_save_interaction(
2428
+ user_text=context.original_query, ai_text=final_text, memory_user_id=context.memory_user_id
2429
+ )
2430
+ context.saved_memory = True
2431
+ except Exception:
2432
+ pass
2433
+
2434
+    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
+        """Ensure final completion events are yielded if not already done.
+
+        Args:
+            context: The streaming context containing pending artifacts and
+                other state information.
+
+        Yields:
+            dict[str, Any]: The final completion event.
+        """
+        if not context.final_event_yielded:
+            completion_event = self._create_completion_event(context.pending_artifacts, context.final_state)
+            self._log_streaming_event_debug("final_completion", completion_event)
+            yield completion_event
+
+    async def _handle_streaming_error(
+        self,
+        context: "_StreamingContext",
+        error: Exception,
+    ) -> AsyncGenerator[dict[str, Any], None]:
+        """Handle streaming errors gracefully.
+
+        Provides error handling for the A2A streaming process, ensuring errors
+        are properly logged and communicated to the client while preserving
+        any pending artifacts generated before the error occurred.
+
+        Args:
+            context: The streaming context containing pending artifacts and
+                other state information.
+            error: The exception that occurred during streaming.
+
+        Yields:
+            dict[str, Any]: An error event containing the failure status and
+                error message, optionally including any pending artifacts.
+        """
+        logger.error(f"Error in agent stream: {error}", exc_info=True)
+        error_event: dict[str, Any] = {"status": "failed", "content": f"Error: {str(error)}"}
+
+        if context.pending_artifacts:
+            error_event["artifacts"] = context.pending_artifacts
+
+        self._log_streaming_event_debug("error_event", error_event)
+        yield error_event
+
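Note the error path yields a plain status dict rather than a structured A2AEvent; the shape, with illustrative values:

    {"status": "failed", "content": "Error: <message>", "artifacts": [...]}
    # "artifacts" appears only when context.pending_artifacts is non-empty.
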
+    def _extract_references_from_state(self, final_state: dict[str, Any] | None) -> list[Chunk] | None:
+        """Extract and validate references from final state.
+
+        Args:
+            final_state: The final state of the agent.
+
+        Returns:
+            Validated references or None if not available.
+        """
+        if final_state and isinstance(final_state, dict) and final_state.get("references"):
+            try:
+                return validate_references(final_state["references"])
+            except Exception:
+                pass
+        return None
+
+    def _extract_total_usage_from_state(self, final_state: dict[str, Any] | None) -> dict[str, Any] | None:
+        """Extract total usage from final state.
+
+        Args:
+            final_state: The final state of the agent.
+
+        Returns:
+            Total usage metadata or None if not available.
+        """
+        if final_state and isinstance(final_state, dict) and final_state.get(TOTAL_USAGE_KEY):
+            return final_state[TOTAL_USAGE_KEY]
+        return None
+
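Both extractors read optional keys off the final graph state. A final state that satisfies them could look like this (sketch; only the top-level keys come from the code above, the field names inside the usage dict are assumptions):

    final_state = {
        "references": [...],  # validated via validate_references(...)
        TOTAL_USAGE_KEY: {"input_tokens": 123, "output_tokens": 45},
    }
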
+    def _build_completion_metadata(self, final_state: dict[str, Any] | None) -> dict[str, Any]:
+        """Build metadata for the completion event.
+
+        Args:
+            final_state: The final state of the agent (currently unused; accepted
+                for signature parity with the other completion helpers).
+
+        Returns:
+            Metadata dictionary with previous_step_ids when available.
+        """
+        metadata: dict[str, Any] = {}
+
+        # Add previous step IDs if available
+        try:
+            thread_id = _THREAD_ID_CVAR.get()
+            if thread_id and thread_id in self._completed_tool_steps_by_thread:
+                completed_ids = self._completed_tool_steps_by_thread[thread_id]
+                if completed_ids:
+                    metadata["previous_step_ids"] = list(completed_ids)
+        except Exception as e:
+            logger.warning("Attaching previous_step_ids to completion event failed: %s", e, exc_info=True)
+
+        return metadata
+
+    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any] | None) -> A2AEvent:
+        """Create the completion event with artifacts and references if available.
+
+        Args:
+            pending_artifacts: List of artifacts waiting to be attached to a message.
+            final_state: The final state of the agent.
+
+        Returns:
+            An A2AEvent dictionary marking the final response, carrying artifacts,
+            references, completion metadata, and total usage when available.
+        """
+        artifacts = pending_artifacts if pending_artifacts else None
+        references = self._extract_references_from_state(final_state)
+        total_usage = self._extract_total_usage_from_state(final_state)
+        metadata = self._build_completion_metadata(final_state)
+
+        return self._create_a2a_event(
+            event_type=A2AStreamEventType.FINAL_RESPONSE,
+            content="Stream finished.",
+            tool_info=None,
+            metadata=metadata,
+            is_final=True,
+            artifacts=artifacts,
+            references=references,
+            step_usage=None,
+            total_usage=total_usage,
+        )
+
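Together with _create_a2a_event below, the completion event delivered to clients has roughly this shape (values illustrative):

    {
        "event_type": A2AStreamEventType.FINAL_RESPONSE,
        "content": "Stream finished.",
        "metadata": {"agent_name": "...", "step_id": "final_012", "previous_step_ids": [...]},
        "tool_info": None,
        "is_final": True,
        "artifacts": [...],   # None when nothing is pending
        "references": [...],  # None when absent or validation fails
        # plus step_usage / total_usage entries under STEP_USAGE_KEY / TOTAL_USAGE_KEY
    }
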
+    def _extract_tool_name_prefix(self, tool_name: str) -> str:
+        """Extract a meaningful prefix from a tool name.
+
+        Args:
+            tool_name: The name of the tool.
+
+        Returns:
+            A short, meaningful prefix (at most four characters).
+        """
+        if tool_name.startswith("delegate_to_"):
+            agent_name = tool_name[12:]  # strip the "delegate_to_" prefix (12 chars)
+            if agent_name.endswith("Agent"):
+                agent_name = agent_name[:-5]  # drop the "Agent" suffix (5 chars)
+            return agent_name.lower()[:4]
+
+        if "_" in tool_name:
+            parts = tool_name.split("_")
+            # Prefer the first part that is not a generic tool-name word.
+            for part in parts:
+                if part not in ["tool", "generator", "calculator", "forecast"]:
+                    return part[:4]
+            return parts[0][:4]
+        else:
+            return tool_name[:4]
+
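Tracing the branches with a few hypothetical tool names:

    agent._extract_tool_name_prefix("delegate_to_WeatherAgent")  # -> "weat"
    agent._extract_tool_name_prefix("currency_calculator")       # -> "curr"
    agent._extract_tool_name_prefix("tool_generator")            # -> "tool" (every part is generic)
    agent._extract_tool_name_prefix("search")                    # -> "sear"
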
+    def _generate_tool_call_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+        """Generate step_id for tool call events.
+
+        Args:
+            tool_info: Tool information.
+            counter: Step counter.
+
+        Returns:
+            Generated step_id.
+        """
+        if not tool_info or not tool_info.get("tool_calls"):
+            return f"tool_start_{counter:03d}"
+
+        tool_calls = tool_info["tool_calls"]
+        if not tool_calls:
+            return f"tool_start_{counter:03d}"
+
+        prefixes = [self._extract_tool_name_prefix(tc.get("name", "")) or "unkn" for tc in tool_calls]
+        delegation_flags = self._get_delegation_info_from_tool_calls(tool_calls)
+
+        if len(tool_calls) == 1:
+            category = "agent" if delegation_flags[0] else "tool"
+            return f"{category}_{prefixes[0]}_start_{counter:03d}"
+
+        combined_name = "".join(prefixes).strip()[:6]
+        combined_name = combined_name or "multi"
+
+        if all(delegation_flags):
+            category = "agent"
+        elif any(delegation_flags):
+            category = "mixed"
+        else:
+            category = "tool"
+
+        return f"{category}_{combined_name}_parent_{counter:03d}"
+
+    def _generate_tool_result_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+        """Generate step_id for tool result events.
+
+        Args:
+            tool_info: Tool information.
+            counter: Step counter.
+
+        Returns:
+            Generated step_id.
+        """
+        if not tool_info:
+            return f"tool_done_{counter:03d}"
+
+        tool_name = tool_info.get("name", "")
+        prefix = self._extract_tool_name_prefix(tool_name) or "unkn"
+        category = "agent" if self._is_delegation_tool_from_info(tool_info) else "tool"
+        return f"{category}_{prefix}_done_{counter:03d}"
+
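With those prefixes, the generated step ids look like this (hypothetical tool calls; counter values arbitrary):

    # Single delegation call -> "agent_weat_start_007"
    agent._generate_tool_call_step_id({"tool_calls": [{"name": "delegate_to_WeatherAgent"}]}, 7)

    # Two plain tools -> "tool_currwe_parent_008" (prefixes joined, truncated to six chars)
    agent._generate_tool_call_step_id(
        {"tool_calls": [{"name": "currency_calculator"}, {"name": "weather_forecast"}]}, 8
    )

    # Matching result event -> "agent_weat_done_009"
    agent._generate_tool_result_step_id({"name": "delegate_to_WeatherAgent"}, 9)
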
+    @staticmethod
+    def _is_delegation_tool_name(tool_name: str) -> bool:
+        """Check if a tool name corresponds to a delegation (sub-agent) tool.
+
+        This method maintains backward compatibility by checking the tool name pattern.
+        For new tools created by DelegationToolManager, use _is_delegation_tool() instead.
+
+        Args:
+            tool_name: The name of the tool to check.
+
+        Returns:
+            bool: True if the tool name indicates a delegation tool.
+        """
+        return isinstance(tool_name, str) and tool_name.startswith("delegate_to_")
+
+    @staticmethod
+    def _is_delegation_tool(tool_instance: Any) -> bool:
+        """Check delegation status based on metadata when available.
+
+        Args:
+            tool_instance: The tool instance to check for delegation metadata.
+
+        Returns:
+            True if the tool is marked as a delegation tool, False otherwise.
+        """
+        metadata = getattr(tool_instance, "metadata", None)
+        if not metadata or not hasattr(metadata, "get"):
+            return False
+
+        return bool(metadata.get("is_delegation_tool"))
+
+    def _get_delegation_info_from_tool_calls(self, tool_calls: list[dict[str, Any]] | None) -> list[bool]:
+        """Return delegation flags for each tool call using hybrid detection.
+
+        Args:
+            tool_calls: List of tool call dictionaries containing tool information.
+
+        Returns:
+            List of boolean flags indicating delegation status for each tool call.
+        """
+        if not tool_calls:
+            return []
+
+        delegation_flags: list[bool] = []
+        for tc in tool_calls:
+            if not isinstance(tc, dict):
+                logger.warning("Unexpected tool call payload type: %s", type(tc))
+                delegation_flags.append(False)
+                continue
+
+            delegation_flags.append(self._is_delegation_tool_from_info(tc))
+
+        return delegation_flags
+
+    def _is_delegation_tool_from_info(self, tool_info: dict[str, Any] | None) -> bool:
+        """Check delegation status from tool metadata, falling back to the name pattern.
+
+        Args:
+            tool_info: Dictionary containing tool information including name and instance.
+
+        Returns:
+            True if the tool is identified as a delegation tool, False otherwise.
+        """
+        if not isinstance(tool_info, dict):
+            logger.warning("Unexpected tool info payload type: %s", type(tool_info))
+            return False
+
+        tool_instance = tool_info.get("tool_instance")
+        if tool_instance and self._is_delegation_tool(tool_instance):
+            return True
+
+        return self._is_delegation_tool_name(tool_info.get("name", ""))
+
+    def _generate_meaningful_step_id(
+        self, event_type: A2AStreamEventType, tool_info: dict[str, Any] | None = None
+    ) -> str:
+        """Generate a meaningful step_id based on event type and tool information.
+
+        Args:
+            event_type: The type of event (tool_call, tool_result, final_response, etc.).
+            tool_info: Tool information containing tool names and IDs.
+
+        Returns:
+            A meaningful step_id string.
+        """
+        try:
+            counter = get_next_step_number()
+
+            step_id_generators = {
+                A2AStreamEventType.TOOL_CALL: lambda: self._generate_tool_call_step_id(tool_info, counter),
+                A2AStreamEventType.TOOL_RESULT: lambda: self._generate_tool_result_step_id(tool_info, counter),
+                A2AStreamEventType.FINAL_RESPONSE: lambda: f"final_{counter:03d}",
+                A2AStreamEventType.CONTENT_CHUNK: lambda: f"content_{counter:03d}",
+            }
+
+            generator = step_id_generators.get(event_type)
+            if generator:
+                return generator()
+
+            event_value = event_type.value if hasattr(event_type, "value") else str(event_type)
+            return f"{event_value}_{counter:03d}"
+
+        except Exception:
+            return f"stp_{uuid.uuid4().hex[:8]}"
+
+    def _create_a2a_event(  # noqa: PLR0913
+        self,
+        event_type: A2AStreamEventType,
+        content: str,
+        metadata: dict[str, Any] | None = None,
+        tool_info: dict[str, Any] | None = None,
+        thinking_and_activity_info: dict[str, Any] | None = None,
+        is_final: bool = False,
+        artifacts: list | None = None,
+        references: list | None = None,
+        step_usage: dict[str, Any] | None = None,
+        total_usage: dict[str, Any] | None = None,
+    ) -> A2AEvent:
+        """Create a structured A2AEvent dictionary.
+
+        Args:
+            event_type: The semantic type of the event.
+            content: The main text content of the event.
+            metadata: Additional metadata.
+            tool_info: Tool-specific information.
+            thinking_and_activity_info: Thinking and activity info from the model.
+            is_final: Whether this is a final event.
+            artifacts: List of artifacts to attach to the event.
+            references: List of references to attach to the event.
+            step_usage: Step-level token usage information.
+            total_usage: Total token usage information.
+
+        Returns:
+            A dictionary conforming to the A2AEvent TypedDict.
+        """
+        enriched_metadata: dict[str, Any] = metadata.copy() if isinstance(metadata, dict) else {}
+        if "agent_name" not in enriched_metadata:
+            enriched_metadata["agent_name"] = self.name
+        if "step_id" not in enriched_metadata:
+            enriched_metadata["step_id"] = self._generate_meaningful_step_id(event_type, tool_info)
+        if "previous_step_ids" not in enriched_metadata:
+            enriched_metadata["previous_step_ids"] = []
+
+        # Cumulative time since the first STATUS_UPDATE is intentionally not set
+        # here; the server executor enforces it for all SSE events.
+
+        event = {
+            "event_type": event_type,
+            "content": content,
+            "metadata": enriched_metadata,
+            "tool_info": tool_info,
+            "is_final": is_final,
+            "artifacts": artifacts,
+            "references": references,
+            STEP_USAGE_KEY: step_usage,
+            TOTAL_USAGE_KEY: total_usage,
+        }
+
+        if thinking_and_activity_info is not None:
+            event["thinking_and_activity_info"] = thinking_and_activity_info
+
+        try:
+            content_preview = content if isinstance(content, str) else str(content)
+            logger.info(
+                "A2A emitting event: type=%s step_id=%s final=%s preview=%s",
+                getattr(event_type, "value", event_type),
+                enriched_metadata.get("step_id"),
+                is_final,
+                content_preview[:120].replace("\n", " "),
+            )
+        except Exception:
+            logger.debug("A2A emitting event (logging preview failed)", exc_info=True)
+
+        return event
+
+    def _resolve_tool_event_type(self, event_type_raw: Any) -> A2AStreamEventType | None:
+        """Normalize a raw event type to ``A2AStreamEventType``.
+
+        Args:
+            event_type_raw: Raw ``event_type`` value from a streaming chunk.
+
+        Returns:
+            The resolved ``A2AStreamEventType`` when supported, otherwise ``None``.
+        """
+        if isinstance(event_type_raw, A2AStreamEventType):
+            return event_type_raw
+        if isinstance(event_type_raw, str):
+            try:
+                return A2AStreamEventType(event_type_raw)
+            except ValueError:
+                return None
+        return None
+
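Normalization accepts the enum itself or its string value and rejects everything else (assuming "tool_call" is the enum member's string value):

    agent._resolve_tool_event_type(A2AStreamEventType.TOOL_CALL)  # -> A2AStreamEventType.TOOL_CALL
    agent._resolve_tool_event_type("tool_call")                   # -> A2AStreamEventType.TOOL_CALL
    agent._resolve_tool_event_type("not_a_real_type")             # -> None (ValueError swallowed)
    agent._resolve_tool_event_type(42)                            # -> None (unsupported type)
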
+    @staticmethod
+    def _is_supported_tool_event(event_type: A2AStreamEventType) -> bool:
+        """Return True when the event type is a tool-related streaming event.
+
+        Args:
+            event_type: Candidate event type to evaluate.
+
+        Returns:
+            True when the event type should be forwarded to the client.
+        """
+        return event_type in {
+            A2AStreamEventType.TOOL_CALL,
+            A2AStreamEventType.TOOL_RESULT,
+            A2AStreamEventType.STATUS_UPDATE,
+        }
+
+    def _build_tool_activity_payload(
+        self,
+        event_type: A2AStreamEventType,
+        metadata: dict[str, Any] | None,
+        tool_info: dict[str, Any] | None,
+        activity_info: dict[str, Any] | None,
+    ) -> dict[str, Any] | None:
+        """Ensure tool events carry activity payloads per the streaming contract.
+
+        Args:
+            event_type: Stream event type emitted by the tool.
+            metadata: Optional metadata accompanying the chunk.
+            tool_info: Tool details provided by the emitting runner.
+            activity_info: Pre-built activity payload to reuse when present.
+
+        Returns:
+            Activity dictionary ready to be serialized with the tool chunk.
+        """
+        if event_type not in (A2AStreamEventType.TOOL_CALL, A2AStreamEventType.TOOL_RESULT):
+            return activity_info
+
+        if activity_info:
+            return activity_info
+
+        activity_context = self._compose_tool_activity_context(metadata, tool_info)
+        return create_tool_activity_info(activity_context)
+
+    def _compose_tool_activity_context(
+        self,
+        metadata: dict[str, Any] | None,
+        tool_info: dict[str, Any] | None,
+    ) -> dict[str, Any] | None:
+        """Create a context dictionary for downstream activity message generation.
+
+        Args:
+            metadata: Metadata payload extracted from the streaming chunk.
+            tool_info: Tool descriptor containing ids and display names.
+
+        Returns:
+            A merged context dictionary or None when no data was provided.
+        """
+        activity_context: dict[str, Any] | None = None
+        if isinstance(metadata, dict):
+            activity_context = metadata.copy()
+        if isinstance(tool_info, dict):
+            if activity_context is None:
+                activity_context = {"tool_info": tool_info}
+            else:
+                activity_context.setdefault("tool_info", tool_info)
+        return activity_context
+
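The merge rules in _compose_tool_activity_context, traced with hypothetical payloads:

    agent._compose_tool_activity_context({"step": 1}, {"name": "search"})
    # -> {"step": 1, "tool_info": {"name": "search"}}
    agent._compose_tool_activity_context(None, {"name": "search"})
    # -> {"tool_info": {"name": "search"}}
    agent._compose_tool_activity_context({"tool_info": {"name": "orig"}}, {"name": "other"})
    # -> setdefault keeps the existing entry: {"tool_info": {"name": "orig"}}
    agent._compose_tool_activity_context(None, None)
    # -> None
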
+    def _create_tool_streaming_event(self, chunk: dict[str, Any], writer: StreamWriter, tool_name: str) -> None:
+        """Create and emit tool streaming events.
+
+        Only processes TOOL_CALL, TOOL_RESULT, and STATUS_UPDATE event types.
+
+        Args:
+            chunk: Streaming chunk from the tool.
+            writer: Stream writer to emit events.
+            tool_name: Name of the tool producing the chunk.
+        """
+        event_type = self._resolve_tool_event_type(chunk.get("event_type"))
+        if not event_type or not self._is_supported_tool_event(event_type):
+            return
+
+        tool_info = chunk.get("tool_info")
+        metadata = chunk.get("metadata")
+
+        # Drop tool_result chunks that carry a tool_calls list but no result id.
+        if (
+            event_type == A2AStreamEventType.TOOL_RESULT
+            and isinstance(tool_info, dict)
+            and not tool_info.get("id")
+            and isinstance(tool_info.get("tool_calls"), list)
+            and tool_info.get("tool_calls")
+        ):
+            logger.info(
+                "A2A skipping streaming tool_result without id (tool=%s)",
+                tool_info.get("name"),
+            )
+            return
+
+        activity_info = self._build_tool_activity_payload(
+            event_type,
+            metadata if isinstance(metadata, dict) else None,
+            tool_info if isinstance(tool_info, dict) else None,
+            chunk.get("thinking_and_activity_info"),
+        )
+
+        a2a_event = self._create_a2a_event(
+            event_type=event_type,
+            content=chunk.get("content", f"Processing with tools: {tool_name}"),
+            metadata=metadata,
+            tool_info=tool_info,
+            thinking_and_activity_info=activity_info,
+        )
+        writer(a2a_event)
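
End to end, a runner chunk forwarded by this method might look like the following; the payload is hypothetical and assumes "tool_result" is a valid A2AStreamEventType value:

    chunk = {
        "event_type": "tool_result",
        "content": "72°F and sunny",
        "tool_info": {"id": "call_abc123", "name": "weather_forecast"},
        "metadata": {"source": "runner"},
    }
    agent._create_tool_streaming_event(chunk, writer, tool_name="weather_forecast")
    # writer(...) receives the enriched A2AEvent; its metadata gains agent_name,
    # a step_id such as "tool_weat_done_013", and previous_step_ids.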