aip-agents-binary 0.5.25b8__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aip-agents-binary might be problematic; consult the registry's advisory page for details.

Files changed (296)
  1. aip_agents/__init__.pyi +19 -0
  2. aip_agents/a2a/__init__.pyi +3 -0
  3. aip_agents/a2a/server/__init__.pyi +4 -0
  4. aip_agents/a2a/server/base_executor.pyi +73 -0
  5. aip_agents/a2a/server/google_adk_executor.pyi +51 -0
  6. aip_agents/a2a/server/langflow_executor.pyi +43 -0
  7. aip_agents/a2a/server/langgraph_executor.pyi +47 -0
  8. aip_agents/a2a/types.pyi +132 -0
  9. aip_agents/agent/__init__.pyi +9 -0
  10. aip_agents/agent/base_agent.pyi +221 -0
  11. aip_agents/agent/base_langgraph_agent.py +137 -68
  12. aip_agents/agent/base_langgraph_agent.pyi +233 -0
  13. aip_agents/agent/google_adk_agent.pyi +141 -0
  14. aip_agents/agent/google_adk_constants.pyi +3 -0
  15. aip_agents/agent/hitl/__init__.pyi +6 -0
  16. aip_agents/agent/hitl/config.pyi +15 -0
  17. aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
  18. aip_agents/agent/hitl/manager.pyi +200 -0
  19. aip_agents/agent/hitl/models.pyi +3 -0
  20. aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
  21. aip_agents/agent/hitl/prompt/base.pyi +24 -0
  22. aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
  23. aip_agents/agent/hitl/registry.pyi +101 -0
  24. aip_agents/agent/interface.pyi +81 -0
  25. aip_agents/agent/interfaces.pyi +44 -0
  26. aip_agents/agent/langflow_agent.pyi +133 -0
  27. aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
  28. aip_agents/agent/langgraph_react_agent.py +58 -14
  29. aip_agents/agent/langgraph_react_agent.pyi +131 -0
  30. aip_agents/agent/system_instruction_context.pyi +13 -0
  31. aip_agents/clients/__init__.pyi +4 -0
  32. aip_agents/clients/langflow/__init__.pyi +4 -0
  33. aip_agents/clients/langflow/client.pyi +140 -0
  34. aip_agents/clients/langflow/types.pyi +7 -0
  35. aip_agents/constants.pyi +7 -0
  36. aip_agents/examples/__init__.pyi +0 -0
  37. aip_agents/examples/compare_streaming_client.py +2 -2
  38. aip_agents/examples/compare_streaming_client.pyi +48 -0
  39. aip_agents/examples/compare_streaming_server.py +1 -1
  40. aip_agents/examples/compare_streaming_server.pyi +18 -0
  41. aip_agents/examples/demo_memory_recall.pyi +58 -0
  42. aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
  43. aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
  44. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
  45. aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
  46. aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
  47. aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
  48. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
  49. aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
  50. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
  51. aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
  52. aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
  53. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
  54. aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
  55. aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
  56. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
  57. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
  58. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
  59. aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
  60. aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
  61. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
  62. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
  63. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
  64. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
  65. aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
  66. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
  67. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
  68. aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
  69. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
  70. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
  71. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
  72. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
  73. aip_agents/examples/hello_world_google_adk.pyi +5 -0
  74. aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
  75. aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
  76. aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
  77. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
  78. aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
  79. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
  80. aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
  81. aip_agents/examples/hello_world_langchain.pyi +5 -0
  82. aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
  83. aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
  84. aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
  85. aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
  86. aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
  87. aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
  88. aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
  89. aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
  90. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
  91. aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
  92. aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
  93. aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
  94. aip_agents/examples/hello_world_langgraph.pyi +5 -0
  95. aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
  96. aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
  97. aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
  98. aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
  99. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
  100. aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
  101. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
  102. aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
  103. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
  104. aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
  105. aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
  106. aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
  107. aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
  108. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
  109. aip_agents/examples/hello_world_pii_logger.pyi +5 -0
  110. aip_agents/examples/hello_world_sentry.pyi +21 -0
  111. aip_agents/examples/hello_world_step_limits.pyi +17 -0
  112. aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
  113. aip_agents/examples/hello_world_tool_output_client.py +9 -0
  114. aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
  115. aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
  116. aip_agents/examples/hitl_demo.pyi +67 -0
  117. aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
  118. aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
  119. aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
  120. aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
  121. aip_agents/examples/todolist_planning_a2a_langchain_client.py +2 -2
  122. aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
  123. aip_agents/examples/todolist_planning_a2a_langgraph_server.py +1 -1
  124. aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
  125. aip_agents/examples/tools/__init__.pyi +9 -0
  126. aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
  127. aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
  128. aip_agents/examples/tools/data_generator_tool.pyi +15 -0
  129. aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
  130. aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
  131. aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
  132. aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
  133. aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
  134. aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
  135. aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
  136. aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
  137. aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
  138. aip_agents/examples/tools/random_chart_tool.pyi +20 -0
  139. aip_agents/examples/tools/serper_tool.pyi +16 -0
  140. aip_agents/examples/tools/stock_tools.pyi +36 -0
  141. aip_agents/examples/tools/table_generator_tool.pyi +22 -0
  142. aip_agents/examples/tools/time_tool.pyi +15 -0
  143. aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
  144. aip_agents/guardrails/__init__.pyi +6 -0
  145. aip_agents/guardrails/engines/__init__.pyi +4 -0
  146. aip_agents/guardrails/engines/base.py +6 -6
  147. aip_agents/guardrails/engines/base.pyi +61 -0
  148. aip_agents/guardrails/engines/nemo.pyi +46 -0
  149. aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
  150. aip_agents/guardrails/exceptions.pyi +23 -0
  151. aip_agents/guardrails/manager.pyi +42 -0
  152. aip_agents/guardrails/middleware.pyi +87 -0
  153. aip_agents/guardrails/schemas.pyi +43 -0
  154. aip_agents/guardrails/utils.pyi +19 -0
  155. aip_agents/mcp/__init__.pyi +0 -0
  156. aip_agents/mcp/client/__init__.pyi +5 -0
  157. aip_agents/mcp/client/base_mcp_client.pyi +148 -0
  158. aip_agents/mcp/client/connection_manager.py +36 -1
  159. aip_agents/mcp/client/connection_manager.pyi +51 -0
  160. aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
  161. aip_agents/mcp/client/google_adk/client.pyi +75 -0
  162. aip_agents/mcp/client/langchain/__init__.pyi +3 -0
  163. aip_agents/mcp/client/langchain/client.pyi +48 -0
  164. aip_agents/mcp/client/persistent_session.py +318 -68
  165. aip_agents/mcp/client/persistent_session.pyi +122 -0
  166. aip_agents/mcp/client/session_pool.pyi +101 -0
  167. aip_agents/mcp/client/transports.py +33 -2
  168. aip_agents/mcp/client/transports.pyi +132 -0
  169. aip_agents/mcp/utils/__init__.pyi +0 -0
  170. aip_agents/mcp/utils/config_validator.pyi +82 -0
  171. aip_agents/memory/__init__.pyi +5 -0
  172. aip_agents/memory/adapters/__init__.pyi +4 -0
  173. aip_agents/memory/adapters/base_adapter.pyi +150 -0
  174. aip_agents/memory/adapters/mem0.pyi +22 -0
  175. aip_agents/memory/base.pyi +60 -0
  176. aip_agents/memory/constants.pyi +25 -0
  177. aip_agents/memory/factory.pyi +24 -0
  178. aip_agents/memory/guidance.pyi +3 -0
  179. aip_agents/memory/simple_memory.pyi +23 -0
  180. aip_agents/middleware/__init__.pyi +5 -0
  181. aip_agents/middleware/base.pyi +75 -0
  182. aip_agents/middleware/manager.pyi +84 -0
  183. aip_agents/middleware/todolist.pyi +125 -0
  184. aip_agents/schema/__init__.pyi +9 -0
  185. aip_agents/schema/a2a.pyi +40 -0
  186. aip_agents/schema/agent.pyi +65 -0
  187. aip_agents/schema/hitl.pyi +89 -0
  188. aip_agents/schema/langgraph.pyi +28 -0
  189. aip_agents/schema/model_id.pyi +54 -0
  190. aip_agents/schema/step_limit.pyi +63 -0
  191. aip_agents/schema/storage.pyi +21 -0
  192. aip_agents/sentry/__init__.pyi +3 -0
  193. aip_agents/sentry/sentry.pyi +48 -0
  194. aip_agents/storage/__init__.pyi +8 -0
  195. aip_agents/storage/base.pyi +58 -0
  196. aip_agents/storage/clients/__init__.pyi +3 -0
  197. aip_agents/storage/clients/minio_client.pyi +137 -0
  198. aip_agents/storage/config.pyi +29 -0
  199. aip_agents/storage/providers/__init__.pyi +5 -0
  200. aip_agents/storage/providers/base.pyi +88 -0
  201. aip_agents/storage/providers/memory.pyi +79 -0
  202. aip_agents/storage/providers/object_storage.pyi +98 -0
  203. aip_agents/tools/__init__.pyi +9 -0
  204. aip_agents/tools/browser_use/__init__.pyi +14 -0
  205. aip_agents/tools/browser_use/action_parser.pyi +18 -0
  206. aip_agents/tools/browser_use/browser_use_tool.py +8 -0
  207. aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
  208. aip_agents/tools/browser_use/llm_config.pyi +52 -0
  209. aip_agents/tools/browser_use/minio_storage.pyi +109 -0
  210. aip_agents/tools/browser_use/schemas.pyi +32 -0
  211. aip_agents/tools/browser_use/session.pyi +4 -0
  212. aip_agents/tools/browser_use/session_errors.pyi +53 -0
  213. aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
  214. aip_agents/tools/browser_use/streaming.py +2 -0
  215. aip_agents/tools/browser_use/streaming.pyi +81 -0
  216. aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
  217. aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
  218. aip_agents/tools/browser_use/types.pyi +45 -0
  219. aip_agents/tools/code_sandbox/__init__.pyi +3 -0
  220. aip_agents/tools/code_sandbox/constant.pyi +4 -0
  221. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
  222. aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
  223. aip_agents/tools/constants.pyi +138 -0
  224. aip_agents/tools/document_loader/__init__.pyi +7 -0
  225. aip_agents/tools/document_loader/base_reader.pyi +75 -0
  226. aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
  227. aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
  228. aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
  229. aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
  230. aip_agents/tools/gl_connector/__init__.pyi +3 -0
  231. aip_agents/tools/gl_connector/tool.pyi +74 -0
  232. aip_agents/tools/gl_connector_tools.pyi +39 -0
  233. aip_agents/tools/memory_search/__init__.pyi +5 -0
  234. aip_agents/tools/memory_search/base.pyi +69 -0
  235. aip_agents/tools/memory_search/mem0.pyi +19 -0
  236. aip_agents/tools/memory_search/schema.pyi +15 -0
  237. aip_agents/tools/memory_search_tool.pyi +3 -0
  238. aip_agents/tools/time_tool.pyi +16 -0
  239. aip_agents/tools/tool_config_injector.pyi +26 -0
  240. aip_agents/tools/web_search/__init__.pyi +3 -0
  241. aip_agents/tools/web_search/serper_tool.pyi +19 -0
  242. aip_agents/types/__init__.pyi +36 -0
  243. aip_agents/types/a2a_events.pyi +3 -0
  244. aip_agents/utils/__init__.pyi +11 -0
  245. aip_agents/utils/a2a_connector.pyi +146 -0
  246. aip_agents/utils/artifact_helpers.pyi +203 -0
  247. aip_agents/utils/constants.pyi +10 -0
  248. aip_agents/utils/datetime/__init__.pyi +4 -0
  249. aip_agents/utils/datetime/normalization.pyi +95 -0
  250. aip_agents/utils/datetime/timezone.pyi +48 -0
  251. aip_agents/utils/env_loader.pyi +10 -0
  252. aip_agents/utils/event_handler_registry.pyi +23 -0
  253. aip_agents/utils/file_prompt_utils.pyi +21 -0
  254. aip_agents/utils/final_response_builder.pyi +34 -0
  255. aip_agents/utils/formatter_llm_client.pyi +71 -0
  256. aip_agents/utils/langgraph/__init__.pyi +3 -0
  257. aip_agents/utils/langgraph/converter.pyi +49 -0
  258. aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
  259. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
  260. aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
  261. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
  262. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
  263. aip_agents/utils/langgraph/tool_output_management.py +80 -0
  264. aip_agents/utils/langgraph/tool_output_management.pyi +329 -0
  265. aip_agents/utils/logger.pyi +60 -0
  266. aip_agents/utils/metadata/__init__.pyi +5 -0
  267. aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
  268. aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
  269. aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
  270. aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
  271. aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
  272. aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
  273. aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
  274. aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
  275. aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
  276. aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
  277. aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
  278. aip_agents/utils/metadata_helper.pyi +117 -0
  279. aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
  280. aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
  281. aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
  282. aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
  283. aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
  284. aip_agents/utils/pii/__init__.pyi +5 -0
  285. aip_agents/utils/pii/pii_handler.pyi +96 -0
  286. aip_agents/utils/pii/pii_helper.pyi +78 -0
  287. aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
  288. aip_agents/utils/reference_helper.pyi +81 -0
  289. aip_agents/utils/sse_chunk_transformer.pyi +166 -0
  290. aip_agents/utils/step_limit_manager.pyi +112 -0
  291. aip_agents/utils/token_usage_helper.pyi +60 -0
  292. {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/METADATA +51 -48
  293. aip_agents_binary-0.6.0.dist-info/RECORD +566 -0
  294. aip_agents_binary-0.5.25b8.dist-info/RECORD +0 -290
  295. {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/WHEEL +0 -0
  296. {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/top_level.txt +0 -0
@@ -21,13 +21,13 @@ from concurrent.futures import Future
21
21
  from contextlib import suppress
22
22
  from contextvars import ContextVar
23
23
  from dataclasses import dataclass
24
- from typing import Annotated, Any
24
+ from typing import Annotated, Any, cast
25
25
 
26
26
  from a2a.types import AgentCard
27
27
  from aiostream import stream as astream
28
- from gllm_core.event import EventEmitter
29
- from gllm_core.event.handler import StreamEventHandler
30
- from gllm_core.schema import Chunk
28
+ from gllm_core.event import EventEmitter # type: ignore[import-untyped]
29
+ from gllm_core.event.handler import StreamEventHandler # type: ignore[import-untyped]
30
+ from gllm_core.schema import Chunk # type: ignore[import-untyped]
31
31
  from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
32
32
  from langchain_core.tools import BaseTool
33
33
  from langgraph.graph import StateGraph
@@ -197,6 +197,7 @@ class BaseLangGraphAgent(BaseAgent):
197
197
  self.enable_a2a_token_streaming = enable_a2a_token_streaming
198
198
  self.event_emitter = event_emitter
199
199
  self.checkpointer = checkpointer
200
+ self.tool_output_manager = None
200
201
 
201
202
  self._mem0_client: Any | None = None
202
203
  self.memory: BaseMemory | None = None
@@ -384,10 +385,13 @@ class BaseLangGraphAgent(BaseAgent):
384
385
  try:
385
386
  user_id = override_user_id or self.memory_agent_id
386
387
  if hasattr(self.memory, MemoryMethod.SEARCH):
387
- results = self.memory.search(query=query, user_id=user_id, limit=self.memory_retrieval_limit) # type: ignore[attr-defined]
388
+ results = self.memory.search( # type: ignore[attr-defined]
389
+ query=query,
390
+ user_id=user_id,
391
+ limit=self.memory_retrieval_limit,
392
+ )
388
393
  return results if isinstance(results, list) else []
389
- else:
390
- return []
394
+ return []
391
395
  except Exception as e: # noqa: BLE001
392
396
  logger.debug(f"Memory: search ignored error: {e}")
393
397
  return []
@@ -415,7 +419,11 @@ class BaseLangGraphAgent(BaseAgent):
415
419
  future = save_async(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)
416
420
  self._watch_memory_future(future, user_id)
417
421
  elif hasattr(self.memory, MemoryMethod.SAVE_INTERACTION):
418
- self.memory.save_interaction(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id) # type: ignore[attr-defined]
422
+ self.memory.save_interaction( # type: ignore[attr-defined]
423
+ user_text=str(user_text),
424
+ ai_text=str(ai_text),
425
+ user_id=user_id,
426
+ )
419
427
  else:
420
428
  logger.warning(
421
429
  "Memory: save_interaction method NOT available on memory adapter "
@@ -560,7 +568,11 @@ class BaseLangGraphAgent(BaseAgent):
560
568
  return
561
569
 
562
570
  try:
563
- tool.set_tool_config(tool_config_data)
571
+ set_tool_config = getattr(tool, "set_tool_config", None)
572
+ if callable(set_tool_config):
573
+ set_tool_config(tool_config_data)
574
+ else:
575
+ raise AttributeError("set_tool_config not available")
564
576
  logger.info(f"Agent '{self.name}': Configured tool '{tool.name}' with agent defaults: {tool_config_data}")
565
577
  except Exception as e:
566
578
  logger.warning(f"Agent '{self.name}': Failed to configure tool '{tool.name}': {e}")
@@ -598,7 +610,7 @@ class BaseLangGraphAgent(BaseAgent):
598
610
  self._sanitize_tool_names()
599
611
  try:
600
612
  if self.state_schema:
601
- graph_builder = StateGraph(self.state_schema)
613
+ graph_builder: StateGraph = StateGraph(self.state_schema)
602
614
  else:
603
615
 
604
616
  class DefaultAgentState(TypedDict):
@@ -715,7 +727,7 @@ class BaseLangGraphAgent(BaseAgent):
715
727
  return None
716
728
  last_item = list_state[-1]
717
729
  if isinstance(last_item, AIMessage) and getattr(last_item, "content", None) is not None:
718
- output_content = last_item.content
730
+ output_content = self._normalize_event_content(last_item.content)
719
731
  elif isinstance(last_item, str):
720
732
  output_content = last_item
721
733
  return output_content
@@ -995,7 +1007,7 @@ class BaseLangGraphAgent(BaseAgent):
995
1007
  key = self.thread_id_key or "thread_id"
996
1008
  return configurable.get(key)
997
1009
 
998
- def _process_langgraph_event(self, event: dict[str, Any]) -> str | dict[str, Any] | None:
1010
+ def _process_langgraph_event(self, event: Any) -> str | dict[str, Any] | A2AEvent | None:
999
1011
  """Process a single LangGraph streaming event.
1000
1012
 
1001
1013
  Args:
@@ -1045,6 +1057,36 @@ class BaseLangGraphAgent(BaseAgent):
1045
1057
 
1046
1058
  return True
1047
1059
 
1060
+ @staticmethod
1061
+ def _normalize_usage_metadata(usage: Any | None) -> dict[str, Any] | None:
1062
+ """Normalize usage metadata to a dictionary when possible.
1063
+
1064
+ Args:
1065
+ usage: Usage metadata from LangChain messages.
1066
+
1067
+ Returns:
1068
+ A dictionary copy when usage is available, otherwise None.
1069
+ """
1070
+ if usage is None:
1071
+ return None
1072
+ if isinstance(usage, dict):
1073
+ return dict(usage)
1074
+ return cast(dict[str, Any], usage)
1075
+
1076
+ @staticmethod
1077
+ def _normalize_event_content(content: Any) -> str:
1078
+ """Normalize event content to a string payload.
1079
+
1080
+ Args:
1081
+ content: Raw content payload from LangChain/LangGraph.
1082
+
1083
+ Returns:
1084
+ String representation suitable for A2A events.
1085
+ """
1086
+ if isinstance(content, str):
1087
+ return content
1088
+ return json.dumps(content)
1089
+
1048
1090
  async def _stream_with_langgraph(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
1049
1091
  """Handle streaming for LangChain models using LangGraph's native streaming.
1050
1092
 
@@ -1135,9 +1177,13 @@ class BaseLangGraphAgent(BaseAgent):
1135
1177
  logger.info(f"Agent '{self.name}': Initializing MCP tools with persistent sessions.")
1136
1178
 
1137
1179
  # Add timeout for initialization to prevent hanging
1138
- await asyncio.wait_for(self.mcp_client.initialize(), timeout=30.0)
1180
+ mcp_client = self.mcp_client
1181
+ if mcp_client is None:
1182
+ return
1183
+
1184
+ await asyncio.wait_for(mcp_client.initialize(), timeout=30.0)
1139
1185
 
1140
- mcp_tools = await self.mcp_client.get_tools()
1186
+ mcp_tools = await mcp_client.get_tools()
1141
1187
 
1142
1188
  if not mcp_tools:
1143
1189
  logger.warning(f"Agent '{self.name}': No MCP tools retrieved from configured servers.")
@@ -1169,7 +1215,7 @@ class BaseLangGraphAgent(BaseAgent):
1169
1215
  logger.warning(f"Agent '{self.name}': Error during MCP client cleanup: {e}")
1170
1216
  # Don't re-raise - cleanup should be best-effort
1171
1217
 
1172
- async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
1218
+ async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
1173
1219
  """Asynchronously streams the agent's response in A2A format.
1174
1220
 
1175
1221
  Args:
@@ -1190,7 +1236,7 @@ class BaseLangGraphAgent(BaseAgent):
1190
1236
  task_id: str | None = None,
1191
1237
  context_id: str | None = None,
1192
1238
  **kwargs: Any,
1193
- ) -> AsyncGenerator[dict[str, Any], None]:
1239
+ ) -> AsyncGenerator[A2AEvent, None]:
1194
1240
  """Stream agent response as SSE-compatible chunks.
1195
1241
 
1196
1242
  This method wraps arun_a2a_stream and transforms output to the normalized
@@ -1222,7 +1268,8 @@ class BaseLangGraphAgent(BaseAgent):
1222
1268
  pii_mapping = kwargs.get("pii_mapping")
1223
1269
  transformer = SSEChunkTransformer(task_id=task_id, context_id=context_id, pii_mapping=pii_mapping)
1224
1270
  try:
1225
- async for chunk in transformer.transform_stream(self.arun_a2a_stream(query, **kwargs)):
1271
+ stream = self.arun_a2a_stream(query, **kwargs)
1272
+ async for chunk in transformer.transform_stream(stream):
1226
1273
  yield chunk
1227
1274
  except Exception as e:
1228
1275
  # Lazy import to support optional guardrails dependency
@@ -1358,14 +1405,20 @@ class BaseLangGraphAgent(BaseAgent):
1358
1405
  Returns:
1359
1406
  A2AEvent with TOOL_CALL event type and structured tool information.
1360
1407
  """
1361
- tool_calls_details = [
1362
- {
1363
- "id": tool_call.get("id"),
1364
- "name": tool_call["name"],
1365
- "args": tool_call["args"],
1366
- }
1367
- for tool_call in message.tool_calls
1368
- ]
1408
+ tool_calls_details: list[dict[str, Any]] = []
1409
+ manager = getattr(self, "tool_output_manager", None)
1410
+ thread_id = _THREAD_ID_CVAR.get()
1411
+ for tool_call in message.tool_calls:
1412
+ args = tool_call["args"]
1413
+ if manager and thread_id and isinstance(args, dict):
1414
+ args = manager.rewrite_args_with_latest_reference(args, thread_id)
1415
+ tool_calls_details.append(
1416
+ {
1417
+ "id": tool_call.get("id"),
1418
+ "name": tool_call["name"],
1419
+ "args": args,
1420
+ }
1421
+ )
1369
1422
  tool_names = [details["name"] for details in tool_calls_details]
1370
1423
 
1371
1424
  event = self._create_a2a_event(
@@ -1374,7 +1427,7 @@ class BaseLangGraphAgent(BaseAgent):
1374
1427
  tool_info={"tool_calls": tool_calls_details, "status": "running"},
1375
1428
  metadata={"status": Status.RUNNING},
1376
1429
  is_final=False,
1377
- step_usage=message.usage_metadata,
1430
+ step_usage=self._normalize_usage_metadata(message.usage_metadata),
1378
1431
  )
1379
1432
 
1380
1433
  self._record_emitted_tool_calls(tool_calls_details)
@@ -1594,8 +1647,9 @@ class BaseLangGraphAgent(BaseAgent):
1594
1647
  """
1595
1648
  is_final_response = self._is_final_response(message)
1596
1649
  metadata = self._build_metadata_for_final_response(is_final_response, state)
1650
+ raw_content = message.content
1597
1651
  content = deanonymize_final_response_content(
1598
- content=message.content,
1652
+ content=raw_content if isinstance(raw_content, str) else json.dumps(raw_content),
1599
1653
  is_final_response=is_final_response,
1600
1654
  metadata=metadata,
1601
1655
  )
@@ -1605,7 +1659,7 @@ class BaseLangGraphAgent(BaseAgent):
1605
1659
  tool_info=None,
1606
1660
  metadata=metadata,
1607
1661
  is_final=is_final_response,
1608
- step_usage=message.usage_metadata,
1662
+ step_usage=self._normalize_usage_metadata(message.usage_metadata),
1609
1663
  )
1610
1664
  return event, is_final_response
1611
1665
 
@@ -1882,7 +1936,7 @@ class BaseLangGraphAgent(BaseAgent):
1882
1936
  """
1883
1937
  current_thread_id: str | None = None
1884
1938
  try:
1885
- configurable = config.get("configurable", {}) # type: ignore[assignment]
1939
+ configurable = config.get("configurable", {})
1886
1940
  thread_key = self.thread_id_key or "thread_id"
1887
1941
  current_thread_id = str(configurable.get(thread_key)) if configurable.get(thread_key) else None
1888
1942
  except Exception:
@@ -1964,7 +2018,7 @@ class BaseLangGraphAgent(BaseAgent):
1964
2018
  )
1965
2019
  return events, is_final, updated_message_count
1966
2020
 
1967
- async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
2021
+ async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
1968
2022
  """Internal implementation of arun_a2a_stream without MCP handling.
1969
2023
 
1970
2024
  Args:
@@ -2031,7 +2085,7 @@ class BaseLangGraphAgent(BaseAgent):
2031
2085
  enable_token_streaming=self.enable_a2a_token_streaming,
2032
2086
  )
2033
2087
 
2034
- async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
2088
+ async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
2035
2089
  """Handle the main streaming process including initial status and event processing.
2036
2090
 
2037
2091
  Args:
@@ -2048,7 +2102,7 @@ class BaseLangGraphAgent(BaseAgent):
2048
2102
  self._log_streaming_event_debug("process_stream_item", event)
2049
2103
  yield event
2050
2104
 
2051
- def _create_initial_status_event(self) -> dict[str, Any]:
2105
+ def _create_initial_status_event(self) -> A2AEvent:
2052
2106
  """Create and setup the initial status event."""
2053
2107
  initial_status_event = self._create_a2a_event(
2054
2108
  event_type=A2AStreamEventType.STATUS_UPDATE, content=DefaultStepMessages.EN.value
@@ -2065,7 +2119,7 @@ class BaseLangGraphAgent(BaseAgent):
2065
2119
 
2066
2120
  return initial_status_event
2067
2121
 
2068
- async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
2122
+ async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
2069
2123
  """Process individual streaming items from the LangGraph execution.
2070
2124
 
2071
2125
  Handles the core streaming logic by iterating through items produced by
@@ -2098,9 +2152,7 @@ class BaseLangGraphAgent(BaseAgent):
2098
2152
  async for event in self._create_graph_stream_events(enhanced_input, context):
2099
2153
  yield event
2100
2154
 
2101
- async def _process_a2a_streaming_with_tokens(
2102
- self, context: "_StreamingContext"
2103
- ) -> AsyncGenerator[dict[str, Any], None]:
2155
+ async def _process_a2a_streaming_with_tokens(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
2104
2156
  """Process A2A streaming with token streaming support using aiostream.
2105
2157
 
2106
2158
  Supports both LM Invoker and LangChain models by detecting the appropriate
@@ -2133,6 +2185,9 @@ class BaseLangGraphAgent(BaseAgent):
2133
2185
  token_stream, enhanced_input = self._create_token_stream(context)
2134
2186
  graph_stream = self._create_graph_stream_events(enhanced_input, context)
2135
2187
 
2188
+ if token_stream is None:
2189
+ raise RuntimeError(f"Agent '{self.name}': Token stream not available for LM invoker.")
2190
+
2136
2191
  merged = astream.merge(token_stream, graph_stream)
2137
2192
  async with merged.stream() as merged_stream:
2138
2193
  async for event in merged_stream:
@@ -2148,7 +2203,7 @@ class BaseLangGraphAgent(BaseAgent):
2148
2203
  logger.error(f"Agent '{self.name}': Error during A2A token streaming: {e}")
2149
2204
  raise
2150
2205
 
2151
- async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[dict[str, Any], None]:
2206
+ async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[A2AEvent, None]:
2152
2207
  """Generate A2A events from LM Invoker token stream.
2153
2208
 
2154
2209
  Uses StreamEventHandler to capture tokens emitted by LM Invoker.
@@ -2160,6 +2215,8 @@ class BaseLangGraphAgent(BaseAgent):
2160
2215
  RuntimeError: If no StreamEventHandler is found in event_emitter.
2161
2216
  """
2162
2217
  stream_handler = self._get_stream_handler()
2218
+ if stream_handler is None:
2219
+ raise RuntimeError(f"Agent '{self.name}': StreamEventHandler is required for token streaming.")
2163
2220
 
2164
2221
  try:
2165
2222
  async for event in stream_handler.stream():
@@ -2175,7 +2232,7 @@ class BaseLangGraphAgent(BaseAgent):
2175
2232
  def _create_token_stream(
2176
2233
  self,
2177
2234
  context: "_StreamingContext",
2178
- ) -> tuple[AsyncGenerator[dict[str, Any], None], dict[str, Any]]:
2235
+ ) -> tuple[AsyncGenerator[A2AEvent, None] | None, dict[str, Any]]:
2179
2236
  """Create appropriate token stream and enhanced input for the active model backend.
2180
2237
 
2181
2238
  Args:
@@ -2197,7 +2254,7 @@ class BaseLangGraphAgent(BaseAgent):
2197
2254
 
2198
2255
  async def _create_graph_stream_events(
2199
2256
  self, enhanced_input: dict[str, Any], context: "_StreamingContext"
2200
- ) -> AsyncGenerator[dict[str, Any], None]:
2257
+ ) -> AsyncGenerator[A2AEvent, None]:
2201
2258
  """Generate A2A events from graph execution.
2202
2259
 
2203
2260
  Args:
@@ -2216,8 +2273,9 @@ class BaseLangGraphAgent(BaseAgent):
2216
2273
  async for item in graph_execution:
2217
2274
  stream_mode, stream_data = item
2218
2275
 
2219
- if stream_mode == StreamMode.MESSAGES:
2220
- async for token_event in self._process_message_stream_item(stream_data):
2276
+ if stream_mode == StreamMode.MESSAGES.value:
2277
+ message_data = cast(tuple[Any, dict[str, Any]], stream_data)
2278
+ async for token_event in self._process_message_stream_item(message_data):
2221
2279
  yield token_event
2222
2280
  continue
2223
2281
 
@@ -2236,10 +2294,10 @@ class BaseLangGraphAgent(BaseAgent):
2236
2294
  Returns:
2237
2295
  List of stream modes to use for graph execution.
2238
2296
  """
2239
- stream_modes = [StreamMode.VALUES, StreamMode.CUSTOM]
2297
+ stream_modes = [StreamMode.VALUES.value, StreamMode.CUSTOM.value]
2240
2298
 
2241
2299
  if context.enable_token_streaming and not self._has_lm_invoker():
2242
- stream_modes.append(StreamMode.MESSAGES)
2300
+ stream_modes.append(StreamMode.MESSAGES.value)
2243
2301
 
2244
2302
  return stream_modes
2245
2303
 
@@ -2249,7 +2307,7 @@ class BaseLangGraphAgent(BaseAgent):
2249
2307
  stream_mode: str,
2250
2308
  stream_data: Any,
2251
2309
  context: "_StreamingContext",
2252
- ) -> AsyncGenerator[dict[str, Any], None]:
2310
+ ) -> AsyncGenerator[A2AEvent, None]:
2253
2311
  """Process a single graph stream item and yield A2A events.
2254
2312
 
2255
2313
  Args:
@@ -2261,10 +2319,12 @@ class BaseLangGraphAgent(BaseAgent):
2261
2319
  Yields:
2262
2320
  A2A events generated from the stream item.
2263
2321
  """
2264
- context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES else context.final_state
2322
+ context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES.value else context.final_state
2265
2323
 
2324
+ pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
2325
+ seen_artifact_hashes = context.seen_artifact_hashes if context.seen_artifact_hashes is not None else set()
2266
2326
  events, is_final, context.processed_message_count = self._handle_stream_item(
2267
- item, context.pending_artifacts, context.seen_artifact_hashes, context.processed_message_count
2327
+ item, pending_artifacts, seen_artifact_hashes, context.processed_message_count
2268
2328
  )
2269
2329
 
2270
2330
  if is_final:
@@ -2277,7 +2337,7 @@ class BaseLangGraphAgent(BaseAgent):
2277
2337
 
2278
2338
  async def _process_message_stream_item(
2279
2339
  self, message_data: tuple[Any, dict[str, Any]]
2280
- ) -> AsyncGenerator[dict[str, Any], None]:
2340
+ ) -> AsyncGenerator[A2AEvent, None]:
2281
2341
  """Process message stream items to extract token events.
2282
2342
 
2283
2343
  The "messages" stream mode yields tuples of (AIMessageChunk, metadata).
@@ -2314,9 +2374,7 @@ class BaseLangGraphAgent(BaseAgent):
2314
2374
  except Exception as e:
2315
2375
  logger.error(f"Agent '{self.name}': Error processing message stream item: {e}")
2316
2376
 
2317
- def _update_final_response_for_streaming(
2318
- self, context: "_StreamingContext", event: dict[str, Any]
2319
- ) -> dict[str, Any]:
2377
+ def _update_final_response_for_streaming(self, context: "_StreamingContext", event: A2AEvent) -> A2AEvent:
2320
2378
  """Update final response events with appropriate streaming configuration.
2321
2379
 
2322
2380
  For FINAL_RESPONSE events, this method updates the metadata and optionally clears
@@ -2330,13 +2388,17 @@ class BaseLangGraphAgent(BaseAgent):
2330
2388
  The processed event dictionary with updated metadata and content
2331
2389
  """
2332
2390
  if event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
2333
- event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = False
2391
+ metadata = event.get("metadata")
2392
+ if not isinstance(metadata, dict):
2393
+ metadata = {}
2394
+ event["metadata"] = metadata
2395
+ metadata[MetadataFieldKeys.TOKEN_STREAMING] = False
2334
2396
  if context.enable_token_streaming:
2335
2397
  event["content"] = ""
2336
- event["metadata"][MetadataFieldKeys.TOKEN_STREAMING] = True
2398
+ metadata[MetadataFieldKeys.TOKEN_STREAMING] = True
2337
2399
  return event
2338
2400
 
2339
- def _convert_raw_token_to_a2a_event(self, raw_event: str) -> dict[str, Any] | None:
2401
+ def _convert_raw_token_to_a2a_event(self, raw_event: str) -> A2AEvent | None:
2340
2402
  """Parse raw token event into A2A event.
2341
2403
 
2342
2404
  Args:
@@ -2359,7 +2421,7 @@ class BaseLangGraphAgent(BaseAgent):
2359
2421
  logger.debug(f"Agent '{self.name}': Error parsing token event: {e}")
2360
2422
  return None
2361
2423
 
2362
- def _capture_final_content_if_needed(self, context: "_StreamingContext", event: dict[str, Any]) -> None:
2424
+ def _capture_final_content_if_needed(self, context: "_StreamingContext", event: A2AEvent) -> None:
2363
2425
  """Capture final content from A2A events for memory persistence.
2364
2426
 
2365
2427
  Monitors A2A events for final response content and triggers early memory
@@ -2437,7 +2499,7 @@ class BaseLangGraphAgent(BaseAgent):
2437
2499
  except Exception:
2438
2500
  pass
2439
2501
 
2440
- async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[dict[str, Any], None]:
2502
+ async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
2441
2503
  """Ensure final completion events are yielded if not already done.
2442
2504
 
2443
2505
  Args:
@@ -2448,7 +2510,9 @@ class BaseLangGraphAgent(BaseAgent):
2448
2510
  dict[str, Any]: The final completion event.
2449
2511
  """
2450
2512
  if not context.final_event_yielded:
2451
- completion_event = self._create_completion_event(context.pending_artifacts, context.final_state)
2513
+ pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
2514
+ final_state = context.final_state or {}
2515
+ completion_event = self._create_completion_event(pending_artifacts, final_state)
2452
2516
  self._log_streaming_event_debug("final_completion", completion_event)
2453
2517
  yield completion_event
2454
2518
 
@@ -2456,7 +2520,7 @@ class BaseLangGraphAgent(BaseAgent):
2456
2520
  self,
2457
2521
  context: "_StreamingContext",
2458
2522
  error: Exception,
2459
- ) -> AsyncGenerator[dict[str, Any], None]:
2523
+ ) -> AsyncGenerator[A2AEvent, None]:
2460
2524
  """Handle streaming errors gracefully.
2461
2525
 
2462
2526
  Provides error handling for the A2A streaming process, ensuring errors
@@ -2473,11 +2537,14 @@ class BaseLangGraphAgent(BaseAgent):
2473
2537
  error message, optionally including any pending artifacts.
2474
2538
  """
2475
2539
  logger.error(f"Error in agent stream: {error}", exc_info=True)
2476
- error_event = {"status": "failed", "content": f"Error: {str(error)}"}
2477
-
2478
- if context.pending_artifacts:
2479
- error_event["artifacts"] = context.pending_artifacts
2480
-
2540
+ error_event = self._create_a2a_event(
2541
+ event_type=A2AStreamEventType.ERROR,
2542
+ content=f"Error: {str(error)}",
2543
+ metadata={"status": "failed"},
2544
+ artifacts=context.pending_artifacts,
2545
+ is_final=True,
2546
+ )
2547
+ error_event["status"] = "failed"
2481
2548
  self._log_streaming_event_debug("error_event", error_event)
2482
2549
  yield error_event
2483
2550
 
@@ -2533,7 +2600,7 @@ class BaseLangGraphAgent(BaseAgent):
2533
2600
 
2534
2601
  return metadata
2535
2602
 
2536
- def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]):
2603
+ def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]) -> A2AEvent:
2537
2604
  """Helper to create the completion event with artifacts and references if available.
2538
2605
 
2539
2606
  This method is used to create the completion event with artifacts and references if available.
@@ -2587,7 +2654,7 @@ class BaseLangGraphAgent(BaseAgent):
2587
2654
  else:
2588
2655
  return tool_name[:4]
2589
2656
 
2590
- def _generate_tool_call_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
2657
+ def _generate_tool_call_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
2591
2658
  """Generate step_id for tool call events.
2592
2659
 
2593
2660
  Args:
@@ -2623,7 +2690,7 @@ class BaseLangGraphAgent(BaseAgent):
2623
2690
 
2624
2691
  return f"{category}_{combined_name}_parent_{counter:03d}"
2625
2692
 
2626
- def _generate_tool_result_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
2693
+ def _generate_tool_result_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
2627
2694
  """Generate step_id for tool result events.
2628
2695
 
2629
2696
  Args:
@@ -2749,7 +2816,7 @@ class BaseLangGraphAgent(BaseAgent):
2749
2816
  def _create_a2a_event( # noqa: PLR0913
2750
2817
  self,
2751
2818
  event_type: A2AStreamEventType,
2752
- content: str,
2819
+ content: Any,
2753
2820
  metadata: dict[str, Any] | None = None,
2754
2821
  tool_info: dict[str, Any] | None = None,
2755
2822
  thinking_and_activity_info: dict[str, Any] | None = None,
@@ -2787,9 +2854,11 @@ class BaseLangGraphAgent(BaseAgent):
2787
2854
  # Inject cumulative time since the first STATUS_UPDATE for this thread
2788
2855
  # Do not set cumulative time here; server executor enforces it for all SSE events
2789
2856
 
2857
+ normalized_content = self._normalize_event_content(content)
2858
+
2790
2859
  event = {
2791
2860
  "event_type": event_type,
2792
- "content": content,
2861
+ "content": normalized_content,
2793
2862
  "metadata": enriched_metadata,
2794
2863
  "tool_info": tool_info,
2795
2864
  "is_final": is_final,
@@ -2803,7 +2872,7 @@ class BaseLangGraphAgent(BaseAgent):
2803
2872
  event["thinking_and_activity_info"] = thinking_and_activity_info
2804
2873
 
2805
2874
  try:
2806
- content_preview = content if isinstance(content, str) else str(content)
2875
+ content_preview = normalized_content
2807
2876
  logger.info(
2808
2877
  "A2A emitting event: type=%s step_id=%s final=%s preview=%s",
2809
2878
  getattr(event_type, "value", event_type),