aip-agents-binary 0.5.25b1__py3-none-macosx_13_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (566) hide show
  1. aip_agents/__init__.py +65 -0
  2. aip_agents/__init__.pyi +19 -0
  3. aip_agents/a2a/__init__.py +19 -0
  4. aip_agents/a2a/__init__.pyi +3 -0
  5. aip_agents/a2a/server/__init__.py +10 -0
  6. aip_agents/a2a/server/__init__.pyi +4 -0
  7. aip_agents/a2a/server/base_executor.py +1086 -0
  8. aip_agents/a2a/server/base_executor.pyi +73 -0
  9. aip_agents/a2a/server/google_adk_executor.py +198 -0
  10. aip_agents/a2a/server/google_adk_executor.pyi +51 -0
  11. aip_agents/a2a/server/langflow_executor.py +180 -0
  12. aip_agents/a2a/server/langflow_executor.pyi +43 -0
  13. aip_agents/a2a/server/langgraph_executor.py +270 -0
  14. aip_agents/a2a/server/langgraph_executor.pyi +47 -0
  15. aip_agents/a2a/types.py +232 -0
  16. aip_agents/a2a/types.pyi +132 -0
  17. aip_agents/agent/__init__.py +27 -0
  18. aip_agents/agent/__init__.pyi +9 -0
  19. aip_agents/agent/base_agent.py +970 -0
  20. aip_agents/agent/base_agent.pyi +221 -0
  21. aip_agents/agent/base_langgraph_agent.py +2948 -0
  22. aip_agents/agent/base_langgraph_agent.pyi +232 -0
  23. aip_agents/agent/google_adk_agent.py +926 -0
  24. aip_agents/agent/google_adk_agent.pyi +141 -0
  25. aip_agents/agent/google_adk_constants.py +6 -0
  26. aip_agents/agent/google_adk_constants.pyi +3 -0
  27. aip_agents/agent/hitl/__init__.py +24 -0
  28. aip_agents/agent/hitl/__init__.pyi +6 -0
  29. aip_agents/agent/hitl/config.py +28 -0
  30. aip_agents/agent/hitl/config.pyi +15 -0
  31. aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
  32. aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
  33. aip_agents/agent/hitl/manager.py +532 -0
  34. aip_agents/agent/hitl/manager.pyi +200 -0
  35. aip_agents/agent/hitl/models.py +18 -0
  36. aip_agents/agent/hitl/models.pyi +3 -0
  37. aip_agents/agent/hitl/prompt/__init__.py +9 -0
  38. aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
  39. aip_agents/agent/hitl/prompt/base.py +42 -0
  40. aip_agents/agent/hitl/prompt/base.pyi +24 -0
  41. aip_agents/agent/hitl/prompt/deferred.py +73 -0
  42. aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
  43. aip_agents/agent/hitl/registry.py +149 -0
  44. aip_agents/agent/hitl/registry.pyi +101 -0
  45. aip_agents/agent/interface.py +138 -0
  46. aip_agents/agent/interface.pyi +81 -0
  47. aip_agents/agent/interfaces.py +65 -0
  48. aip_agents/agent/interfaces.pyi +44 -0
  49. aip_agents/agent/langflow_agent.py +464 -0
  50. aip_agents/agent/langflow_agent.pyi +133 -0
  51. aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
  52. aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
  53. aip_agents/agent/langgraph_react_agent.py +2596 -0
  54. aip_agents/agent/langgraph_react_agent.pyi +131 -0
  55. aip_agents/agent/system_instruction_context.py +34 -0
  56. aip_agents/agent/system_instruction_context.pyi +13 -0
  57. aip_agents/clients/__init__.py +10 -0
  58. aip_agents/clients/__init__.pyi +4 -0
  59. aip_agents/clients/langflow/__init__.py +10 -0
  60. aip_agents/clients/langflow/__init__.pyi +4 -0
  61. aip_agents/clients/langflow/client.py +477 -0
  62. aip_agents/clients/langflow/client.pyi +140 -0
  63. aip_agents/clients/langflow/types.py +18 -0
  64. aip_agents/clients/langflow/types.pyi +7 -0
  65. aip_agents/constants.py +23 -0
  66. aip_agents/constants.pyi +7 -0
  67. aip_agents/credentials/manager.py +132 -0
  68. aip_agents/examples/__init__.py +5 -0
  69. aip_agents/examples/__init__.pyi +0 -0
  70. aip_agents/examples/compare_streaming_client.py +783 -0
  71. aip_agents/examples/compare_streaming_client.pyi +48 -0
  72. aip_agents/examples/compare_streaming_server.py +142 -0
  73. aip_agents/examples/compare_streaming_server.pyi +18 -0
  74. aip_agents/examples/demo_memory_recall.py +401 -0
  75. aip_agents/examples/demo_memory_recall.pyi +58 -0
  76. aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
  77. aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
  78. aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
  79. aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
  80. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
  81. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
  82. aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
  83. aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
  84. aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
  85. aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
  86. aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
  87. aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
  88. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
  89. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
  90. aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
  91. aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
  92. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
  93. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
  94. aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
  95. aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
  96. aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
  97. aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
  98. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
  99. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
  100. aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
  101. aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
  102. aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
  103. aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
  104. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
  105. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
  106. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
  107. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
  108. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
  109. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
  110. aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
  111. aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
  112. aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
  113. aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
  114. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
  115. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
  116. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
  117. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
  118. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
  119. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
  120. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
  121. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
  122. aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
  123. aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
  124. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
  125. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
  126. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
  127. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
  128. aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
  129. aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
  130. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
  131. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
  132. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
  133. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
  134. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
  135. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
  136. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
  137. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
  138. aip_agents/examples/hello_world_google_adk.py +41 -0
  139. aip_agents/examples/hello_world_google_adk.pyi +5 -0
  140. aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
  141. aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
  142. aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
  143. aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
  144. aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
  145. aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
  146. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
  147. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
  148. aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
  149. aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
  150. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
  151. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
  152. aip_agents/examples/hello_world_google_adk_stream.py +44 -0
  153. aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
  154. aip_agents/examples/hello_world_langchain.py +28 -0
  155. aip_agents/examples/hello_world_langchain.pyi +5 -0
  156. aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
  157. aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
  158. aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
  159. aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
  160. aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
  161. aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
  162. aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
  163. aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
  164. aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
  165. aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
  166. aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
  167. aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
  168. aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
  169. aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
  170. aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
  171. aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
  172. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
  173. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
  174. aip_agents/examples/hello_world_langchain_stream.py +36 -0
  175. aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
  176. aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
  177. aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
  178. aip_agents/examples/hello_world_langflow_agent.py +163 -0
  179. aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
  180. aip_agents/examples/hello_world_langgraph.py +39 -0
  181. aip_agents/examples/hello_world_langgraph.pyi +5 -0
  182. aip_agents/examples/hello_world_langgraph_gl_connector_twitter.py +44 -0
  183. aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
  184. aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
  185. aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
  186. aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
  187. aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
  188. aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
  189. aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
  190. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
  191. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
  192. aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
  193. aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
  194. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
  195. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
  196. aip_agents/examples/hello_world_langgraph_stream.py +43 -0
  197. aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
  198. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
  199. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
  200. aip_agents/examples/hello_world_model_switch_cli.py +210 -0
  201. aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
  202. aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
  203. aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
  204. aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
  205. aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
  206. aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
  207. aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
  208. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
  209. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
  210. aip_agents/examples/hello_world_pii_logger.py +21 -0
  211. aip_agents/examples/hello_world_pii_logger.pyi +5 -0
  212. aip_agents/examples/hello_world_sentry.py +133 -0
  213. aip_agents/examples/hello_world_sentry.pyi +21 -0
  214. aip_agents/examples/hello_world_step_limits.py +273 -0
  215. aip_agents/examples/hello_world_step_limits.pyi +17 -0
  216. aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
  217. aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
  218. aip_agents/examples/hello_world_tool_output_client.py +46 -0
  219. aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
  220. aip_agents/examples/hello_world_tool_output_server.py +114 -0
  221. aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
  222. aip_agents/examples/hitl_demo.py +724 -0
  223. aip_agents/examples/hitl_demo.pyi +67 -0
  224. aip_agents/examples/mcp_configs/configs.py +63 -0
  225. aip_agents/examples/mcp_servers/common.py +76 -0
  226. aip_agents/examples/mcp_servers/mcp_name.py +29 -0
  227. aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
  228. aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
  229. aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
  230. aip_agents/examples/mcp_servers/mcp_time.py +10 -0
  231. aip_agents/examples/pii_demo_langgraph_client.py +69 -0
  232. aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
  233. aip_agents/examples/pii_demo_langgraph_server.py +126 -0
  234. aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
  235. aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
  236. aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
  237. aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
  238. aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
  239. aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
  240. aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
  241. aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
  242. aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
  243. aip_agents/examples/tools/__init__.py +27 -0
  244. aip_agents/examples/tools/__init__.pyi +9 -0
  245. aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
  246. aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
  247. aip_agents/examples/tools/adk_weather_tool.py +60 -0
  248. aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
  249. aip_agents/examples/tools/data_generator_tool.py +103 -0
  250. aip_agents/examples/tools/data_generator_tool.pyi +15 -0
  251. aip_agents/examples/tools/data_visualization_tool.py +312 -0
  252. aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
  253. aip_agents/examples/tools/image_artifact_tool.py +136 -0
  254. aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
  255. aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
  256. aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
  257. aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
  258. aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
  259. aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
  260. aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
  261. aip_agents/examples/tools/langchain_weather_tool.py +48 -0
  262. aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
  263. aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
  264. aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
  265. aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
  266. aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
  267. aip_agents/examples/tools/pii_demo_tools.py +189 -0
  268. aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
  269. aip_agents/examples/tools/random_chart_tool.py +142 -0
  270. aip_agents/examples/tools/random_chart_tool.pyi +20 -0
  271. aip_agents/examples/tools/serper_tool.py +202 -0
  272. aip_agents/examples/tools/serper_tool.pyi +16 -0
  273. aip_agents/examples/tools/stock_tools.py +82 -0
  274. aip_agents/examples/tools/stock_tools.pyi +36 -0
  275. aip_agents/examples/tools/table_generator_tool.py +167 -0
  276. aip_agents/examples/tools/table_generator_tool.pyi +22 -0
  277. aip_agents/examples/tools/time_tool.py +82 -0
  278. aip_agents/examples/tools/time_tool.pyi +15 -0
  279. aip_agents/examples/tools/weather_forecast_tool.py +38 -0
  280. aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
  281. aip_agents/executor/agent_executor.py +473 -0
  282. aip_agents/executor/base.py +48 -0
  283. aip_agents/guardrails/__init__.py +83 -0
  284. aip_agents/guardrails/__init__.pyi +6 -0
  285. aip_agents/guardrails/engines/__init__.py +69 -0
  286. aip_agents/guardrails/engines/__init__.pyi +4 -0
  287. aip_agents/guardrails/engines/base.py +90 -0
  288. aip_agents/guardrails/engines/base.pyi +61 -0
  289. aip_agents/guardrails/engines/nemo.py +101 -0
  290. aip_agents/guardrails/engines/nemo.pyi +46 -0
  291. aip_agents/guardrails/engines/phrase_matcher.py +113 -0
  292. aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
  293. aip_agents/guardrails/exceptions.py +39 -0
  294. aip_agents/guardrails/exceptions.pyi +23 -0
  295. aip_agents/guardrails/manager.py +163 -0
  296. aip_agents/guardrails/manager.pyi +42 -0
  297. aip_agents/guardrails/middleware.py +199 -0
  298. aip_agents/guardrails/middleware.pyi +87 -0
  299. aip_agents/guardrails/schemas.py +63 -0
  300. aip_agents/guardrails/schemas.pyi +43 -0
  301. aip_agents/guardrails/utils.py +45 -0
  302. aip_agents/guardrails/utils.pyi +19 -0
  303. aip_agents/mcp/__init__.py +1 -0
  304. aip_agents/mcp/__init__.pyi +0 -0
  305. aip_agents/mcp/client/__init__.py +14 -0
  306. aip_agents/mcp/client/__init__.pyi +5 -0
  307. aip_agents/mcp/client/base_mcp_client.py +369 -0
  308. aip_agents/mcp/client/base_mcp_client.pyi +148 -0
  309. aip_agents/mcp/client/connection_manager.py +193 -0
  310. aip_agents/mcp/client/connection_manager.pyi +48 -0
  311. aip_agents/mcp/client/google_adk/__init__.py +11 -0
  312. aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
  313. aip_agents/mcp/client/google_adk/client.py +381 -0
  314. aip_agents/mcp/client/google_adk/client.pyi +75 -0
  315. aip_agents/mcp/client/langchain/__init__.py +11 -0
  316. aip_agents/mcp/client/langchain/__init__.pyi +3 -0
  317. aip_agents/mcp/client/langchain/client.py +265 -0
  318. aip_agents/mcp/client/langchain/client.pyi +48 -0
  319. aip_agents/mcp/client/persistent_session.py +362 -0
  320. aip_agents/mcp/client/persistent_session.pyi +113 -0
  321. aip_agents/mcp/client/session_pool.py +351 -0
  322. aip_agents/mcp/client/session_pool.pyi +101 -0
  323. aip_agents/mcp/client/transports.py +228 -0
  324. aip_agents/mcp/client/transports.pyi +123 -0
  325. aip_agents/mcp/utils/__init__.py +7 -0
  326. aip_agents/mcp/utils/__init__.pyi +0 -0
  327. aip_agents/mcp/utils/config_validator.py +139 -0
  328. aip_agents/mcp/utils/config_validator.pyi +82 -0
  329. aip_agents/memory/__init__.py +14 -0
  330. aip_agents/memory/__init__.pyi +5 -0
  331. aip_agents/memory/adapters/__init__.py +10 -0
  332. aip_agents/memory/adapters/__init__.pyi +4 -0
  333. aip_agents/memory/adapters/base_adapter.py +717 -0
  334. aip_agents/memory/adapters/base_adapter.pyi +150 -0
  335. aip_agents/memory/adapters/mem0.py +84 -0
  336. aip_agents/memory/adapters/mem0.pyi +22 -0
  337. aip_agents/memory/base.py +84 -0
  338. aip_agents/memory/base.pyi +60 -0
  339. aip_agents/memory/constants.py +49 -0
  340. aip_agents/memory/constants.pyi +25 -0
  341. aip_agents/memory/factory.py +86 -0
  342. aip_agents/memory/factory.pyi +24 -0
  343. aip_agents/memory/guidance.py +20 -0
  344. aip_agents/memory/guidance.pyi +3 -0
  345. aip_agents/memory/simple_memory.py +47 -0
  346. aip_agents/memory/simple_memory.pyi +23 -0
  347. aip_agents/middleware/__init__.py +17 -0
  348. aip_agents/middleware/__init__.pyi +5 -0
  349. aip_agents/middleware/base.py +96 -0
  350. aip_agents/middleware/base.pyi +75 -0
  351. aip_agents/middleware/manager.py +150 -0
  352. aip_agents/middleware/manager.pyi +84 -0
  353. aip_agents/middleware/todolist.py +274 -0
  354. aip_agents/middleware/todolist.pyi +125 -0
  355. aip_agents/schema/__init__.py +69 -0
  356. aip_agents/schema/__init__.pyi +9 -0
  357. aip_agents/schema/a2a.py +56 -0
  358. aip_agents/schema/a2a.pyi +40 -0
  359. aip_agents/schema/agent.py +111 -0
  360. aip_agents/schema/agent.pyi +65 -0
  361. aip_agents/schema/hitl.py +157 -0
  362. aip_agents/schema/hitl.pyi +89 -0
  363. aip_agents/schema/langgraph.py +37 -0
  364. aip_agents/schema/langgraph.pyi +28 -0
  365. aip_agents/schema/model_id.py +97 -0
  366. aip_agents/schema/model_id.pyi +54 -0
  367. aip_agents/schema/step_limit.py +108 -0
  368. aip_agents/schema/step_limit.pyi +63 -0
  369. aip_agents/schema/storage.py +40 -0
  370. aip_agents/schema/storage.pyi +21 -0
  371. aip_agents/sentry/__init__.py +11 -0
  372. aip_agents/sentry/__init__.pyi +3 -0
  373. aip_agents/sentry/sentry.py +151 -0
  374. aip_agents/sentry/sentry.pyi +48 -0
  375. aip_agents/storage/__init__.py +41 -0
  376. aip_agents/storage/__init__.pyi +8 -0
  377. aip_agents/storage/base.py +85 -0
  378. aip_agents/storage/base.pyi +58 -0
  379. aip_agents/storage/clients/__init__.py +12 -0
  380. aip_agents/storage/clients/__init__.pyi +3 -0
  381. aip_agents/storage/clients/minio_client.py +318 -0
  382. aip_agents/storage/clients/minio_client.pyi +137 -0
  383. aip_agents/storage/config.py +62 -0
  384. aip_agents/storage/config.pyi +29 -0
  385. aip_agents/storage/providers/__init__.py +15 -0
  386. aip_agents/storage/providers/__init__.pyi +5 -0
  387. aip_agents/storage/providers/base.py +106 -0
  388. aip_agents/storage/providers/base.pyi +88 -0
  389. aip_agents/storage/providers/memory.py +114 -0
  390. aip_agents/storage/providers/memory.pyi +79 -0
  391. aip_agents/storage/providers/object_storage.py +214 -0
  392. aip_agents/storage/providers/object_storage.pyi +98 -0
  393. aip_agents/tools/__init__.py +53 -0
  394. aip_agents/tools/__init__.pyi +9 -0
  395. aip_agents/tools/browser_use/__init__.py +82 -0
  396. aip_agents/tools/browser_use/__init__.pyi +14 -0
  397. aip_agents/tools/browser_use/action_parser.py +103 -0
  398. aip_agents/tools/browser_use/action_parser.pyi +18 -0
  399. aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
  400. aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
  401. aip_agents/tools/browser_use/llm_config.py +120 -0
  402. aip_agents/tools/browser_use/llm_config.pyi +52 -0
  403. aip_agents/tools/browser_use/minio_storage.py +198 -0
  404. aip_agents/tools/browser_use/minio_storage.pyi +109 -0
  405. aip_agents/tools/browser_use/schemas.py +119 -0
  406. aip_agents/tools/browser_use/schemas.pyi +32 -0
  407. aip_agents/tools/browser_use/session.py +76 -0
  408. aip_agents/tools/browser_use/session.pyi +4 -0
  409. aip_agents/tools/browser_use/session_errors.py +132 -0
  410. aip_agents/tools/browser_use/session_errors.pyi +53 -0
  411. aip_agents/tools/browser_use/steel_session_recording.py +317 -0
  412. aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
  413. aip_agents/tools/browser_use/streaming.py +813 -0
  414. aip_agents/tools/browser_use/streaming.pyi +81 -0
  415. aip_agents/tools/browser_use/structured_data_parser.py +257 -0
  416. aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
  417. aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
  418. aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
  419. aip_agents/tools/browser_use/types.py +78 -0
  420. aip_agents/tools/browser_use/types.pyi +45 -0
  421. aip_agents/tools/code_sandbox/__init__.py +26 -0
  422. aip_agents/tools/code_sandbox/__init__.pyi +3 -0
  423. aip_agents/tools/code_sandbox/constant.py +13 -0
  424. aip_agents/tools/code_sandbox/constant.pyi +4 -0
  425. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +306 -0
  426. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
  427. aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
  428. aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
  429. aip_agents/tools/constants.py +177 -0
  430. aip_agents/tools/constants.pyi +138 -0
  431. aip_agents/tools/document_loader/__init__.py +44 -0
  432. aip_agents/tools/document_loader/__init__.pyi +7 -0
  433. aip_agents/tools/document_loader/base_reader.py +302 -0
  434. aip_agents/tools/document_loader/base_reader.pyi +75 -0
  435. aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
  436. aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
  437. aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
  438. aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
  439. aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
  440. aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
  441. aip_agents/tools/document_loader/pdf_splitter.py +169 -0
  442. aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
  443. aip_agents/tools/gl_connector/__init__.py +5 -0
  444. aip_agents/tools/gl_connector/__init__.pyi +3 -0
  445. aip_agents/tools/gl_connector/tool.py +383 -0
  446. aip_agents/tools/gl_connector/tool.pyi +74 -0
  447. aip_agents/tools/gl_connector_tools.py +119 -0
  448. aip_agents/tools/gl_connector_tools.pyi +39 -0
  449. aip_agents/tools/memory_search/__init__.py +22 -0
  450. aip_agents/tools/memory_search/__init__.pyi +5 -0
  451. aip_agents/tools/memory_search/base.py +200 -0
  452. aip_agents/tools/memory_search/base.pyi +69 -0
  453. aip_agents/tools/memory_search/mem0.py +258 -0
  454. aip_agents/tools/memory_search/mem0.pyi +19 -0
  455. aip_agents/tools/memory_search/schema.py +48 -0
  456. aip_agents/tools/memory_search/schema.pyi +15 -0
  457. aip_agents/tools/memory_search_tool.py +26 -0
  458. aip_agents/tools/memory_search_tool.pyi +3 -0
  459. aip_agents/tools/time_tool.py +117 -0
  460. aip_agents/tools/time_tool.pyi +16 -0
  461. aip_agents/tools/tool_config_injector.py +300 -0
  462. aip_agents/tools/tool_config_injector.pyi +26 -0
  463. aip_agents/tools/web_search/__init__.py +15 -0
  464. aip_agents/tools/web_search/__init__.pyi +3 -0
  465. aip_agents/tools/web_search/serper_tool.py +187 -0
  466. aip_agents/tools/web_search/serper_tool.pyi +19 -0
  467. aip_agents/types/__init__.py +70 -0
  468. aip_agents/types/__init__.pyi +36 -0
  469. aip_agents/types/a2a_events.py +13 -0
  470. aip_agents/types/a2a_events.pyi +3 -0
  471. aip_agents/utils/__init__.py +79 -0
  472. aip_agents/utils/__init__.pyi +11 -0
  473. aip_agents/utils/a2a_connector.py +1757 -0
  474. aip_agents/utils/a2a_connector.pyi +146 -0
  475. aip_agents/utils/artifact_helpers.py +502 -0
  476. aip_agents/utils/artifact_helpers.pyi +203 -0
  477. aip_agents/utils/constants.py +22 -0
  478. aip_agents/utils/constants.pyi +10 -0
  479. aip_agents/utils/datetime/__init__.py +34 -0
  480. aip_agents/utils/datetime/__init__.pyi +4 -0
  481. aip_agents/utils/datetime/normalization.py +231 -0
  482. aip_agents/utils/datetime/normalization.pyi +95 -0
  483. aip_agents/utils/datetime/timezone.py +206 -0
  484. aip_agents/utils/datetime/timezone.pyi +48 -0
  485. aip_agents/utils/env_loader.py +27 -0
  486. aip_agents/utils/env_loader.pyi +10 -0
  487. aip_agents/utils/event_handler_registry.py +58 -0
  488. aip_agents/utils/event_handler_registry.pyi +23 -0
  489. aip_agents/utils/file_prompt_utils.py +176 -0
  490. aip_agents/utils/file_prompt_utils.pyi +21 -0
  491. aip_agents/utils/final_response_builder.py +211 -0
  492. aip_agents/utils/final_response_builder.pyi +34 -0
  493. aip_agents/utils/formatter_llm_client.py +231 -0
  494. aip_agents/utils/formatter_llm_client.pyi +71 -0
  495. aip_agents/utils/langgraph/__init__.py +19 -0
  496. aip_agents/utils/langgraph/__init__.pyi +3 -0
  497. aip_agents/utils/langgraph/converter.py +128 -0
  498. aip_agents/utils/langgraph/converter.pyi +49 -0
  499. aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
  500. aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
  501. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
  502. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
  503. aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
  504. aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
  505. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
  506. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
  507. aip_agents/utils/langgraph/tool_output_management.py +967 -0
  508. aip_agents/utils/langgraph/tool_output_management.pyi +292 -0
  509. aip_agents/utils/logger.py +195 -0
  510. aip_agents/utils/logger.pyi +60 -0
  511. aip_agents/utils/metadata/__init__.py +27 -0
  512. aip_agents/utils/metadata/__init__.pyi +5 -0
  513. aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
  514. aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
  515. aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
  516. aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
  517. aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
  518. aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
  519. aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
  520. aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
  521. aip_agents/utils/metadata/activity_narrative/context.py +49 -0
  522. aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
  523. aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
  524. aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
  525. aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
  526. aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
  527. aip_agents/utils/metadata/schemas/__init__.py +16 -0
  528. aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
  529. aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
  530. aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
  531. aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
  532. aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
  533. aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
  534. aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
  535. aip_agents/utils/metadata_helper.py +358 -0
  536. aip_agents/utils/metadata_helper.pyi +117 -0
  537. aip_agents/utils/name_preprocessor/__init__.py +17 -0
  538. aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
  539. aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
  540. aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
  541. aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
  542. aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
  543. aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
  544. aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
  545. aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
  546. aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
  547. aip_agents/utils/pii/__init__.py +25 -0
  548. aip_agents/utils/pii/__init__.pyi +5 -0
  549. aip_agents/utils/pii/pii_handler.py +397 -0
  550. aip_agents/utils/pii/pii_handler.pyi +96 -0
  551. aip_agents/utils/pii/pii_helper.py +207 -0
  552. aip_agents/utils/pii/pii_helper.pyi +78 -0
  553. aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
  554. aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
  555. aip_agents/utils/reference_helper.py +273 -0
  556. aip_agents/utils/reference_helper.pyi +81 -0
  557. aip_agents/utils/sse_chunk_transformer.py +831 -0
  558. aip_agents/utils/sse_chunk_transformer.pyi +166 -0
  559. aip_agents/utils/step_limit_manager.py +265 -0
  560. aip_agents/utils/step_limit_manager.pyi +112 -0
  561. aip_agents/utils/token_usage_helper.py +156 -0
  562. aip_agents/utils/token_usage_helper.pyi +60 -0
  563. aip_agents_binary-0.5.25b1.dist-info/METADATA +681 -0
  564. aip_agents_binary-0.5.25b1.dist-info/RECORD +566 -0
  565. aip_agents_binary-0.5.25b1.dist-info/WHEEL +5 -0
  566. aip_agents_binary-0.5.25b1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2596 @@
1
+ """LangGraph ReAct Agent implementation.
2
+
3
+ A ReAct agent template built on LangGraph that can use either lm_invoker or LangChain BaseChatModel.
4
+
5
+ Authors:
6
+ Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
7
+ Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
8
+ Raymond Christopher (raymond.christopher@gdplabs.id)
9
+ Reinhart Linanda (reinhart.linanda@gdplabs.id)
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import asyncio
15
+ import time
16
+ import uuid
17
+ from collections.abc import Awaitable, Callable, Sequence
18
+ from dataclasses import asdict, dataclass
19
+ from functools import reduce
20
+ from textwrap import dedent
21
+ from typing import TYPE_CHECKING, Annotated, Any
22
+
23
+ from deprecated import deprecated
24
+
25
+ if TYPE_CHECKING:
26
+ from aip_agents.guardrails.manager import GuardrailManager
27
+ from gllm_core.event import EventEmitter
28
+ from gllm_core.schema import Chunk
29
+ from langchain_core.language_models import BaseChatModel
30
+ from langchain_core.messages import (
31
+ AIMessage,
32
+ BaseMessage,
33
+ HumanMessage,
34
+ SystemMessage,
35
+ ToolMessage,
36
+ )
37
+ from langchain_core.messages.ai import UsageMetadata
38
+ from langchain_core.tools import BaseTool
39
+ from langgraph.config import get_stream_writer
40
+ from langgraph.graph import END, StateGraph
41
+ from langgraph.graph.message import add_messages
42
+ from langgraph.graph.state import CompiledStateGraph
43
+ from langgraph.managed import IsLastStep, RemainingSteps
44
+ from langgraph.types import Command, StreamWriter
45
+ from typing_extensions import TypedDict
46
+
47
+ from aip_agents.agent.base_langgraph_agent import _THREAD_ID_CVAR, BaseLangGraphAgent
48
+ from aip_agents.agent.hitl.langgraph_hitl_mixin import LangGraphHitLMixin
49
+ from aip_agents.agent.hitl.manager import TOOL_EXECUTION_BLOCKING_DECISIONS
50
+ from aip_agents.middleware.base import AgentMiddleware, ModelRequest
51
+ from aip_agents.middleware.manager import MiddlewareManager
52
+ from aip_agents.middleware.todolist import TodoList, TodoListMiddleware
53
+ from aip_agents.schema.a2a import A2AStreamEventType
54
+ from aip_agents.schema.hitl import ApprovalDecision, HitlMetadata
55
+ from aip_agents.schema.langgraph import ToolCallResult, ToolStorageParams
56
+ from aip_agents.schema.step_limit import MaxStepsExceededError, StepLimitConfig
57
+ from aip_agents.tools.memory_search_tool import MEMORY_SEARCH_TOOL_NAME
58
+ from aip_agents.tools.tool_config_injector import TOOL_CONFIGS_KEY
59
+ from aip_agents.utils import add_references_chunks
60
+ from aip_agents.utils.langgraph import (
61
+ convert_langchain_messages_to_gllm_messages,
62
+ convert_lm_output_to_langchain_message,
63
+ )
64
+ from aip_agents.utils.langgraph.tool_output_management import (
65
+ StoreOutputParams,
66
+ ToolOutputManager,
67
+ ToolReferenceError,
68
+ ToolReferenceResolver,
69
+ )
70
+ from aip_agents.utils.logger import get_logger
71
+ from aip_agents.utils.metadata.activity_metadata_helper import create_tool_activity_info
72
+ from aip_agents.utils.metadata_helper import Kind, MetadataFieldKeys, Status
73
+ from aip_agents.utils.pii import ToolPIIHandler, add_pii_mappings, normalize_enable_pii
74
+ from aip_agents.utils.reference_helper import extract_references_from_tool
75
+ from aip_agents.utils.step_limit_manager import (
76
+ _DELEGATION_CHAIN_CVAR,
77
+ _DELEGATION_DEPTH_CVAR,
78
+ _REMAINING_STEP_BUDGET_CVAR,
79
+ _STEP_LIMIT_CONFIG_CVAR,
80
+ StepLimitManager,
81
+ )
82
+ from aip_agents.utils.token_usage_helper import (
83
+ TOTAL_USAGE_KEY,
84
+ USAGE_METADATA_KEY,
85
+ add_usage_metadata,
86
+ extract_and_update_token_usage_from_ai_message,
87
+ extract_token_usage_from_tool_output,
88
+ )
89
+
90
+ logger = get_logger(__name__)
91
+
92
# Default system instruction applied when the caller does not supply one.
DEFAULT_INSTRUCTION = "You are a helpful assistant. Use the available tools to help answer questions."

# Name of the streaming execution method looked up on tools that support streaming.
TOOL_RUN_STREAMING_METHOD = "arun_streaming"

# Shared dictionary keys used by this module when reading/writing state and params.
TOOL_OUTPUT_MANAGER_KEY = "tool_output_manager"
CALL_ID_KEY = "call_id"
101
+
102
+
103
@dataclass
class ToolCallContext:
    """Context information for executing a single tool call.

    Bundles the per-invocation data passed to `_run_single_tool_call` so it can
    travel as one object instead of several loose parameters.
    """

    # LangGraph run configuration for this invocation; None when not provided.
    config: dict[str, Any] | None
    # Snapshot of the agent state at the time of the tool call.
    state: dict[str, Any]
    # Mutable accumulator of artifacts produced so far; tools may append to it.
    pending_artifacts: list[dict[str, Any]]
    # Optional human-in-the-loop approval decision for this tool call.
    hitl_decision: ApprovalDecision | None = None
111
+
112
+
113
class ReactAgentState(TypedDict):
    """State schema for the ReAct agent.

    Includes messages, step tracking, optional event emission support, artifacts, references,
    metadata, tool output management, and deep agents middleware state (todos, filesystem).
    """

    # Conversation history; merged with LangGraph's add_messages reducer.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # LangGraph-managed loop-control flags.
    is_last_step: IsLastStep
    remaining_steps: RemainingSteps
    # Optional emitter for streaming progress events.
    event_emitter: EventEmitter | None
    # Artifacts produced by tool executions during the run.
    artifacts: list[dict[str, Any]] | None
    # Reference chunks; merged with the add_references_chunks reducer.
    references: Annotated[list[Chunk], add_references_chunks]
    # Free-form metadata carried through the run.
    metadata: dict[str, Any] | None
    # Manager enabling tool output storage/sharing when configured.
    tool_output_manager: ToolOutputManager | None
    # Aggregated token usage; merged with the add_usage_metadata reducer.
    total_usage: Annotated[UsageMetadata | None, add_usage_metadata]
    # PII anonymization mapping; merged with the add_pii_mappings reducer.
    pii_mapping: Annotated[dict[str, str] | None, add_pii_mappings]
    thread_id: str

    # Deep Agents Middleware State
    todos: TodoList | None  # Planning middleware - task decomposition state

    # Step Limit State (Configurable Maximum Steps Feature)
    current_step: int  # Current step number (incremented after each LLM call or tool execution)
    delegation_depth: int  # Current depth in delegation chain (0 for root)
    delegation_chain: list[str]  # Agent names in delegation chain
    step_limit_config: StepLimitConfig | None  # Step and delegation limit configuration
140
+
141
+
142
+ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
143
+ """A ReAct agent template built on LangGraph.
144
+
145
+ This agent can use either:
146
+ - An LMInvoker (if self.lm_invoker is set by BaseAgent)
147
+ - A LangChain BaseChatModel (if self.model is set by BaseAgent)
148
+
149
+ The graph structure follows the standard ReAct pattern:
150
+ agent -> tools -> agent (loop) -> END
151
+ """
152
+
153
    def __init__(  # noqa: PLR0913
        self,
        name: str,
        instruction: str = DEFAULT_INSTRUCTION,
        model: BaseChatModel | str | Any | None = None,
        tools: Sequence[BaseTool] | None = None,
        agents: Sequence[Any] | None = None,
        description: str | None = None,
        thread_id_key: str = "thread_id",
        event_emitter: EventEmitter | None = None,
        tool_output_manager: ToolOutputManager | None = None,
        planning: bool = False,
        middlewares: Sequence[AgentMiddleware] | None = None,
        guardrail: GuardrailManager | None = None,
        step_limit_config: StepLimitConfig | None = None,
        **kwargs: Any,
    ):
        """Initialize the LangGraph ReAct Agent.

        Args:
            name: The name of the agent.
            instruction: The system instruction for the agent.
            model: The model to use (lm_invoker, LangChain model, string, etc.).
            tools: Sequence of LangChain tools available to the agent.
            agents: Optional sequence of sub-agents for delegation (coordinator mode).
            description: Human-readable description of the agent.
            thread_id_key: Key for thread ID in configuration.
            event_emitter: Optional event emitter for streaming updates.
            tool_output_manager: Optional ToolOutputManager instance for tool output management.
                When provided, enables tool output storage, reference resolution, and sharing capabilities.
                This enables multi-agent workflows where agents can access each other's tool outputs.
                If None, tool output management is disabled for this agent.
            planning: Enable planning capabilities with TodoListMiddleware. Defaults to False.
            middlewares: Optional sequence of custom middleware to COMPOSE (not override) with built-in middleware.
                Execution order: [TodoListMiddleware (if planning=True),
                                  GuardrailMiddleware (if guardrail provided),
                                  ...custom middlewares in order provided]
                All middleware hooks execute - this extends capabilities, never replaces them.
            guardrail: Optional GuardrailManager for content filtering and safety checks.
                When provided, automatically wraps in GuardrailMiddleware for transparent
                input/output filtering during agent execution.
            enable_pii: Optional toggle to enable PII handling for tool inputs and outputs.
                Passed via **kwargs, not as a named parameter.
            step_limit_config: Optional configuration for step limits and delegation depth.
            **kwargs: Additional keyword arguments passed to BaseLangGraphAgent.
        """
        # Use LangGraph's standard AgentState for ReAct
        state_schema = kwargs.pop("state_schema", ReactAgentState)
        # enable_pii is accepted via kwargs (see docstring) and normalized here.
        enable_pii = kwargs.pop("enable_pii", None)
        enable_pii = normalize_enable_pii(enable_pii)

        super().__init__(
            name=name,
            instruction=instruction,
            description=description,
            model=model,
            tools=tools,
            state_schema=state_schema,
            thread_id_key=thread_id_key,
            event_emitter=event_emitter,
            **kwargs,
        )

        # Handle tool output management
        self.tool_output_manager = tool_output_manager
        # PII handlers are created lazily per thread id.
        self._pii_handlers_by_thread: dict[str, ToolPIIHandler] = {}
        self._enable_pii = enable_pii

        # Initialize middleware tools list (populated by _setup_middleware)
        self._middleware_tools: list[BaseTool] = []

        # Setup middleware
        self._middleware_manager = self._setup_middleware(
            planning=planning,
            guardrail=guardrail,
            custom_middlewares=middlewares,
        )

        # Handle delegation agents (coordinator mode) - following legacy pattern
        if agents:
            self.register_delegation_agents(list(agents))

        self.step_limit_config = step_limit_config
235
+
236
+ def _setup_middleware(
237
+ self,
238
+ planning: bool,
239
+ guardrail: GuardrailManager | None,
240
+ custom_middlewares: Sequence[AgentMiddleware] | None,
241
+ ) -> MiddlewareManager | None:
242
+ """Setup middleware based on configuration.
243
+
244
+ Creates auto-configured middleware (planning, guardrails) and composes
245
+ with custom middleware if provided.
246
+
247
+ Args:
248
+ planning: Whether to enable TodoListMiddleware.
249
+ guardrail: Optional GuardrailManager to wrap in GuardrailMiddleware.
250
+ custom_middlewares: Optional custom middlewares to append.
251
+
252
+ Returns:
253
+ MiddlewareManager if any middleware configured, None otherwise.
254
+ """
255
+ middleware_list: list[AgentMiddleware] = []
256
+
257
+ # Auto-configure TodoListMiddleware if planning enabled
258
+ if planning:
259
+ middleware_list.append(TodoListMiddleware())
260
+
261
+ # Auto-configure GuardrailMiddleware if guardrail provided
262
+ if guardrail:
263
+ from aip_agents.guardrails.middleware import GuardrailMiddleware
264
+
265
+ middleware_list.append(GuardrailMiddleware(guardrail))
266
+
267
+ # Append custom middlewares
268
+ if custom_middlewares:
269
+ middleware_list.extend(custom_middlewares)
270
+
271
+ # Return manager if any middleware configured
272
+ if middleware_list:
273
+ manager = MiddlewareManager(middleware_list)
274
+ # Store middleware tools separately for proper rebuild support
275
+ middleware_tools = manager.get_all_tools()
276
+ if middleware_tools:
277
+ self._middleware_tools = list(middleware_tools)
278
+ # Add to resolved_tools for immediate use
279
+ self.resolved_tools = list(self.resolved_tools) + self._middleware_tools
280
+ # Enhance instruction with middleware prompt additions
281
+ self.instruction = manager.build_system_prompt(self.instruction)
282
+ return manager
283
+
284
+ return None
285
+
286
+ async def _get_effective_writer(self, writer: StreamWriter | None = None) -> StreamWriter | None:
287
+ """Get the effective stream writer, falling back to ContextVar if needed.
288
+
289
+ Args:
290
+ writer: Optional stream writer to use.
291
+
292
+ Returns:
293
+ The effective stream writer or None if retrieval fails.
294
+ """
295
+ try:
296
+ return writer or get_stream_writer()
297
+ except Exception:
298
+ return None
299
+
300
+ def _get_step_limit_manager(
301
+ self,
302
+ state: dict[str, Any],
303
+ node_type: str,
304
+ writer: StreamWriter | None = None,
305
+ count: int = 1,
306
+ manager: StepLimitManager | None = None,
307
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
308
+ """Return initialized StepLimitManager or early state update.
309
+
310
+ Args:
311
+ state: Current LangGraph state dictionary.
312
+ node_type: `"agent"` or `"tool"`; determines the fallback message format when limits are exceeded.
313
+ writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
314
+ count: Number of steps to check.
315
+ manager: Optional existing manager to reuse.
316
+
317
+ Returns:
318
+ Tuple where the first element is a state update dict when execution should stop, and the second element is the active `StepLimitManager` when limits allow the node to proceed.
319
+ """
320
+ limit_error_update, manager = self._check_step_limits_helper(
321
+ state, node_type, writer=writer, count=count, manager=manager
322
+ )
323
+ if limit_error_update:
324
+ return limit_error_update, None
325
+ if manager is None:
326
+ return {}, None
327
+ manager.set_context()
328
+ return None, manager
329
+
330
+ def _emit_step_limit_event(
331
+ self,
332
+ event_type: A2AStreamEventType,
333
+ metadata: dict[str, Any],
334
+ writer: StreamWriter | None = None,
335
+ ) -> None:
336
+ """Emit a step limit event via LangGraph stream writer or EventEmitter.
337
+
338
+ Args:
339
+ event_type: The type of event to emit.
340
+ metadata: Metadata to include in the event.
341
+ writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
342
+ """
343
+ enriched_metadata = dict(metadata)
344
+ enriched_metadata.setdefault("status", "error")
345
+ enriched_metadata.setdefault("kind", "agent_default")
346
+
347
+ event_payload = self._create_a2a_event(
348
+ event_type=event_type,
349
+ content=enriched_metadata.get("message", ""),
350
+ metadata=enriched_metadata,
351
+ )
352
+
353
+ try:
354
+ effective_writer = writer or get_stream_writer()
355
+ except Exception:
356
+ effective_writer = None
357
+
358
+ if effective_writer:
359
+ effective_writer(event_payload)
360
+ return
361
+
362
+ if self.event_emitter:
363
+ self.event_emitter.emit(event_payload["event_type"], event_payload["metadata"])
364
+
365
+ def _check_step_limits_helper(
366
+ self,
367
+ state: dict[str, Any],
368
+ node_type: str,
369
+ writer: StreamWriter | None = None,
370
+ count: int = 1,
371
+ manager: StepLimitManager | None = None,
372
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
373
+ """Check step limits and return state update if limit exceeded.
374
+
375
+ Centralized logic to avoid duplication between agent_node and tool_node.
376
+
377
+ Args:
378
+ state: Current agent state.
379
+ node_type: Either 'agent' or 'tool' to determine return message types.
380
+ writer: Optional stream writer for emitting custom events if event_emitter is missing.
381
+ count: Number of steps to check.
382
+ manager: Optional existing manager to reuse.
383
+
384
+ Returns:
385
+ Tuple of (state update dict if limit exceeded else None, active StepLimitManager instance).
386
+ """
387
+ try:
388
+ if manager is None:
389
+ manager = StepLimitManager.from_state(state)
390
+ manager.check_step_limit(agent_name=self.name, count=count)
391
+
392
+ return None, manager
393
+
394
+ except MaxStepsExceededError as e:
395
+ logger.warning(f"Agent '{self.name}': {e.error_response.message}")
396
+ metadata = {
397
+ "message": e.error_response.message,
398
+ "agent_name": e.error_response.agent_name,
399
+ "current_value": e.error_response.current_value,
400
+ "configured_limit": e.error_response.configured_limit,
401
+ }
402
+ self._emit_step_limit_event(
403
+ A2AStreamEventType.STEP_LIMIT_EXCEEDED,
404
+ metadata,
405
+ writer,
406
+ )
407
+ if node_type == "tool":
408
+ return (
409
+ {
410
+ "messages": [ToolMessage(content=f"⚠️ {e.error_response.message}", tool_call_id="step_limit")],
411
+ },
412
+ None,
413
+ )
414
+ return (
415
+ {
416
+ "messages": [AIMessage(content=f"⚠️ {e.error_response.message}")],
417
+ },
418
+ None,
419
+ )
420
+
421
+ def _rebuild_resolved_tools(self) -> None:
422
+ """Rebuild resolved tools including middleware tools.
423
+
424
+ Overrides base class to ensure middleware tools are preserved
425
+ when tools are rebuilt (e.g., after update_regular_tools).
426
+ """
427
+ # Call base class to rebuild with regular, a2a, delegation, and mcp tools
428
+ super()._rebuild_resolved_tools()
429
+
430
+ # Add middleware tools if present
431
+ if hasattr(self, "_middleware_tools") and self._middleware_tools:
432
+ self.resolved_tools.extend(self._middleware_tools)
433
+
434
+ def _handle_tool_artifacts(
435
+ self, tool_output: Any, pending_artifacts: list[dict[str, Any]]
436
+ ) -> tuple[str, list[dict[str, Any]]]:
437
+ """Handle artifact extraction from tool output.
438
+
439
+ Args:
440
+ tool_output: The output from the tool execution.
441
+ pending_artifacts: Current list of pending artifacts.
442
+
443
+ Returns:
444
+ Tuple of (agent_result_text, updated_pending_artifacts).
445
+ """
446
+ if isinstance(tool_output, dict) and "artifacts" in tool_output:
447
+ artifacts = tool_output["artifacts"]
448
+ if isinstance(artifacts, list):
449
+ pending_artifacts.extend(artifacts)
450
+ return tool_output.get("result", ""), pending_artifacts
451
+ else:
452
+ return str(tool_output), pending_artifacts
453
+
454
    # ruff: noqa: PLR0915
    def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
        """Define the ReAct agent graph structure.

        Args:
            graph_builder: The StateGraph builder to define the graph structure.

        Returns:
            Compiled LangGraph ready for execution.
        """
        # Create node functions using helper methods
        agent_node = self._create_agent_node()
        tool_node_logic = self._create_tool_node_logic()
        should_continue = self._create_should_continue_logic(END)

        # Add memory node if memory is enabled; it runs before the agent node
        # so the user query can be enriched with retrieved memory context.
        if self._memory_enabled():
            memory_enhancer_agent = self._create_memory_enhancer_agent()
            graph_builder.add_node("memory_enhancer", self._create_memory_node(memory_enhancer_agent))
            graph_builder.set_entry_point("memory_enhancer")
            graph_builder.add_edge("memory_enhancer", "agent")
        else:
            graph_builder.set_entry_point("agent")

        graph_builder.add_node("agent", agent_node)

        if self.resolved_tools:
            # Standard ReAct loop: agent -> tools -> agent until should_continue ends it.
            graph_builder.add_node("tools", tool_node_logic)
            graph_builder.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
            graph_builder.add_edge("tools", "agent")
        else:
            # No tools configured: the agent answers in a single pass.
            graph_builder.add_edge("agent", END)

        return graph_builder.compile(
            checkpointer=self.checkpointer,
        )
490
+
491
+ def _create_memory_enhancer_agent(self) -> Any:
492
+ """Create dedicated LangGraphMemoryEnhancerAgent instance for memory enhancement.
493
+
494
+ Returns:
495
+ LangGraphMemoryEnhancerAgent: Configured mini-agent for automatic memory retrieval.
496
+ """
497
+ # Lazy import to avoid circular dependency: LangGraphReactAgent imports
498
+ # LangGraphMemoryEnhancerAgent which inherits from LangGraphReactAgent.
499
+ from aip_agents.agent.langgraph_memory_enhancer_agent import ( # noqa: PLC0415
500
+ LangGraphMemoryEnhancerAgent,
501
+ )
502
+
503
+ model_id = getattr(self.lm_invoker, "model_id", None)
504
+ model = self.model or model_id
505
+ return LangGraphMemoryEnhancerAgent(
506
+ memory=self.memory,
507
+ model=model,
508
+ memory_agent_id=self.memory_agent_id,
509
+ memory_retrieval_limit=self.memory_retrieval_limit,
510
+ )
511
+
512
    def _create_memory_node(self, memory_enhancer_agent: Any) -> Any:
        """Create memory enhancement node that delegates to LangGraphMemoryEnhancerAgent.

        Args:
            memory_enhancer_agent: The LangGraphMemoryEnhancerAgent instance to use for enhancement.

        Returns:
            Callable: Async function that enhances user query with memory context.
        """

        async def memory_node(state: dict[str, Any], config: dict[str, Any] | None = None) -> dict[str, Any]:
            """Enhance user query with memory context via LangGraphMemoryEnhancerAgent.

            Args:
                state: LangGraph state containing conversation messages.
                config: Optional LangGraph configuration.

            Returns:
                State update with potentially enhanced last message.
            """
            user_query = self._extract_user_query_from_messages(state.get("messages", []))
            if not user_query:
                # No usable human message found; nothing to enhance.
                return {}

            try:
                metadata = state.get("metadata", {})
                enhanced_result = await memory_enhancer_agent.arun(query=user_query, metadata=metadata)
                enhanced_query = enhanced_result.get("output", user_query)

                if enhanced_query == user_query:
                    logger.debug(f"Agent '{self.name}': No memory enhancement needed")
                    return {}

                logger.info(f"Agent '{self.name}': Memory enhancement completed")
                enhanced_message = HumanMessage(content=enhanced_query)
                # Append enhanced message (with add_messages reducer, this creates: original + enhanced)
                return {"messages": [enhanced_message]}

            except Exception as e:
                # Memory enhancement is best-effort: on failure, fall back to the raw query.
                logger.warning(f"Agent '{self.name}': Memory enhancement failed: {e}")
                return {}

        return memory_node
555
+
556
    def _extract_user_query_from_messages(self, messages: list[Any]) -> str | None:
        """Get latest user query string from a list of messages.

        Scans from newest to oldest and stops at the first HumanMessage found:
        its content is returned when it is a non-empty string, otherwise the
        search ends with None rather than falling back to older messages.

        Args:
            messages: List of LangChain messages to search through.

        Returns:
            The content string from the most recent HumanMessage if valid, None otherwise.
        """
        if not messages:
            return None
        # Walk backwards so the most recent human message wins.
        for i in range(len(messages) - 1, -1, -1):
            msg = messages[i]
            if isinstance(msg, HumanMessage) and hasattr(msg, "content"):
                content = msg.content
                if isinstance(content, str) and content.strip():
                    return content
                # NOTE(review): the most recent human message had no usable text;
                # search deliberately stops here instead of checking older ones.
                return None
        return None
575
+
576
    def _create_agent_node(self) -> Callable[..., Awaitable[dict[str, Any]]]:
        """Create the agent node function for the graph.

        Returns:
            Async node callable that enforces step limits, invokes the
            configured LLM, and records step/usage accounting.
        """

        async def agent_node(
            state: dict[str, Any], config: dict[str, Any] | None = None, *, writer: StreamWriter = None
        ) -> dict[str, Any]:
            """Call the appropriate LLM and return new messages.

            Args:
                state: Current agent state containing messages and conversation context.
                config: Optional configuration containing thread_id and execution parameters.
                writer: Optional stream writer for emitting custom events.

            Returns:
                Updated state dictionary with new AI messages and token usage.
            """
            writer = await self._get_effective_writer(writer)
            # Enforce step limits before doing any LLM work.
            limit_error_update, manager = self._get_step_limit_manager(state, "agent", writer=writer)
            if limit_error_update:
                return limit_error_update
            if manager is None:
                return {}

            current_messages = state["messages"]

            # Execute LLM call; lm_invoker takes precedence over a LangChain model.
            try:
                if self.lm_invoker:
                    result = await self._handle_lm_invoker_call(current_messages, state, config)
                elif isinstance(self.model, BaseChatModel):
                    result = await self._handle_langchain_model_call(current_messages, state, config)
                else:
                    raise ValueError(
                        f"Agent '{self.name}': No valid LMInvoker or LangChain model configured for ReAct agent node."
                    )
            except Exception as e:
                # Lazy import to support optional guardrails dependency
                from aip_agents.guardrails.exceptions import GuardrailViolationError

                if isinstance(e, GuardrailViolationError):
                    # Surface guardrail violations as a terminal AI message instead of crashing.
                    return {
                        "messages": [
                            AIMessage(
                                content=f"⚠️ Guardrail violation: {e.result.reason}",
                                response_metadata={"finish_reason": "stop"},
                            )
                        ]
                    }
                raise

            # Increment step counter after successful execution
            manager.increment_step()
            # Update state with new step count
            result.update(manager.to_state_update())

            return result

        return agent_node
634
+
635
+ def _extract_tool_calls_from_state(self, state: dict[str, Any]) -> tuple[AIMessage | None, int]:
636
+ """Extract the last AI message and tool call count from state.
637
+
638
+ Args:
639
+ state: Current agent state.
640
+
641
+ Returns:
642
+ Tuple of (last AI message or None, count of tool calls).
643
+ """
644
+ messages = state.get("messages", [])
645
+ last_message = messages[-1] if messages else None
646
+ if not self.resolved_tools or not isinstance(last_message, AIMessage) or not last_message.tool_calls:
647
+ return None, 0
648
+ return last_message, len(last_message.tool_calls)
649
+
650
+ def _check_tool_batch_limits(
651
+ self,
652
+ state: dict[str, Any],
653
+ tool_call_count: int,
654
+ manager: StepLimitManager,
655
+ writer: StreamWriter | None,
656
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
657
+ """Check if tool batch exceeds limits.
658
+
659
+ Args:
660
+ state: Current LangGraph state dictionary.
661
+ tool_call_count: Number of tools in the current batch.
662
+ manager: Initialized StepLimitManager.
663
+ writer: Optional stream writer for events.
664
+
665
+ Returns:
666
+ Tuple of (limit update dict or None, manager instance).
667
+ """
668
+ if tool_call_count <= 1:
669
+ return None, manager
670
+ return self._get_step_limit_manager(state, "tool", writer=writer, count=tool_call_count, manager=manager)
671
+
672
    def _create_tool_node_logic(self) -> Callable[..., Awaitable[dict[str, Any]]]:
        """Create the tool node logic function for the graph.

        Returns:
            Async node callable that executes pending tool calls with
            step-limit checks before and after the batch.
        """

        async def tool_node_logic(
            state: dict[str, Any],
            config: dict[str, Any] | None = None,
            *,
            writer: StreamWriter = None,
        ) -> dict[str, Any]:
            """Execute tools with artifact payload separation and reference collection.

            Args:
                state: Current agent state.
                config: Optional execution configuration.
                writer: Optional stream writer.

            Returns:
                Updated state dictionary with tool results.
            """
            writer = await self._get_effective_writer(writer)
            # Initial limit check assumes a single step.
            limit_error, manager = self._get_step_limit_manager(state, "tool", writer=writer)
            if limit_error or manager is None:
                return limit_error or {}

            last_message, tool_call_count = self._extract_tool_calls_from_state(state)
            if not last_message:
                # No pending tool calls; nothing to execute.
                return {}

            # Re-check step limits with the actual batch count (Spec-3)
            limit_error, manager = self._check_tool_batch_limits(state, tool_call_count, manager, writer)
            if limit_error or manager is None:
                return limit_error or {}

            result = await self._execute_tool_calls(last_message, state, config)

            # Increment step after tool execution
            manager.increment_step(count=tool_call_count)
            result.update(manager.to_state_update())

            return result

        return tool_node_logic
714
+
715
    async def _execute_tool_calls(
        self, last_message: AIMessage, state: dict[str, Any], config: dict[str, Any] | None
    ) -> dict[str, Any]:
        """Execute tool calls and aggregate results.

        Runs multiple tool calls concurrently for better parallelism.

        Args:
            last_message: The AI message containing tool calls to execute.
            state: Current agent state containing messages, artifacts, and metadata.
            config: Optional configuration containing thread_id and other execution context.

        Returns:
            Updated state dictionary with tool execution results including messages,
            artifacts, references, and metadata updates.
        """
        tool_messages: list[ToolMessage] = []
        pending_artifacts: list[dict[str, Any]] = state.get("artifacts") or []
        reference_updates: list[Chunk] = []
        tool_map = {tool.name: tool for tool in self.resolved_tools}
        pii_mapping = {}

        aggregated_metadata_delta: dict[str, Any] = {}
        total_tools_token_usage: list[UsageMetadata] = []

        async def run_tool(tool_call: dict[str, Any]):
            """Run a single tool call asynchronously.

            Args:
                tool_call: Tool call dictionary.

            Returns:
                Tool result from execution.
            """
            return await self._run_single_tool_call(
                tool_map=tool_map,
                tool_call=tool_call,
                context=ToolCallContext(
                    config=config,
                    state=state,
                    pending_artifacts=pending_artifacts,
                ),
            )

        # Launch every tool call concurrently; results are folded in as they
        # finish, so accumulation order follows completion order, not call order.
        tasks = [asyncio.create_task(run_tool(tc)) for tc in last_message.tool_calls]

        for coro in asyncio.as_completed(tasks):
            tool_result = await coro
            self._accumulate_tool_result(
                tool_result,
                tool_messages,
                pending_artifacts,
                aggregated_metadata_delta,
                reference_updates,
                total_tools_token_usage,
                pii_mapping,
            )

        return self._build_tool_state_updates(
            tool_messages,
            pending_artifacts,
            reference_updates,
            aggregated_metadata_delta,
            total_tools_token_usage,
            pii_mapping,
        )
781
+
782
+ def _accumulate_tool_result( # noqa: PLR0913
783
+ self,
784
+ tool_result: Any,
785
+ tool_messages: list[ToolMessage],
786
+ pending_artifacts: list[dict[str, Any]],
787
+ aggregated_metadata_delta: dict[str, Any],
788
+ reference_updates: list[Chunk],
789
+ total_tools_token_usage: list[UsageMetadata],
790
+ pii_mapping: dict[str, str] | None,
791
+ ) -> None: # noqa: PLR0913
792
+ """Accumulate results from a single tool call.
793
+
794
+ Args:
795
+ tool_result: The result object from a single tool execution containing messages,
796
+ artifacts, metadata_delta, references, usage information, and PII mapping.
797
+ tool_messages: List to accumulate tool messages into.
798
+ pending_artifacts: List to accumulate artifacts into.
799
+ aggregated_metadata_delta: Dictionary to accumulate metadata updates into.
800
+ reference_updates: List to accumulate reference chunks into.
801
+ total_tools_token_usage: List to accumulate token usage metadata into.
802
+ pii_mapping: Dictionary to accumulate PII mappings into (mutated in place).
803
+ """
804
+ if tool_result.messages:
805
+ tool_messages.extend(tool_result.messages)
806
+ if tool_result.artifacts:
807
+ pending_artifacts.extend(tool_result.artifacts)
808
+ if tool_result.metadata_delta:
809
+ aggregated_metadata_delta.update(tool_result.metadata_delta)
810
+ if tool_result.references:
811
+ reference_updates.extend(tool_result.references)
812
+ if tool_result.step_usage:
813
+ total_tools_token_usage.append(tool_result.step_usage)
814
+ if tool_result.pii_mapping:
815
+ pii_mapping.update(tool_result.pii_mapping)
816
+
817
+ def _build_tool_state_updates(
818
+ self,
819
+ tool_messages: list[ToolMessage],
820
+ pending_artifacts: list[dict[str, Any]],
821
+ reference_updates: list[Chunk],
822
+ aggregated_metadata_delta: dict[str, Any],
823
+ total_tools_token_usage: list[UsageMetadata],
824
+ pii_mapping: dict[str, str] | None = None,
825
+ ) -> dict[str, Any]:
826
+ """Build state updates from accumulated tool results.
827
+
828
+ Args:
829
+ tool_messages: List of tool messages to include in state updates.
830
+ pending_artifacts: List of artifacts to include in state updates.
831
+ reference_updates: List of reference chunks to include in state updates.
832
+ aggregated_metadata_delta: Metadata changes to include in state updates.
833
+ total_tools_token_usage: List of token usage metadata from all tool executions.
834
+ pii_mapping: Current PII mapping to include in state updates.
835
+
836
+ Returns:
837
+ Dictionary containing state updates with messages, artifacts, references,
838
+ metadata, token usage, and PII mapping information.
839
+ """
840
+ state_updates: dict[str, Any] = {"messages": tool_messages, "artifacts": pending_artifacts}
841
+
842
+ if reference_updates:
843
+ state_updates["references"] = reference_updates
844
+
845
+ # Clean metadata delta to avoid leaking linkage-only fields
846
+ if "previous_step_ids" in aggregated_metadata_delta:
847
+ aggregated_metadata_delta = {k: v for k, v in aggregated_metadata_delta.items() if k != "previous_step_ids"}
848
+
849
+ if aggregated_metadata_delta:
850
+ state_updates["metadata"] = aggregated_metadata_delta
851
+
852
+ # Process accumulated tool usage
853
+ total_tool_usage = self._process_tool_usage(total_tools_token_usage)
854
+ if total_tool_usage:
855
+ state_updates[TOTAL_USAGE_KEY] = total_tool_usage
856
+
857
+ # Include PII mapping in state updates if present
858
+ if pii_mapping:
859
+ state_updates["pii_mapping"] = pii_mapping
860
+
861
+ return state_updates
862
+
863
+ def _create_should_continue_logic(self, end_node: str) -> Callable[[dict[str, Any]], str]:
864
+ """Create the should_continue function for conditional edges.
865
+
866
+ Args:
867
+ end_node: The name of the end node to return when execution should stop.
868
+
869
+ Returns:
870
+ Function that determines the next node based on the current state.
871
+ """
872
+
873
+ def should_continue(state: dict[str, Any]) -> str:
874
+ """Determine whether to continue to tools or end.
875
+
876
+ Args:
877
+ state: Current agent state containing messages and execution status.
878
+
879
+ Returns:
880
+ Either "tools" to continue tool execution or the end_node to stop execution.
881
+ """
882
+ messages = state.get("messages", [])
883
+ if not messages:
884
+ return end_node
885
+
886
+ last_message = messages[-1]
887
+
888
+ # Check if this is the last step
889
+ if state.get("is_last_step", False):
890
+ logger.debug(f"Agent '{self.name}': Reached last step, ending execution")
891
+ return end_node
892
+
893
+ if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
894
+ return end_node
895
+
896
+ return "tools"
897
+
898
+ return should_continue
899
+
900
+ def _add_usage_metadata_to_tool_message(
901
+ self, messages: list[ToolMessage], usage_metadata: UsageMetadata | None
902
+ ) -> None:
903
+ """Add usage metadata to a tool message's response metadata.
904
+
905
+ Args:
906
+ messages: List of tool messages to potentially update.
907
+ usage_metadata: The usage metadata to add to the first tool message, if any.
908
+
909
+ Note:
910
+ - Used for streaming purposes only, to show token usage by tool via ToolMessage response_metadata.
911
+ - Tool message that are coming from Command with single message or a dictionary will have exactly 1 message.
912
+ - For those cases, we will add usage_metadata to the response_metadata of the first message.
913
+ """
914
+ if len(messages) == 1 and isinstance(messages[0], ToolMessage) and usage_metadata is not None:
915
+ messages[0].response_metadata[USAGE_METADATA_KEY] = usage_metadata
916
+
917
+ def _process_tool_usage(self, total_tools_token_usage: list[UsageMetadata]) -> UsageMetadata | None:
918
+ """Process accumulated tool usage metadata.
919
+
920
+ Args:
921
+ total_tools_token_usage: List of UsageMetadata objects to process.
922
+
923
+ Returns:
924
+ UsageMetadata: The accumulated token usage metadata.
925
+ """
926
+ if not total_tools_token_usage:
927
+ return None
928
+
929
+ # More concise and functional
930
+ return reduce(add_usage_metadata, total_tools_token_usage, None)
931
+
932
+ def _process_command_tool_output(
933
+ self,
934
+ tool_output: Command,
935
+ tool_call: dict[str, Any],
936
+ execution_time: float,
937
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
938
+ """Convert a Command tool output into messages, artifacts, and metadata deltas.
939
+
940
+ Args:
941
+ tool_output: The Command returned by the tool.
942
+ tool_call: The tool call info (id, name, args) for ToolMessage context.
943
+ execution_time: Execution time to include in ToolMessage tool_calls.
944
+
945
+ Returns:
946
+ A tuple of (messages, artifacts, metadata_delta).
947
+ """
948
+ update: dict[str, Any] = getattr(tool_output, "update", {}) or {}
949
+
950
+ out_messages: list[ToolMessage] = []
951
+ out_artifacts: list[dict[str, Any]] = []
952
+ metadata_delta: dict[str, Any] = {}
953
+
954
+ # Artifacts
955
+ artifacts_update = update.get("artifacts")
956
+ if isinstance(artifacts_update, list):
957
+ out_artifacts.extend(artifacts_update)
958
+
959
+ # Metadata
960
+ md_update = update.get("metadata")
961
+ if isinstance(md_update, dict):
962
+ metadata_delta.update(md_update)
963
+
964
+ # Messages or fallback to result
965
+ messages_update = update.get("messages")
966
+ if isinstance(messages_update, list):
967
+ out_messages.extend(messages_update)
968
+ else:
969
+ agent_result = str(update.get("result", ""))
970
+ out_messages.append(
971
+ ToolMessage(
972
+ content=agent_result,
973
+ tool_call_id=tool_call["id"],
974
+ tool_calls={
975
+ "name": tool_call["name"],
976
+ "args": tool_call["args"],
977
+ "output": agent_result,
978
+ "time": execution_time,
979
+ },
980
+ )
981
+ )
982
+
983
+ # If metadata contains linkage info, attach to first ToolMessage response_metadata
984
+ md = update.get("metadata")
985
+ if isinstance(md, dict):
986
+ prev_ids = md.get("previous_step_ids")
987
+ if isinstance(prev_ids, list) and prev_ids and out_messages:
988
+ try:
989
+ out_messages[0].response_metadata.setdefault("previous_step_ids", [])
990
+ existing = out_messages[0].response_metadata.get("previous_step_ids", [])
991
+ combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
992
+ out_messages[0].response_metadata["previous_step_ids"] = combined
993
+ except Exception:
994
+ pass
995
+
996
+ return out_messages, out_artifacts, metadata_delta
997
+
998
+ def _process_simple_tool_output(
999
+ self,
1000
+ agent_result_text: str,
1001
+ tool_call: dict[str, Any],
1002
+ execution_time: float,
1003
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
1004
+ """Convert a simple string tool output into messages with no artifacts.
1005
+
1006
+ Args:
1007
+ agent_result_text: The string result from tool execution.
1008
+ tool_call: The tool call information containing id, name, and args.
1009
+ execution_time: Time taken to execute the tool.
1010
+
1011
+ Returns:
1012
+ Tuple of (tool_messages, artifacts) where artifacts is always an empty list.
1013
+ """
1014
+ messages = [
1015
+ ToolMessage(
1016
+ content=agent_result_text,
1017
+ tool_call_id=tool_call["id"],
1018
+ tool_calls={
1019
+ "name": tool_call["name"],
1020
+ "args": tool_call["args"],
1021
+ "output": agent_result_text,
1022
+ "time": execution_time,
1023
+ },
1024
+ )
1025
+ ]
1026
+ return messages, []
1027
+
1028
+ @deprecated(version="0.5.0", reason="Use _process_command_tool_output instead")
1029
+ def _process_legacy_tool_output(
1030
+ self,
1031
+ tool_output: dict[str, Any],
1032
+ tool_call: dict[str, Any],
1033
+ execution_time: float,
1034
+ pending_artifacts: list[dict[str, Any]],
1035
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
1036
+ """Normalize legacy dict outputs into ToolMessages and artifacts.
1037
+
1038
+ Supports legacy tools that return a mapping possibly containing 'artifacts'
1039
+ and 'result' keys.
1040
+
1041
+ Args:
1042
+ tool_output: The legacy dict output from tool execution.
1043
+ tool_call: The tool call information containing id, name, and args.
1044
+ execution_time: Time taken to execute the tool.
1045
+ pending_artifacts: Current list of pending artifacts to extend with new ones.
1046
+
1047
+ Returns:
1048
+ Tuple of (tool_messages, updated_pending_artifacts).
1049
+ """
1050
+ if isinstance(tool_output.get("artifacts"), list):
1051
+ pending_artifacts.extend(tool_output["artifacts"])
1052
+
1053
+ agent_result = str(tool_output.get("result", tool_output))
1054
+
1055
+ # Extract metadata from tool_output if present
1056
+ response_metadata = {}
1057
+ if isinstance(tool_output, dict) and isinstance(tool_output.get("metadata"), dict):
1058
+ response_metadata.update(tool_output["metadata"])
1059
+
1060
+ messages = [
1061
+ ToolMessage(
1062
+ content=agent_result,
1063
+ tool_call_id=tool_call["id"],
1064
+ tool_calls={
1065
+ "name": tool_call["name"],
1066
+ "args": tool_call["args"],
1067
+ "output": agent_result,
1068
+ "time": execution_time,
1069
+ },
1070
+ response_metadata=response_metadata,
1071
+ )
1072
+ ]
1073
+ return messages, pending_artifacts
1074
+
1075
    async def _run_single_tool_call(
        self,
        tool_map: dict[str, BaseTool],
        tool_call: dict[str, Any],
        context: ToolCallContext,
    ) -> ToolCallResult:
        """Execute a single tool call with tool output management and reference resolution.

        This method handles the complete lifecycle of a tool call including:
        - Reference resolution for tool arguments
        - Tool execution with enhanced configuration
        - Automatic and manual tool output storage
        - Error handling for reference and execution failures

        Args:
            tool_map: Mapping of tool name to tool instance.
            tool_call: The tool call information from the AI message.
            context: Tool call context containing config, state, pending artifacts, and HITL decision.

        Returns:
            ToolCallResult containing messages, artifacts, metadata_delta, references, and usage_metadata.
        """
        tool = tool_map.get(tool_call["name"])  # type: ignore[index]
        # Fall back to a generated id so downstream bookkeeping always has one.
        tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")

        # Check for HITL approval if configured
        if context.hitl_decision is None:
            try:
                context.hitl_decision = await self._check_hitl_approval(
                    tool_call=tool_call, tool_name=tool_call["name"], state=context.state
                )

                if context.hitl_decision and context.hitl_decision.decision in TOOL_EXECUTION_BLOCKING_DECISIONS:
                    # Return sentinel result for pending/rejected/skipped tools
                    return self._create_hitl_blocking_result(tool_call, context.hitl_decision)
            except Exception as e:
                # Log HITL failure but continue with normal tool execution
                logger.warning(
                    "HITL approval check failed for tool '%s' (error: %s: %s). Proceeding with tool execution.",
                    tool_call["name"],
                    type(e).__name__,
                    e,
                )

        # Execute tool and handle errors (failures come back as error-string outputs)
        tool_output, execution_time, references, updated_pii_mapping = await self._execute_tool_with_management(
            tool=tool,
            tool_call=tool_call,
            tool_call_id=tool_call_id,
            config=context.config,
            state=context.state,
        )

        # Process tool output into messages and artifacts
        messages, artifacts, metadata_delta = self._process_tool_output_result(
            tool_output=tool_output,
            tool_call=tool_call,
            execution_time=execution_time,
            pending_artifacts=context.pending_artifacts,
        )

        # Capture and merge new PII mapping from subagent
        updated_pii_mapping = self._merge_tool_pii_mapping(metadata_delta, updated_pii_mapping)

        # If HITL was required, annotate the first ToolMessage with HITL metadata
        try:
            if context.hitl_decision and messages:
                first_msg = messages[0]
                if isinstance(first_msg, ToolMessage):
                    # Copy before mutating so shared metadata dicts are not modified in place.
                    response_metadata = getattr(first_msg, "response_metadata", None) or {}
                    response_metadata = dict(response_metadata)
                    hitl_model = HitlMetadata.from_decision(context.hitl_decision)
                    response_metadata["hitl"] = hitl_model.as_payload()
                    first_msg.response_metadata = response_metadata
        except Exception as e:
            # Non-fatal: continue even if metadata injection fails
            logger.warning(f"Failed to inject HITL metadata into tool message: {e}")

        # Extract and add usage metadata (streaming-only annotation on the message)
        tool_usage_metadata = extract_token_usage_from_tool_output(tool_output)
        self._add_usage_metadata_to_tool_message(messages, tool_usage_metadata)

        return ToolCallResult(
            messages=messages,
            artifacts=artifacts,
            metadata_delta=metadata_delta,
            references=references,
            step_usage=tool_usage_metadata,
            pii_mapping=updated_pii_mapping,
        )
1165
+
1166
+ def _merge_tool_pii_mapping(
1167
+ self,
1168
+ metadata_delta: dict[str, Any],
1169
+ updated_pii_mapping: dict[str, str] | None,
1170
+ ) -> dict[str, str] | None:
1171
+ """Merge PII mapping from metadata delta into existing mapping.
1172
+
1173
+ Args:
1174
+ metadata_delta: Metadata delta returned from tool execution.
1175
+ updated_pii_mapping: PII mapping produced during tool execution, if any.
1176
+
1177
+ Returns:
1178
+ New merged PII mapping or None if no PII information is present.
1179
+ """
1180
+ if "pii_mapping" not in metadata_delta:
1181
+ return updated_pii_mapping
1182
+
1183
+ metadata_pii_mapping = metadata_delta.get("pii_mapping") or {}
1184
+ if not isinstance(metadata_pii_mapping, dict) or not metadata_pii_mapping:
1185
+ return updated_pii_mapping
1186
+
1187
+ if updated_pii_mapping:
1188
+ return {**updated_pii_mapping, **metadata_pii_mapping}
1189
+
1190
+ return metadata_pii_mapping
1191
+
1192
    async def _execute_tool_with_management(
        self,
        tool: BaseTool | None,
        tool_call: dict[str, Any],
        tool_call_id: str,
        config: dict[str, Any] | None,
        state: dict[str, Any],
    ) -> tuple[Any, float, list[Chunk], dict[str, str] | None]:
        """Execute tool with output management, reference resolution, and error handling.

        Failures never propagate: both reference-resolution and execution errors
        are converted into error-string tool outputs so the graph keeps running.

        Args:
            tool: The tool instance to execute, or None if not found.
            tool_call: The tool call information from the AI message.
            tool_call_id: Unique identifier for this tool call.
            config: Optional configuration passed down to the tool.
            state: Current agent state containing tool output manager.

        Returns:
            Tuple of (tool_output, execution_time, references, updated_pii_mapping).
        """
        execution_time = 0.0
        references: list[Chunk] = []
        updated_pii_mapping: dict[str, str] | None = None

        if not tool:
            # Unknown tool name: report as a string result instead of raising.
            return f"Error: Tool '{tool_call['name']}' not found.", execution_time, references, updated_pii_mapping

        start_time = time.time()
        try:
            # Resolve tool argument references
            resolved_args = self._resolve_tool_arguments(tool_call, state, config)
            predefined_pii_mapping = self._get_predefined_pii_mapping(state, config)

            enable_pii = self._enable_pii
            if enable_pii is False:
                # PII explicitly disabled: mapping-only handler still honors existing tags.
                pii_handler = ToolPIIHandler.create_mapping_only(predefined_pii_mapping)
            else:
                pii_handler = self._create_pii_handler(predefined_pii_mapping, config)

            # Deanonymize tool arguments if PII handler is enabled
            resolved_args = self._deanonymize_tool_args(pii_handler, resolved_args)

            # Create enhanced tool configuration with output management
            tool_config = self._create_enhanced_tool_config(config, state, tool_call["name"], tool_call_id)

            arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)

            # Prefer the streaming entry point when the tool exposes a callable one.
            if arun_streaming_method and callable(arun_streaming_method):
                tool_output = await self._execute_tool_with_streaming(tool, tool_call, tool_config)
            else:
                tool_output = await tool.ainvoke(resolved_args, tool_config)

            references = extract_references_from_tool(tool, tool_output)

            # Anonymize tool output if PII handler is enabled
            tool_output, updated_pii_mapping = self._anonymize_tool_output(pii_handler, tool_output)

            # Handle automatic storage if enabled
            self._handle_automatic_tool_storage(
                ToolStorageParams(
                    tool=tool,
                    tool_output=tool_output,
                    tool_call=tool_call,
                    tool_call_id=tool_call_id,
                    resolved_args=resolved_args,
                    state=state,
                ),
                config=config,
            )

            return tool_output, time.time() - start_time, references, updated_pii_mapping

        except ToolReferenceError as ref_error:
            # Reference-resolution failures surface to the model as a string result.
            tool_output = f"Reference error in tool '{tool_call['name']}': {str(ref_error)}"
            logger.error(f"Tool reference error: {ref_error}", exc_info=True)
            return tool_output, time.time() - start_time, references, updated_pii_mapping
        except Exception as e:  # noqa: BLE001
            # Any other failure likewise becomes an error string; deliberate catch-all.
            tool_output = f"Error executing tool '{tool_call['name']}': {str(e)}"
            logger.error(f"Tool execution error: {e}", exc_info=True)
            return tool_output, time.time() - start_time, references, updated_pii_mapping
1272
+
1273
+ def _get_predefined_pii_mapping(
1274
+ self,
1275
+ state: dict[str, Any],
1276
+ config: dict[str, Any] | None,
1277
+ ) -> dict[str, str] | None:
1278
+ """Get predefined PII mapping from state or configuration.
1279
+
1280
+ This helper centralizes the logic for resolving an existing PII mapping,
1281
+ first checking the agent state metadata, then falling back to the config
1282
+ metadata if available.
1283
+
1284
+ Args:
1285
+ state: Current LangGraph agent state.
1286
+ config: Optional LangGraph configuration dictionary.
1287
+
1288
+ Returns:
1289
+ The resolved PII mapping dictionary if found, otherwise None.
1290
+ """
1291
+ metadata_from_state = state.get("metadata") or {}
1292
+ mapping_from_state = metadata_from_state.get("pii_mapping")
1293
+ if isinstance(mapping_from_state, dict) and mapping_from_state:
1294
+ return mapping_from_state # type: ignore[return-value]
1295
+
1296
+ if not config:
1297
+ return None
1298
+
1299
+ metadata_from_config = config.get("metadata") or {}
1300
+ mapping_from_config = metadata_from_config.get("pii_mapping")
1301
+ if isinstance(mapping_from_config, dict) and mapping_from_config:
1302
+ return mapping_from_config # type: ignore[return-value]
1303
+
1304
+ return None
1305
+
1306
+ def _create_pii_handler(
1307
+ self, predefined_pii_mapping: dict[str, str] | None, config: dict[str, Any] | None
1308
+ ) -> ToolPIIHandler | None:
1309
+ """Create (or reuse) a PII handler scoped to the current thread.
1310
+
1311
+ Thin wrapper around ToolPIIHandler.create_if_enabled to keep
1312
+ _execute_tool_with_management focused on orchestration. The handler can
1313
+ operate in mapping-only mode when no NER credentials are configured.
1314
+
1315
+ Args:
1316
+ predefined_pii_mapping: Existing PII mapping to seed the handler with.
1317
+ config: LangGraph configuration needed to scope handlers per thread.
1318
+
1319
+ Returns:
1320
+ A ToolPIIHandler instance when mapping/NER config is available, otherwise None.
1321
+ """
1322
+ thread_id: str | None = None
1323
+ if config:
1324
+ try:
1325
+ thread_id = self._extract_thread_id_from_config(config)
1326
+ except Exception:
1327
+ thread_id = None
1328
+ if thread_id:
1329
+ handler = self._pii_handlers_by_thread.get(thread_id)
1330
+ if handler:
1331
+ return handler
1332
+ handler = ToolPIIHandler.create_if_enabled(predefined_pii_mapping)
1333
+ if handler and thread_id:
1334
+ self._pii_handlers_by_thread[thread_id] = handler
1335
+
1336
+ return handler
1337
+
1338
+ def _deanonymize_tool_args(
1339
+ self,
1340
+ pii_handler: ToolPIIHandler | None,
1341
+ resolved_args: dict[str, Any],
1342
+ ) -> dict[str, Any]:
1343
+ """Deanonymize tool arguments using the provided PII handler.
1344
+
1345
+ Args:
1346
+ pii_handler: Optional ToolPIIHandler instance.
1347
+ resolved_args: Tool arguments after reference resolution.
1348
+
1349
+ Returns:
1350
+ Tool arguments with PII tags replaced by real values when a handler
1351
+ is available, otherwise the original arguments.
1352
+ """
1353
+ if not pii_handler:
1354
+ return resolved_args
1355
+ return pii_handler.deanonymize_tool_args(resolved_args)
1356
+
1357
+ def _anonymize_tool_output(
1358
+ self,
1359
+ pii_handler: ToolPIIHandler | None,
1360
+ tool_output: Any,
1361
+ ) -> tuple[Any, dict[str, str] | None]:
1362
+ """Anonymize tool output and return updated PII mapping when enabled.
1363
+
1364
+ Args:
1365
+ pii_handler: Optional ToolPIIHandler instance.
1366
+ tool_output: Raw output returned by the tool.
1367
+
1368
+ Returns:
1369
+ Tuple of (possibly anonymized tool_output, updated PII mapping or None).
1370
+ """
1371
+ if not pii_handler:
1372
+ return tool_output, None
1373
+
1374
+ anonymized_output, updated_mapping = pii_handler.anonymize_tool_output(tool_output)
1375
+ return anonymized_output, updated_mapping
1376
+
1377
+ def _resolve_tool_arguments(
1378
+ self, tool_call: dict[str, Any], state: dict[str, Any], config: dict[str, Any] | None = None
1379
+ ) -> dict[str, Any]:
1380
+ """Resolve tool argument references using the tool output manager.
1381
+
1382
+ Args:
1383
+ tool_call: The tool call information containing arguments.
1384
+ state: Current agent state containing tool output manager.
1385
+ config: Optional configuration containing thread_id information.
1386
+
1387
+ Returns:
1388
+ Resolved arguments dictionary.
1389
+
1390
+ Raises:
1391
+ ToolReferenceError: If reference resolution fails.
1392
+ """
1393
+ manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
1394
+ resolved_args = tool_call["args"]
1395
+
1396
+ if manager and self.tool_output_manager:
1397
+ thread_id = self._extract_thread_id_from_config(config)
1398
+
1399
+ if manager.has_outputs(thread_id):
1400
+ resolver = ToolReferenceResolver(self.tool_output_manager.config)
1401
+ resolved_args = resolver.resolve_references(resolved_args, manager, thread_id)
1402
+ logger.debug(
1403
+ f"Resolved references for tool '{tool_call['name']}' in thread '{thread_id}', "
1404
+ f"Resolved args: {resolved_args}"
1405
+ )
1406
+
1407
+ return resolved_args
1408
+
1409
+ def _create_enhanced_tool_config(
1410
+ self, config: dict[str, Any] | None, state: dict[str, Any], tool_name: str, tool_call_id: str
1411
+ ) -> dict[str, Any]:
1412
+ """Create enhanced tool configuration with output management capabilities.
1413
+
1414
+ Args:
1415
+ config: Base configuration passed down to the tool.
1416
+ state: Current agent state containing tool output manager.
1417
+ tool_name: Name of the tool being executed.
1418
+ tool_call_id: Unique identifier for this tool call.
1419
+
1420
+ Returns:
1421
+ Enhanced tool configuration dictionary.
1422
+ """
1423
+ tool_config = self._create_tool_config(config, state, tool_name=tool_name)
1424
+
1425
+ # Add tool output management capabilities
1426
+ manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
1427
+ if manager and self.tool_output_manager:
1428
+ tool_config[TOOL_OUTPUT_MANAGER_KEY] = manager
1429
+ tool_config[CALL_ID_KEY] = tool_call_id
1430
+
1431
+ # Attach coordinator parent step id so delegated sub-agents can link their start step properly
1432
+ try:
1433
+ thread_id = self._extract_thread_id_from_config(config)
1434
+ parent_map = self._tool_parent_map_by_thread.get(thread_id, {})
1435
+ parent_step_id = parent_map.get(str(tool_call_id))
1436
+ if parent_step_id:
1437
+ tool_config["parent_step_id"] = parent_step_id
1438
+ cfg = tool_config.get("configurable")
1439
+ if not isinstance(cfg, dict):
1440
+ cfg = {}
1441
+ cfg["parent_step_id"] = parent_step_id
1442
+ tool_config["configurable"] = cfg
1443
+ except Exception:
1444
+ pass
1445
+
1446
+ return tool_config
1447
+
1448
+ def _extract_thread_id_from_config(self, config: dict[str, Any] | None) -> str:
1449
+ """Extract thread_id from LangGraph configuration.
1450
+
1451
+ Since BaseLangGraphAgent._create_graph_config() guarantees a thread ID is always present,
1452
+ this method should always find a valid thread ID. If config is somehow None (which
1453
+ should never happen), creates a new UUID.
1454
+
1455
+ Args:
1456
+ config: LangGraph configuration dictionary.
1457
+
1458
+ Returns:
1459
+ Thread ID string from the configuration.
1460
+ """
1461
+ # This should never happen since _create_graph_config always creates config
1462
+ if not config:
1463
+ thread_id = str(uuid.uuid4())
1464
+ logger.warning(f"Agent '{self.name}': No config provided, generated new thread_id: {thread_id}")
1465
+ return thread_id
1466
+
1467
+ configurable = config["configurable"]
1468
+ thread_key = self.thread_id_key or "thread_id"
1469
+ return str(configurable[thread_key])
1470
+
1471
+ def _handle_automatic_tool_storage(
1472
+ self,
1473
+ params: ToolStorageParams,
1474
+ config: dict[str, Any] | None = None,
1475
+ ) -> None:
1476
+ """Handle automatic storage for tools with store_final_output enabled.
1477
+
1478
+ Args:
1479
+ params: ToolStorageParams containing all necessary parameters.
1480
+ config: Optional configuration containing thread_id information.
1481
+ """
1482
+ manager = params.state.get(TOOL_OUTPUT_MANAGER_KEY)
1483
+
1484
+ if (
1485
+ manager
1486
+ and self.tool_output_manager
1487
+ and params.tool_output is not None
1488
+ and getattr(params.tool, "store_final_output", False)
1489
+ ):
1490
+ # Extract thread_id from config
1491
+ thread_id = self._extract_thread_id_from_config(config)
1492
+
1493
+ storable_data = self._extract_storable_data(params.tool_output)
1494
+ store_params = StoreOutputParams(
1495
+ call_id=params.tool_call_id,
1496
+ tool_name=params.tool_call["name"],
1497
+ data=storable_data,
1498
+ tool_args=params.resolved_args,
1499
+ thread_id=thread_id,
1500
+ description=None, # No automatic description
1501
+ tags=None,
1502
+ agent_name=self.name,
1503
+ )
1504
+ manager.store_output(store_params)
1505
+ logger.debug(
1506
+ f"Auto-stored output for tool '{params.tool_call['name']}' with call_id: {params.tool_call_id} "
1507
+ f"in thread: {thread_id}"
1508
+ )
1509
+
1510
+ def _process_tool_output_result(
1511
+ self,
1512
+ tool_output: Any,
1513
+ tool_call: dict[str, Any],
1514
+ execution_time: float,
1515
+ pending_artifacts: list[dict[str, Any]],
1516
+ ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
1517
+ """Process tool output into messages, artifacts, and metadata.
1518
+
1519
+ Args:
1520
+ tool_output: The output returned by the tool.
1521
+ tool_call: The tool call information from the AI message.
1522
+ execution_time: Time taken to execute the tool.
1523
+ pending_artifacts: List of artifacts to be updated with new artifacts from this tool call.
1524
+
1525
+ Returns:
1526
+ Tuple of (messages, artifacts, metadata_delta).
1527
+ """
1528
+ metadata_delta: dict[str, Any] = {}
1529
+
1530
+ # Handle Command outputs
1531
+ if isinstance(tool_output, Command):
1532
+ return self._handle_command_output(tool_output, tool_call, execution_time, metadata_delta)
1533
+
1534
+ if isinstance(tool_output, dict):
1535
+ return self._handle_legacy_output(tool_output, tool_call, execution_time, pending_artifacts, metadata_delta)
1536
+
1537
+ # Handle string outputs, coercing other simple types
1538
+ if not isinstance(tool_output, str):
1539
+ tool_output = str(tool_output)
1540
+ return self._handle_string_output(tool_output, tool_call, execution_time)
1541
+
1542
+ def _handle_command_output(
1543
+ self, tool_output: Command, tool_call: dict[str, Any], execution_time: float, metadata_delta: dict[str, Any]
1544
+ ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
1545
+ """Handle Command type tool outputs.
1546
+
1547
+ Args:
1548
+ tool_output: The Command object returned by the tool.
1549
+ tool_call: The tool call information containing id, name, and args.
1550
+ execution_time: Time taken to execute the tool.
1551
+ metadata_delta: Dictionary to accumulate metadata updates into.
1552
+
1553
+ Returns:
1554
+ Tuple of (messages, artifacts, updated_metadata_delta).
1555
+ """
1556
+ messages, artifacts, md_delta = self._process_command_tool_output(
1557
+ tool_output=tool_output,
1558
+ tool_call=tool_call,
1559
+ execution_time=execution_time,
1560
+ )
1561
+ if md_delta:
1562
+ metadata_delta.update(md_delta)
1563
+
1564
+ update: dict[str, Any] = getattr(tool_output, "update", {}) or {}
1565
+ pii_mapping = update.get("pii_mapping")
1566
+ if isinstance(pii_mapping, dict) and pii_mapping:
1567
+ metadata_delta["pii_mapping"] = pii_mapping
1568
+
1569
+ return messages, artifacts, metadata_delta
1570
+
1571
+ def _handle_string_output(
1572
+ self, tool_output: str, tool_call: dict[str, Any], execution_time: float
1573
+ ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
1574
+ """Handle string type tool outputs.
1575
+
1576
+ Args:
1577
+ tool_output: The string output from tool execution.
1578
+ tool_call: The tool call information containing id, name, and args.
1579
+ execution_time: Time taken to execute the tool.
1580
+
1581
+ Returns:
1582
+ Tuple of (messages, artifacts, metadata_delta) where artifacts is empty
1583
+ and metadata_delta is empty dict.
1584
+ """
1585
+ messages, artifacts = self._process_simple_tool_output(
1586
+ agent_result_text=tool_output,
1587
+ tool_call=tool_call,
1588
+ execution_time=execution_time,
1589
+ )
1590
+ return messages, artifacts, {}
1591
+
1592
+ def _handle_legacy_output(
1593
+ self,
1594
+ tool_output: Any,
1595
+ tool_call: dict[str, Any],
1596
+ execution_time: float,
1597
+ pending_artifacts: list[dict[str, Any]],
1598
+ metadata_delta: dict[str, Any],
1599
+ ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
1600
+ """Handle legacy dict and other tool outputs.
1601
+
1602
+ Args:
1603
+ tool_output: The output from tool execution (typically a dict).
1604
+ tool_call: The tool call information containing id, name, and args.
1605
+ execution_time: Time taken to execute the tool.
1606
+ pending_artifacts: Current list of pending artifacts to extend with new ones.
1607
+ metadata_delta: Dictionary to accumulate metadata updates into.
1608
+
1609
+ Returns:
1610
+ Tuple of (messages, updated_pending_artifacts, updated_metadata_delta).
1611
+ """
1612
+ messages, artifacts = self._process_legacy_tool_output(
1613
+ tool_output=tool_output, # type: ignore[arg-type]
1614
+ tool_call=tool_call,
1615
+ execution_time=execution_time,
1616
+ pending_artifacts=pending_artifacts,
1617
+ )
1618
+
1619
+ # Process metadata from legacy dict outputs
1620
+ if isinstance(tool_output, dict):
1621
+ self._process_legacy_metadata(tool_output, messages, metadata_delta)
1622
+
1623
+ return messages, artifacts, metadata_delta
1624
+
1625
+ def _process_legacy_metadata(
1626
+ self, tool_output: dict[str, Any], messages: list[BaseMessage], metadata_delta: dict[str, Any]
1627
+ ) -> None:
1628
+ """Process metadata from legacy dict tool outputs.
1629
+
1630
+ Args:
1631
+ tool_output: The dict tool output containing metadata
1632
+ messages: List of messages to potentially update with metadata
1633
+ metadata_delta: Metadata delta to update
1634
+ """
1635
+ md = tool_output.get("metadata")
1636
+ if not isinstance(md, dict):
1637
+ return
1638
+
1639
+ prev_ids = md.get("previous_step_ids")
1640
+ if isinstance(prev_ids, list):
1641
+ metadata_delta["previous_step_ids"] = list(prev_ids)
1642
+ self._attach_previous_step_ids_to_message(messages, prev_ids)
1643
+
1644
+ def _attach_previous_step_ids_to_message(self, messages: list[BaseMessage], prev_ids: list[Any]) -> None:
1645
+ """Attach previous step IDs to the first ToolMessage's response metadata.
1646
+
1647
+ Args:
1648
+ messages: List of messages to update
1649
+ prev_ids: Previous step IDs to attach
1650
+ """
1651
+ if not messages or not isinstance(messages[0], ToolMessage):
1652
+ return
1653
+
1654
+ try:
1655
+ tool_message = messages[0]
1656
+ tool_message.response_metadata.setdefault("previous_step_ids", [])
1657
+ existing = tool_message.response_metadata.get("previous_step_ids", [])
1658
+ combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
1659
+ tool_message.response_metadata["previous_step_ids"] = combined
1660
+ except Exception:
1661
+ pass
1662
+
1663
+ async def _execute_tool_with_streaming(
1664
+ self,
1665
+ tool: BaseTool,
1666
+ tool_call: dict[str, Any],
1667
+ tool_config: dict[str, Any] | None = None,
1668
+ ) -> str:
1669
+ """Execute a tool with streaming support and emit streaming chunks.
1670
+
1671
+ This method dynamically passes all tool arguments to the streaming method
1672
+ using **kwargs, making it flexible for tools with different parameter structures.
1673
+
1674
+ Args:
1675
+ tool: The tool instance to execute.
1676
+ tool_call: The tool call information from the AI message.
1677
+ tool_config: Optional configuration passed down to the tool.
1678
+
1679
+ Returns:
1680
+ The final output from the tool execution.
1681
+ """
1682
+ writer: StreamWriter = get_stream_writer()
1683
+ final_output: Any = None
1684
+ saw_tool_result = False
1685
+ start_time = time.time()
1686
+
1687
+ tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")
1688
+ tool_name = tool_call.get("name", "")
1689
+ tool_args = self._normalize_tool_args(tool_call.get("args"))
1690
+
1691
+ logger.info("Streaming tool start detected: agent=%s tool=%s call_id=%s", self.name, tool_name, tool_call_id)
1692
+
1693
+ try:
1694
+ self._emit_default_tool_call_event(writer, tool_name, tool_call_id, tool_args)
1695
+
1696
+ streaming_kwargs = self._build_streaming_kwargs(tool_args, tool_config)
1697
+
1698
+ async for chunk in tool.arun_streaming(**streaming_kwargs):
1699
+ final_output, saw_tool_result = self._handle_streaming_chunk(
1700
+ chunk=chunk,
1701
+ writer=writer,
1702
+ tool_name=tool_call["name"],
1703
+ current_output=final_output,
1704
+ saw_tool_result=saw_tool_result,
1705
+ )
1706
+
1707
+ final_output = self._finalize_streaming_tool(
1708
+ writer=writer,
1709
+ tool_name=tool_name,
1710
+ tool_call_id=tool_call_id,
1711
+ tool_args=tool_args,
1712
+ final_output=final_output,
1713
+ saw_tool_result=saw_tool_result,
1714
+ start_time=start_time,
1715
+ )
1716
+ logger.info(
1717
+ "Streaming tool completed: agent=%s tool=%s call_id=%s",
1718
+ self.name,
1719
+ tool_name,
1720
+ tool_call_id,
1721
+ )
1722
+
1723
+ except Exception as e:
1724
+ final_output = f"Error during streaming execution of tool '{tool_call['name']}': {str(e)}"
1725
+ logger.error(f"Tool streaming error: {final_output}", exc_info=True)
1726
+ self._emit_tool_error_event(writer, tool_call["name"], final_output)
1727
+
1728
+ return final_output
1729
+
1730
+ @staticmethod
1731
+ def _normalize_tool_args(raw_tool_args: Any) -> dict[str, Any]:
1732
+ """Normalize raw tool arguments into a dictionary.
1733
+
1734
+ Args:
1735
+ raw_tool_args: The raw tool arguments to normalize.
1736
+
1737
+ Returns:
1738
+ A dictionary containing the normalized tool arguments.
1739
+ """
1740
+ if isinstance(raw_tool_args, dict):
1741
+ return raw_tool_args
1742
+ if raw_tool_args is None:
1743
+ return {}
1744
+ return {"value": raw_tool_args}
1745
+
1746
+ @staticmethod
1747
+ def _build_streaming_kwargs(tool_args: dict[str, Any], tool_config: dict[str, Any] | None) -> dict[str, Any]:
1748
+ """Create kwargs payload for streaming execution.
1749
+
1750
+ Args:
1751
+ tool_args: The tool arguments to include in the streaming kwargs.
1752
+ tool_config: Optional tool configuration to include.
1753
+
1754
+ Returns:
1755
+ A dictionary containing the streaming kwargs.
1756
+ """
1757
+ streaming_kwargs = tool_args.copy()
1758
+ if tool_config:
1759
+ streaming_kwargs["config"] = tool_config
1760
+ return streaming_kwargs
1761
+
1762
    def _handle_streaming_chunk(
        self,
        *,
        chunk: Any,
        writer: StreamWriter,
        tool_name: str,
        current_output: Any,
        saw_tool_result: bool,
    ) -> tuple[Any, bool]:
        """Process a single streaming chunk and update output/result flag.

        Non-dict chunks are ignored. TOOL_CALL chunks are swallowed because a
        standardized TOOL_CALL event has already been emitted by
        `_emit_default_tool_call_event`. Every other chunk is forwarded to the
        stream and its output (if any) replaces the running output.

        Args:
            chunk: The streaming chunk to process.
            writer: The stream writer for output.
            tool_name: The name of the tool being executed.
            current_output: The current accumulated output.
            saw_tool_result: Whether a tool result has been seen.

        Returns:
            A tuple of (updated_output, saw_tool_result).
        """
        if not isinstance(chunk, dict):
            return current_output, saw_tool_result

        event_type_raw = chunk.get("event_type")
        event_type = self._resolve_tool_event_type(event_type_raw)
        # Skip TOOL_CALL chunks even when the raw value could not be resolved
        # to an enum member but matches the enum value case-insensitively.
        if event_type == A2AStreamEventType.TOOL_CALL or (
            event_type is None
            and isinstance(event_type_raw, str)
            and event_type_raw.lower() == A2AStreamEventType.TOOL_CALL.value
        ):
            return current_output, saw_tool_result

        self._create_tool_streaming_event(chunk, writer, tool_name)
        new_output = self._extract_output_from_chunk(chunk, current_output)
        if event_type == A2AStreamEventType.STATUS_UPDATE:
            metadata = chunk.get("metadata")
            kind = None
            if isinstance(metadata, dict):
                kind = metadata.get(MetadataFieldKeys.KIND)
            # A FINAL_THINKING_STEP status update counts as the tool's result,
            # so no fallback TOOL_RESULT event is emitted at finalization.
            # getattr handles both enum members (.value) and plain strings.
            if getattr(kind, "value", kind) == Kind.FINAL_THINKING_STEP.value:
                return new_output, True
        if event_type == A2AStreamEventType.TOOL_RESULT:
            return new_output, True
        return new_output, saw_tool_result
1807
+
1808
+ def _emit_default_tool_call_event(
1809
+ self,
1810
+ writer: StreamWriter,
1811
+ tool_name: str,
1812
+ tool_call_id: str,
1813
+ tool_args: dict[str, Any],
1814
+ ) -> None:
1815
+ """Emit a standardized TOOL_CALL event for streaming tools.
1816
+
1817
+ Args:
1818
+ writer: The stream writer to emit events to.
1819
+ tool_name: Name of the tool being called.
1820
+ tool_call_id: Unique identifier for the tool call.
1821
+ tool_args: Arguments passed to the tool.
1822
+ """
1823
+ thread_id = _THREAD_ID_CVAR.get()
1824
+ if thread_id:
1825
+ emitted = self._emitted_tool_calls_by_thread.get(thread_id, set())
1826
+ if tool_call_id in emitted:
1827
+ logger.info(
1828
+ "Skipping fallback tool call event: agent=%s tool=%s call_id=%s",
1829
+ self.name,
1830
+ tool_name,
1831
+ tool_call_id,
1832
+ )
1833
+ return
1834
+
1835
+ tool_call_info = {
1836
+ "tool_calls": [
1837
+ {
1838
+ "id": tool_call_id,
1839
+ "name": tool_name,
1840
+ "args": tool_args,
1841
+ }
1842
+ ],
1843
+ "status": "running",
1844
+ }
1845
+ metadata = {
1846
+ MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
1847
+ MetadataFieldKeys.STATUS: Status.RUNNING,
1848
+ MetadataFieldKeys.TOOL_INFO: tool_call_info,
1849
+ }
1850
+ activity_info = create_tool_activity_info({"tool_info": tool_call_info})
1851
+ event = {
1852
+ "event_type": A2AStreamEventType.TOOL_CALL,
1853
+ "content": f"Processing with tools: {tool_name}",
1854
+ "metadata": metadata,
1855
+ "tool_info": tool_call_info,
1856
+ "thinking_and_activity_info": activity_info,
1857
+ }
1858
+ self._create_tool_streaming_event(event, writer, tool_name)
1859
+
1860
+ @staticmethod
1861
+ def _extract_output_from_chunk(chunk: dict[str, Any], current_output: Any) -> Any:
1862
+ """Return most recent tool output derived from streaming chunk.
1863
+
1864
+ Args:
1865
+ chunk: The streaming chunk containing tool information.
1866
+ current_output: The current output value to fall back to.
1867
+
1868
+ Returns:
1869
+ The extracted output from the chunk or the current_output if not found.
1870
+ """
1871
+ tool_info = chunk.get("tool_info")
1872
+ if isinstance(tool_info, dict):
1873
+ return tool_info.get("output", current_output)
1874
+ return current_output
1875
+
1876
+ def _finalize_streaming_tool(
1877
+ self,
1878
+ *,
1879
+ writer: StreamWriter,
1880
+ tool_name: str,
1881
+ tool_call_id: str,
1882
+ tool_args: dict[str, Any],
1883
+ final_output: Any,
1884
+ saw_tool_result: bool,
1885
+ start_time: float,
1886
+ ) -> str:
1887
+ """Emit final tool event when needed and return final output as string.
1888
+
1889
+ Args:
1890
+ writer: The stream writer to emit events to.
1891
+ tool_name: Name of the tool being called.
1892
+ tool_call_id: Unique identifier for the tool call.
1893
+ tool_args: Arguments passed to the tool.
1894
+ final_output: The final output from the tool execution.
1895
+ saw_tool_result: Whether a TOOL_RESULT event was observed during streaming.
1896
+ start_time: Timestamp when the tool execution started.
1897
+
1898
+ Returns:
1899
+ The final output as a string.
1900
+ """
1901
+ output_text = final_output
1902
+ if output_text is None:
1903
+ output_text = f"Tool '{tool_name}' completed successfully"
1904
+ if not isinstance(output_text, str):
1905
+ output_text = str(output_text)
1906
+
1907
+ logger.debug(
1908
+ "Streaming tool finalize check: agent=%s tool=%s call_id=%s saw_tool_result=%s",
1909
+ self.name,
1910
+ tool_name,
1911
+ tool_call_id,
1912
+ saw_tool_result,
1913
+ )
1914
+ if not saw_tool_result:
1915
+ logger.debug(
1916
+ "Streaming tool finalize emitting default result: agent=%s tool=%s call_id=%s",
1917
+ self.name,
1918
+ tool_name,
1919
+ tool_call_id,
1920
+ )
1921
+ self._emit_default_tool_result_event(
1922
+ writer=writer,
1923
+ tool_name=tool_name,
1924
+ tool_call_id=tool_call_id,
1925
+ tool_args=tool_args,
1926
+ output_text=output_text,
1927
+ start_time=start_time,
1928
+ )
1929
+
1930
+ return output_text
1931
+
1932
+ def _emit_default_tool_result_event(
1933
+ self,
1934
+ *,
1935
+ writer: StreamWriter,
1936
+ tool_name: str,
1937
+ tool_call_id: str,
1938
+ tool_args: dict[str, Any],
1939
+ output_text: str,
1940
+ start_time: float,
1941
+ ) -> None:
1942
+ """Emit a standardized TOOL_RESULT event for streaming tools.
1943
+
1944
+ Args:
1945
+ writer: The stream writer to emit events to.
1946
+ tool_name: Name of the tool that was executed.
1947
+ tool_call_id: Unique identifier for the tool call.
1948
+ tool_args: Arguments passed to the tool.
1949
+ output_text: The output text from the tool execution.
1950
+ start_time: Timestamp when the tool execution started.
1951
+ """
1952
+ execution_time = time.time() - start_time
1953
+ tool_result_info = {
1954
+ "name": tool_name,
1955
+ "args": tool_args,
1956
+ "output": output_text,
1957
+ "execution_time": execution_time,
1958
+ }
1959
+ metadata = {
1960
+ MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
1961
+ MetadataFieldKeys.STATUS: Status.FINISHED,
1962
+ MetadataFieldKeys.TOOL_INFO: tool_result_info,
1963
+ }
1964
+ activity_info = create_tool_activity_info({"tool_info": tool_result_info})
1965
+ event = {
1966
+ "event_type": A2AStreamEventType.TOOL_RESULT,
1967
+ "content": output_text,
1968
+ "metadata": metadata,
1969
+ "tool_info": tool_result_info,
1970
+ "thinking_and_activity_info": activity_info,
1971
+ }
1972
+ self._create_tool_streaming_event(event, writer, tool_name)
1973
+
1974
+ def _emit_tool_error_event(self, writer: StreamWriter, tool_name: str, error_msg: str) -> None:
1975
+ """Emit a tool error event to the stream.
1976
+
1977
+ Args:
1978
+ writer: Stream writer to emit events.
1979
+ tool_name: Name of the tool that encountered an error.
1980
+ error_msg: The error message.
1981
+ """
1982
+ a2a_event = self._create_a2a_event(
1983
+ event_type=A2AStreamEventType.ERROR,
1984
+ content=f"Error in {tool_name}: {error_msg}",
1985
+ tool_info={
1986
+ "name": tool_name,
1987
+ "error": error_msg,
1988
+ },
1989
+ )
1990
+ writer(a2a_event)
1991
+
1992
+ async def _execute_abefore_model_hook(self, state: dict[str, Any]) -> None:
1993
+ """Asynchronously execute abefore_model middleware hook and update state.
1994
+
1995
+ Args:
1996
+ state: Current agent state to potentially update.
1997
+ """
1998
+ if self._middleware_manager:
1999
+ try:
2000
+ before_updates = await self._middleware_manager.abefore_model(state)
2001
+ if before_updates:
2002
+ state.update(before_updates)
2003
+ except Exception as e:
2004
+ # Lazy import to support optional guardrails dependency
2005
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2006
+
2007
+ if isinstance(e, GuardrailViolationError):
2008
+ # Re-raise guardrail violations to be caught by the agent node
2009
+ raise
2010
+ logger.error(f"Agent '{self.name}': Middleware abefore_model hook failed: {e}")
2011
+
2012
+ async def _execute_aafter_model_hook(self, state_updates: dict[str, Any], state: dict[str, Any]) -> None:
2013
+ """Asynchronously execute aafter_model middleware hook.
2014
+
2015
+ Args:
2016
+ state_updates: Updates to be merged into state.
2017
+ state: Current agent state for context.
2018
+ """
2019
+ if self._middleware_manager:
2020
+ try:
2021
+ after_updates = await self._middleware_manager.aafter_model(state)
2022
+ if after_updates:
2023
+ state_updates.update(after_updates)
2024
+ except Exception as e:
2025
+ # Lazy import to support optional guardrails dependency
2026
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2027
+
2028
+ if isinstance(e, GuardrailViolationError):
2029
+ # Re-raise guardrail violations
2030
+ raise
2031
+ logger.error(f"Agent '{self.name}': Middleware aafter_model hook failed: {e}")
2032
+
2033
+ def _execute_before_model_hook(self, state: dict[str, Any]) -> None:
2034
+ """Execute before_model middleware hook and update state.
2035
+
2036
+ Args:
2037
+ state: Current agent state to potentially update.
2038
+ """
2039
+ if self._middleware_manager:
2040
+ try:
2041
+ before_updates = self._middleware_manager.before_model(state)
2042
+ if before_updates:
2043
+ state.update(before_updates)
2044
+ except Exception as e:
2045
+ # Lazy import to support optional guardrails dependency
2046
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2047
+
2048
+ if isinstance(e, GuardrailViolationError):
2049
+ # Re-raise guardrail violations to be caught by the agent node
2050
+ raise
2051
+ logger.error(f"Agent '{self.name}': Middleware before_model hook failed: {e}")
2052
+
2053
+ def _execute_modify_model_request_hook(
2054
+ self, messages: list[Any], enhanced_instruction: str, state: dict[str, Any]
2055
+ ) -> tuple[list[Any], str]:
2056
+ """Execute modify_model_request middleware hook.
2057
+
2058
+ Args:
2059
+ messages: Current messages to potentially modify.
2060
+ enhanced_instruction: Current system prompt to potentially modify.
2061
+ state: Current agent state for context.
2062
+
2063
+ Returns:
2064
+ Tuple of (potentially modified messages, potentially modified system prompt).
2065
+ """
2066
+ if not self._middleware_manager:
2067
+ return messages, enhanced_instruction
2068
+
2069
+ try:
2070
+ model_request: ModelRequest = {
2071
+ "messages": messages,
2072
+ "tools": self.resolved_tools or [],
2073
+ "system_prompt": enhanced_instruction,
2074
+ }
2075
+ model_request = self._middleware_manager.modify_model_request(model_request, state)
2076
+
2077
+ modified_messages = model_request.get("messages", messages)
2078
+ modified_prompt = model_request.get("system_prompt", enhanced_instruction)
2079
+
2080
+ return modified_messages, modified_prompt
2081
+ except Exception as e:
2082
+ logger.error(f"Agent '{self.name}': Middleware modify_model_request hook failed: {e}")
2083
+ return messages, enhanced_instruction
2084
+
2085
+ def _execute_after_model_hook(self, state_updates: dict[str, Any], state: dict[str, Any]) -> None:
2086
+ """Execute after_model middleware hook and update state_updates.
2087
+
2088
+ Args:
2089
+ state_updates: Dictionary to update with middleware changes.
2090
+ state: Current agent state for context.
2091
+ """
2092
+ if self._middleware_manager:
2093
+ try:
2094
+ after_updates = self._middleware_manager.after_model(state)
2095
+ if after_updates:
2096
+ state_updates.update(after_updates)
2097
+ except Exception as e:
2098
+ logger.error(f"Agent '{self.name}': Middleware after_model hook failed: {e}")
2099
+
2100
    async def _handle_lm_invoker_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LMInvoker model calls with bridge conversion and tool output context.

        Flow: before-model middleware -> instruction enrichment -> message
        conversion -> LLM invocation -> token-usage extraction -> after-model
        middleware.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.
        """
        # Execute before_model middleware hook (may mutate `state` in place)
        await self._execute_abefore_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        # Execute modify_model_request middleware hook.
        # NOTE(review): only the (possibly modified) system prompt is kept; any
        # message modifications from middleware are discarded on this path,
        # unlike _handle_langchain_model_call -- confirm this asymmetry is intended.
        _, enhanced_instruction = self._execute_modify_model_request_hook(
            list(current_messages), enhanced_instruction, state
        )

        messages = convert_langchain_messages_to_gllm_messages(list(current_messages), enhanced_instruction)

        # Prefer a request-scoped emitter from state over the agent-level one.
        effective_event_emitter = state.get("event_emitter") or self.event_emitter

        if self.resolved_tools:
            self.lm_invoker.set_tools(self.resolved_tools)

        # Debug timing for LLM invocation
        _t0 = time.perf_counter()
        logger.info(f"Agent '{self.name}': LLM invoke start (tools={len(self.resolved_tools)})")
        lm_output = await self.lm_invoker.invoke(messages=messages, event_emitter=effective_event_emitter)
        _dt = time.perf_counter() - _t0
        logger.info(f"Agent '{self.name}': LLM invoke finished in {_dt:.3f}s")

        ai_message = convert_lm_output_to_langchain_message(lm_output)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        await self._execute_aafter_model_hook(state_updates, state)

        return state_updates
2151
+
2152
    async def _handle_langchain_model_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LangChain BaseChatModel calls with tool output context.

        Flow: before-model middleware -> instruction enrichment -> prompt
        assembly (system message first) -> middleware request modification ->
        model invocation with bound tools -> token-usage extraction ->
        after-model middleware.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.
        """
        # Execute before_model middleware hook (may mutate `state` in place)
        await self._execute_abefore_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        langchain_prompt: list[BaseMessage] = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        # Execute modify_model_request middleware hook
        langchain_prompt, enhanced_instruction = self._execute_modify_model_request_hook(
            langchain_prompt, enhanced_instruction, state
        )

        # Rebuild the prompt if middleware broke its structure (no leading
        # SystemMessage) or changed the system prompt; this keeps the system
        # message authoritative but discards other middleware message edits in
        # that case -- NOTE(review): confirm that trade-off is intended.
        if (
            not langchain_prompt
            or not isinstance(langchain_prompt[0], SystemMessage)
            or langchain_prompt[0].content != enhanced_instruction
        ):
            langchain_prompt = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        model_with_tools = self.model.bind_tools(self.resolved_tools) if self.resolved_tools else self.model

        ai_message = await model_with_tools.ainvoke(langchain_prompt, config)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        await self._execute_aafter_model_hook(state_updates, state)

        return state_updates
2201
+
2202
+ def _add_user_id_memory_tool_config(self, metadata: dict[str, Any], memory_user_id: str) -> None:
2203
+ """Add user ID to memory tool config.
2204
+
2205
+ Args:
2206
+ metadata: The metadata to add the user ID to.
2207
+ memory_user_id: The user ID to add.
2208
+ """
2209
+ try:
2210
+ tool_cfgs = metadata.get(TOOL_CONFIGS_KEY, {})
2211
+ per_tool_config = tool_cfgs.get(MEMORY_SEARCH_TOOL_NAME)
2212
+ if not isinstance(per_tool_config, dict):
2213
+ per_tool_config = {}
2214
+ per_tool_config["user_id"] = memory_user_id
2215
+ tool_cfgs[MEMORY_SEARCH_TOOL_NAME] = per_tool_config
2216
+ metadata[TOOL_CONFIGS_KEY] = tool_cfgs
2217
+ except Exception as e:
2218
+ # Non-fatal; metadata injection is best-effort
2219
+ logger.warning("Failed to add user ID to memory tool config: %s", e)
2220
+
2221
    def _prepare_graph_input(self, input_data: str | dict[str, Any], **kwargs: Any) -> dict[str, Any]:
        """Convert user input to graph state format.

        Extracts mixed metadata schema supporting per-tool configuration.
        Delegation tools are isolated and do not receive parent per-tool metadata.
        Initializes tool output management for efficient tool result sharing.

        Args:
            input_data: The user's input (typically a query string).
            **kwargs: Additional keyword arguments including optional metadata.
                - thread_id: Thread identifier passed from _create_graph_config.

        Returns:
            Dictionary representing the initial graph state with messages, metadata, artifacts,
            and tool output management components.

        Raises:
            TypeError: If input_data is neither a string nor a dict with a "query" key.
        """
        if isinstance(input_data, str):
            query = input_data
        elif isinstance(input_data, dict) and "query" in input_data:
            query = input_data["query"]
        else:
            raise TypeError(f"Unsupported input type for LangGraphReactAgent: {type(input_data)}")

        # `or []` guards against an explicit messages=None kwarg.
        existing_messages = kwargs.get("messages", []) or []
        messages: list[BaseMessage] = existing_messages + [HumanMessage(content=query)]

        # Extract metadata for tools and agent context
        metadata = self._extract_metadata_from_kwargs(**kwargs)

        # If caller specified memory_user_id, inject it as per-tool config for the Mem0 tool
        memory_user_id: str | None = kwargs.get("memory_user_id")
        if memory_user_id and self._memory_enabled():
            self._add_user_id_memory_tool_config(metadata, memory_user_id)

        # thread_id is passed explicitly from the caller after _create_graph_config
        thread_id = kwargs.get("thread_id")

        # Use the agent's tool output manager (shared or private)
        step_limit_config = kwargs.get("step_limit_config") or self.step_limit_config

        # Step limit context inheritance (Spec-2).
        # ContextVar.get() raises LookupError when the var has no value and no
        # default; each guard falls back to a fresh root context.
        try:
            inherited_depth = _DELEGATION_DEPTH_CVAR.get()
        except LookupError:
            inherited_depth = 0

        try:
            inherited_chain = list(_DELEGATION_CHAIN_CVAR.get())
        except LookupError:
            inherited_chain = []

        try:
            inherited_budget = _REMAINING_STEP_BUDGET_CVAR.get()
        except LookupError:
            inherited_budget = None

        # Set step_limit_config in ContextVar so delegation tools can access it
        if step_limit_config:
            _STEP_LIMIT_CONFIG_CVAR.set(step_limit_config)

        graph_input = {
            "messages": messages,
            "event_emitter": kwargs.get("event_emitter"),
            "artifacts": [],
            "metadata": metadata,
            "tool_output_manager": self.tool_output_manager,
            "thread_id": thread_id,
            # Step limit state initialization
            "current_step": 0,  # Start at step 0
            "delegation_depth": inherited_depth,
            "delegation_chain": inherited_chain,
            # asdict: the state carries a plain dict, not the dataclass itself.
            "step_limit_config": asdict(step_limit_config) if step_limit_config else None,
            "remaining_step_budget": inherited_budget,
        }

        return graph_input
2297
+
2298
+ def _resolve_tool_metadata(self, tool_name: str, metadata: dict[str, Any] | None) -> dict[str, Any]:
2299
+ """Resolve effective metadata for a specific tool given the mixed schema.
2300
+
2301
+ Metadata Resolution Hierarchy (lowest to highest precedence):
2302
+
2303
+ 1. Agent-level flat defaults: Apply to all tools from self.tool_configs
2304
+ - Skips 'tool_configs' key and dict values (per-tool configs)
2305
+
2306
+ 2. Agent-level per-tool defaults: From self.tool_configs[tool_name] or
2307
+ self.tool_configs['tool_configs'][tool_name]
2308
+
2309
+ 3. Request-level global metadata: From metadata kwargs, excluding 'tool_configs' key
2310
+
2311
+ 4. Request-level per-tool metadata: From metadata['tool_configs'][tool_name]
2312
+ - Highest precedence, overrides all previous layers
2313
+
2314
+ Tool names are sanitized for consistent lookup across all layers.
2315
+
2316
+ Args:
2317
+ tool_name: Sanitized runtime tool name (e.g., 'delegate_to_report_generator')
2318
+ metadata: Raw metadata from kwargs (flat dict or mixed schema)
2319
+
2320
+ Returns:
2321
+ Merged metadata for this tool with proper precedence hierarchy applied.
2322
+ """
2323
+ effective_metadata: dict[str, Any] = {}
2324
+
2325
+ # Layer 1: Agent-level defaults (lowest precedence)
2326
+ self._apply_agent_defaults(effective_metadata, tool_name)
2327
+
2328
+ # Layer 2: Request-level global metadata (middle precedence)
2329
+ self._apply_global_metadata(effective_metadata, metadata)
2330
+
2331
+ # Layer 3: Request-level per-tool metadata (highest precedence)
2332
+ self._apply_per_tool_metadata(effective_metadata, tool_name, metadata)
2333
+
2334
+ return effective_metadata
2335
+
2336
+ def _apply_agent_defaults(self, effective_metadata: dict[str, Any], tool_name: str) -> None:
2337
+ """Apply agent-level default configurations to effective metadata.
2338
+
2339
+ This method implements a 3-layer agent configuration hierarchy:
2340
+
2341
+ 1. Flat agent defaults: Apply to ALL tools from self.tool_configs
2342
+ - Processes top-level key-value pairs (excluding TOOL_CONFIGS_KEY)
2343
+ - Skips dictionary values as they are per-tool configurations
2344
+ - Example: {"api_timeout": 30, "retry_count": 3}
2345
+
2346
+ 2. Agent per-tool defaults (direct key mapping): From self.tool_configs[tool_name]
2347
+ - Direct tool name as key in agent configuration
2348
+ - Example: self.tool_configs["search_tool"] = {"max_results": 10}
2349
+
2350
+ 3. Agent per-tool defaults (nested structure): From self.tool_configs[TOOL_CONFIGS_KEY][tool_name]
2351
+ - Tool configurations nested under TOOL_CONFIGS_KEY
2352
+ - Tool names are sanitized for consistent lookup
2353
+ - Example: self.tool_configs["tool_configs"]["search_tool"] = {"max_results": 10}
2354
+
2355
+ Configuration Precedence (later layers override earlier ones):
2356
+ Flat defaults < Direct per-tool < Nested per-tool
2357
+
2358
+ Args:
2359
+ effective_metadata: The metadata dict to update with agent defaults
2360
+ tool_name: The sanitized tool name to apply configurations for
2361
+ """
2362
+ if not isinstance(self.tool_configs, dict):
2363
+ return
2364
+
2365
+ # Flat agent defaults (apply to all tools)
2366
+ for k, v in self.tool_configs.items():
2367
+ if k != TOOL_CONFIGS_KEY and not isinstance(v, dict):
2368
+ effective_metadata[k] = v
2369
+
2370
+ # Agent per-tool defaults (direct key mapping)
2371
+ agent_direct = self.tool_configs.get(tool_name)
2372
+ if isinstance(agent_direct, dict):
2373
+ effective_metadata.update(agent_direct)
2374
+
2375
+ # Agent per-tool defaults (nested under 'tool_configs')
2376
+ agent_nested_map = self.tool_configs.get(TOOL_CONFIGS_KEY)
2377
+ if isinstance(agent_nested_map, dict):
2378
+ sanitized_map = self._sanitize_tool_names_map(agent_nested_map)
2379
+ agent_nested = sanitized_map.get(tool_name)
2380
+ if isinstance(agent_nested, dict):
2381
+ effective_metadata.update(agent_nested)
2382
+
2383
+ def _apply_global_metadata(self, effective_metadata: dict[str, Any], metadata: dict[str, Any] | None) -> None:
2384
+ """Apply request-level global metadata to effective metadata.
2385
+
2386
+ Args:
2387
+ effective_metadata: The metadata dict to update
2388
+ metadata: Raw metadata from request
2389
+ """
2390
+ if not (metadata and isinstance(metadata, dict)):
2391
+ return
2392
+
2393
+ # Extract global metadata (excluding per-tool section)
2394
+ global_metadata = {k: v for k, v in metadata.items() if k != TOOL_CONFIGS_KEY}
2395
+ effective_metadata.update(global_metadata)
2396
+
2397
+ def _apply_per_tool_metadata(
2398
+ self, effective_metadata: dict[str, Any], tool_name: str, metadata: dict[str, Any] | None
2399
+ ) -> None:
2400
+ """Apply request-level per-tool metadata to effective metadata.
2401
+
2402
+ Args:
2403
+ effective_metadata: The metadata dict to update
2404
+ tool_name: The sanitized tool name
2405
+ metadata: Raw metadata from request
2406
+ """
2407
+ if metadata and isinstance(metadata, dict):
2408
+ tools_metadata = metadata.get(TOOL_CONFIGS_KEY, {})
2409
+ if isinstance(tools_metadata, dict):
2410
+ sanitized_tools_map = self._sanitize_tool_names_map(tools_metadata)
2411
+ tool_specific = sanitized_tools_map.get(tool_name, {})
2412
+ if isinstance(tool_specific, dict):
2413
+ effective_metadata.update(tool_specific)
2414
+
2415
+ def _sanitize_tool_names_map(self, tools_map: dict[str, Any]) -> dict[str, Any]:
2416
+ """Sanitize tool names in a mapping for consistent lookup.
2417
+
2418
+ Args:
2419
+ tools_map: Dictionary with potentially unsanitized tool names as keys
2420
+
2421
+ Returns:
2422
+ Dictionary with sanitized tool names as keys
2423
+ """
2424
+ sanitized_map = {}
2425
+ for user_key, tool_meta in tools_map.items():
2426
+ sanitized_key = self.name_preprocessor.sanitize_tool_name(user_key)
2427
+ sanitized_map[sanitized_key] = tool_meta
2428
+ return sanitized_map
2429
+
2430
    def _create_tool_config(
        self, base_config: dict[str, Any] | None, state: dict[str, Any], tool_name: str | None = None
    ) -> dict[str, Any]:
        """Create enriched tool configuration with metadata and context.

        When a tool name is given, metadata is resolved through the layered
        precedence hierarchy (_resolve_tool_metadata); otherwise the raw state
        metadata is used as-is.

        Args:
            base_config: The base configuration passed to the tool node.
            state: The current agent state containing metadata and other context.
            tool_name: Optional tool name for per-tool metadata resolution.

        Returns:
            dict[str, Any]: Enriched configuration for tool execution.
        """
        # Shallow copy: top-level keys are isolated, nested dicts are shared.
        tool_config = base_config.copy() if base_config else {}

        state_metadata = state.get("metadata")
        if tool_name:
            effective_metadata = self._resolve_tool_metadata(tool_name, state_metadata)
        else:
            effective_metadata = state_metadata if isinstance(state_metadata, dict) else {}

        if effective_metadata:
            if "metadata" not in tool_config:
                tool_config["metadata"] = effective_metadata
            else:
                # NOTE(review): because of the shallow copy above, this update
                # mutates the caller's nested "metadata" dict when base_config
                # carries one -- confirm that sharing is intended.
                tool_config["metadata"].update(effective_metadata)
            logger.debug(f"Agent '{self.name}': Passing metadata to tool '{tool_name}': {effective_metadata}")

        return tool_config
2459
+
2460
+ def _extract_storable_data(self, tool_output: Any) -> Any:
2461
+ """Extract storable data from tool output for the tool output management system.
2462
+
2463
+ This method determines what part of a tool's output should be stored for later
2464
+ reference by other tools. It handles different output formats and extracts the
2465
+ most relevant data for storage.
2466
+
2467
+ The extraction logic varies by type:
2468
+ - Command objects: Extracts the 'result' field from the update dict, or the entire update dict
2469
+ - String objects: Returns the string as-is
2470
+ - Dict objects: Returns the 'result' key if present, otherwise the entire dict
2471
+ - Other types: Converts to string representation
2472
+
2473
+ This method is used in the tool output management system to automatically store
2474
+ outputs from tools that have `store_final_output=True` set. The extracted data can
2475
+ then be referenced by other tools using the `$tool_output.<call_id>` syntax.
2476
+
2477
+ Example:
2478
+ For a Command object with update = {"result": "success", "data": [1, 2, 3]},
2479
+ this method would return "success".
2480
+
2481
+ For a dict = {"result": "completed", "status": "ok"},
2482
+ this method would return "completed".
2483
+
2484
+ For a dict = {"status": "ok", "data": [1, 2, 3]} (no "result" key),
2485
+ this method would return the entire dict.
2486
+
2487
+ Args:
2488
+ tool_output: The raw output from a tool execution. Can be any type including
2489
+ Command, str, dict, or other objects.
2490
+
2491
+ Returns:
2492
+ The data that should be stored in the tool output management system.
2493
+ The return type depends on the input type:
2494
+ - Command -> dict or the value of update.get("result")
2495
+ - str -> str (unchanged)
2496
+ - dict -> dict (either the value of .get("result") or the original dict)
2497
+ - other -> str (string representation of the object)
2498
+ """
2499
+ if isinstance(tool_output, Command):
2500
+ update = getattr(tool_output, "update", {}) or {}
2501
+ return update.get("result", update)
2502
+ elif isinstance(tool_output, str):
2503
+ return tool_output
2504
+ elif isinstance(tool_output, dict):
2505
+ return tool_output.get("result", tool_output)
2506
+ else:
2507
+ return str(tool_output)
2508
+
2509
    def _build_tool_output_aware_instruction(
        self, base_instruction: str, state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> str:
        """Build LLM instruction that includes context about available tool outputs.

        This method enhances the base instruction with information about previously
        stored tool outputs, allowing the LLM to make informed decisions about
        which outputs to reference in subsequent tool calls.

        Args:
            base_instruction: The original system instruction for the agent.
            state: Current agent state containing the tool output manager.
            config: Optional configuration containing thread_id information.

        Returns:
            Enhanced instruction string that includes tool output context, or
            ``base_instruction`` unchanged when the feature is inactive or no
            outputs exist for the thread.
        """
        # Feature gate: both the state-carried manager and the agent-level
        # manager must be present, otherwise pass the instruction through.
        manager = state.get(TOOL_OUTPUT_MANAGER_KEY)

        if not manager or not self.tool_output_manager:
            return base_instruction

        # Stored outputs are scoped per conversation thread.
        thread_id = self._extract_thread_id_from_config(config)

        if not manager.has_outputs(thread_id):
            return base_instruction
        # Cap the summary at 10 entries so the prompt stays bounded for
        # long-running threads.
        outputs_summary = manager.generate_summary(max_entries=10, thread_id=thread_id)

        # Build enhanced instruction
        # NOTE(review): dedent() runs AFTER f-string interpolation, so if
        # base_instruction or outputs_summary contain lines without the common
        # leading indent, dedent becomes a no-op and the template's indentation
        # leaks into the prompt — confirm this is the intended behavior.
        prompt = dedent(f"""
        {base_instruction}

        <TOOL_OUTPUT_REFERENCES>

        # Goal
        - Use the most relevant stored tool output via "$tool_output.<call_id>" to avoid copying large data.

        # Usage
        - Syntax: "$tool_output.<call_id>" in any tool argument; returns the full stored output.
        - IDs: Use only those listed below; do not invent or modify.
        - Selection: Pick the most relevant (usually most recent).
        - Don’ts: Don’t paste raw output or expand references.
        - Errors: Invalid/missing IDs fail—ask for the correct call_id or run the prerequisite tool.

        # Example
        - tool_name.run(tool_argument="$tool_output.abc123")

        # User Output Schema
        - "reference": "$tool_output.<call_id>", "tool": "<tool_name>", "agent": "<agent_name>", "data_preview": "<truncated preview>"

        Available Outputs
        {outputs_summary}
        </TOOL_OUTPUT_REFERENCES>
        """)  # noqa: E501
        return prompt
2564
+
2565
+ def _cleanup_thread_context(self, current_thread_id: str | None, token: Any) -> None:
2566
+ """Extend base cleanup to dispose cached PII handlers.
2567
+
2568
+ Args:
2569
+ current_thread_id: ID of the thread whose context is being cleaned up.
2570
+ token: Cancellation or execution token passed from the caller.
2571
+
2572
+ Returns:
2573
+ None. This method performs cleanup side effects only.
2574
+ """
2575
+ super()._cleanup_thread_context(current_thread_id, token)
2576
+ if current_thread_id:
2577
+ self._pii_handlers_by_thread.pop(current_thread_id, None)
2578
+
2579
+ def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
2580
+ """Convert final graph state to user-friendly output.
2581
+
2582
+ Args:
2583
+ final_state_result: The final state from graph execution.
2584
+
2585
+ Returns:
2586
+ Formatted output dictionary.
2587
+ """
2588
+ return self._extract_output_from_final_state(final_state_result)
2589
+
2590
+
2591
class LangGraphAgent(LangGraphReactAgent):
    """Alias for LangGraphReactAgent (presumably kept so older import names keep working)."""
2593
+
2594
+
2595
class LangChainAgent(LangGraphReactAgent):
    """Alias for LangGraphReactAgent (presumably kept so older import names keep working)."""