aip-agents-binary 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aip-agents-binary might be problematic. Click here for more details.

Files changed (612):
  1. aip_agents/__init__.py +65 -0
  2. aip_agents/__init__.pyi +19 -0
  3. aip_agents/a2a/__init__.py +19 -0
  4. aip_agents/a2a/__init__.pyi +3 -0
  5. aip_agents/a2a/server/__init__.py +10 -0
  6. aip_agents/a2a/server/__init__.pyi +4 -0
  7. aip_agents/a2a/server/base_executor.py +1086 -0
  8. aip_agents/a2a/server/base_executor.pyi +73 -0
  9. aip_agents/a2a/server/google_adk_executor.py +198 -0
  10. aip_agents/a2a/server/google_adk_executor.pyi +51 -0
  11. aip_agents/a2a/server/langflow_executor.py +180 -0
  12. aip_agents/a2a/server/langflow_executor.pyi +43 -0
  13. aip_agents/a2a/server/langgraph_executor.py +270 -0
  14. aip_agents/a2a/server/langgraph_executor.pyi +47 -0
  15. aip_agents/a2a/types.py +232 -0
  16. aip_agents/a2a/types.pyi +132 -0
  17. aip_agents/agent/__init__.py +27 -0
  18. aip_agents/agent/__init__.pyi +9 -0
  19. aip_agents/agent/base_agent.py +970 -0
  20. aip_agents/agent/base_agent.pyi +221 -0
  21. aip_agents/agent/base_langgraph_agent.py +3037 -0
  22. aip_agents/agent/base_langgraph_agent.pyi +233 -0
  23. aip_agents/agent/google_adk_agent.py +926 -0
  24. aip_agents/agent/google_adk_agent.pyi +141 -0
  25. aip_agents/agent/google_adk_constants.py +6 -0
  26. aip_agents/agent/google_adk_constants.pyi +3 -0
  27. aip_agents/agent/hitl/__init__.py +24 -0
  28. aip_agents/agent/hitl/__init__.pyi +6 -0
  29. aip_agents/agent/hitl/config.py +28 -0
  30. aip_agents/agent/hitl/config.pyi +15 -0
  31. aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
  32. aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
  33. aip_agents/agent/hitl/manager.py +532 -0
  34. aip_agents/agent/hitl/manager.pyi +200 -0
  35. aip_agents/agent/hitl/models.py +18 -0
  36. aip_agents/agent/hitl/models.pyi +3 -0
  37. aip_agents/agent/hitl/prompt/__init__.py +9 -0
  38. aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
  39. aip_agents/agent/hitl/prompt/base.py +42 -0
  40. aip_agents/agent/hitl/prompt/base.pyi +24 -0
  41. aip_agents/agent/hitl/prompt/deferred.py +73 -0
  42. aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
  43. aip_agents/agent/hitl/registry.py +149 -0
  44. aip_agents/agent/hitl/registry.pyi +101 -0
  45. aip_agents/agent/interface.py +138 -0
  46. aip_agents/agent/interface.pyi +81 -0
  47. aip_agents/agent/interfaces.py +65 -0
  48. aip_agents/agent/interfaces.pyi +44 -0
  49. aip_agents/agent/langflow_agent.py +464 -0
  50. aip_agents/agent/langflow_agent.pyi +133 -0
  51. aip_agents/agent/langgraph_memory_enhancer_agent.py +767 -0
  52. aip_agents/agent/langgraph_memory_enhancer_agent.pyi +50 -0
  53. aip_agents/agent/langgraph_react_agent.py +2856 -0
  54. aip_agents/agent/langgraph_react_agent.pyi +170 -0
  55. aip_agents/agent/system_instruction_context.py +34 -0
  56. aip_agents/agent/system_instruction_context.pyi +13 -0
  57. aip_agents/clients/__init__.py +10 -0
  58. aip_agents/clients/__init__.pyi +4 -0
  59. aip_agents/clients/langflow/__init__.py +10 -0
  60. aip_agents/clients/langflow/__init__.pyi +4 -0
  61. aip_agents/clients/langflow/client.py +477 -0
  62. aip_agents/clients/langflow/client.pyi +140 -0
  63. aip_agents/clients/langflow/types.py +18 -0
  64. aip_agents/clients/langflow/types.pyi +7 -0
  65. aip_agents/constants.py +23 -0
  66. aip_agents/constants.pyi +7 -0
  67. aip_agents/credentials/manager.py +132 -0
  68. aip_agents/examples/__init__.py +5 -0
  69. aip_agents/examples/__init__.pyi +0 -0
  70. aip_agents/examples/compare_streaming_client.py +783 -0
  71. aip_agents/examples/compare_streaming_client.pyi +48 -0
  72. aip_agents/examples/compare_streaming_server.py +142 -0
  73. aip_agents/examples/compare_streaming_server.pyi +18 -0
  74. aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
  75. aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
  76. aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
  77. aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
  78. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
  79. aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
  80. aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
  81. aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
  82. aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
  83. aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
  84. aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
  85. aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
  86. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
  87. aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
  88. aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
  89. aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
  90. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
  91. aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
  92. aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
  93. aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
  94. aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
  95. aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
  96. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
  97. aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
  98. aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
  99. aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
  100. aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
  101. aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
  102. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
  103. aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
  104. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
  105. aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
  106. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
  107. aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
  108. aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
  109. aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
  110. aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
  111. aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
  112. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
  113. aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
  114. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
  115. aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
  116. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
  117. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
  118. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
  119. aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
  120. aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
  121. aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
  122. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
  123. aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
  124. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
  125. aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
  126. aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
  127. aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
  128. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
  129. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
  130. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
  131. aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
  132. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
  133. aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
  134. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
  135. aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
  136. aip_agents/examples/hello_world_google_adk.py +41 -0
  137. aip_agents/examples/hello_world_google_adk.pyi +5 -0
  138. aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
  139. aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
  140. aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
  141. aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
  142. aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
  143. aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
  144. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
  145. aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
  146. aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
  147. aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
  148. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
  149. aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
  150. aip_agents/examples/hello_world_google_adk_stream.py +44 -0
  151. aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
  152. aip_agents/examples/hello_world_langchain.py +28 -0
  153. aip_agents/examples/hello_world_langchain.pyi +5 -0
  154. aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
  155. aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
  156. aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
  157. aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
  158. aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
  159. aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
  160. aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
  161. aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
  162. aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
  163. aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
  164. aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
  165. aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
  166. aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
  167. aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
  168. aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
  169. aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
  170. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
  171. aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
  172. aip_agents/examples/hello_world_langchain_stream.py +36 -0
  173. aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
  174. aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
  175. aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
  176. aip_agents/examples/hello_world_langflow_agent.py +163 -0
  177. aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
  178. aip_agents/examples/hello_world_langgraph.py +39 -0
  179. aip_agents/examples/hello_world_langgraph.pyi +5 -0
  180. aip_agents/examples/hello_world_langgraph_gl_connector_twitter.py +44 -0
  181. aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
  182. aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
  183. aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
  184. aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
  185. aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
  186. aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
  187. aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
  188. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
  189. aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
  190. aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
  191. aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
  192. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
  193. aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
  194. aip_agents/examples/hello_world_langgraph_stream.py +43 -0
  195. aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
  196. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
  197. aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
  198. aip_agents/examples/hello_world_model_switch_cli.py +210 -0
  199. aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
  200. aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
  201. aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
  202. aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
  203. aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
  204. aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
  205. aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
  206. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
  207. aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
  208. aip_agents/examples/hello_world_pii_logger.py +21 -0
  209. aip_agents/examples/hello_world_pii_logger.pyi +5 -0
  210. aip_agents/examples/hello_world_ptc.py +49 -0
  211. aip_agents/examples/hello_world_ptc.pyi +5 -0
  212. aip_agents/examples/hello_world_sentry.py +133 -0
  213. aip_agents/examples/hello_world_sentry.pyi +21 -0
  214. aip_agents/examples/hello_world_step_limits.py +273 -0
  215. aip_agents/examples/hello_world_step_limits.pyi +17 -0
  216. aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
  217. aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
  218. aip_agents/examples/hello_world_tool_output_client.py +55 -0
  219. aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
  220. aip_agents/examples/hello_world_tool_output_server.py +114 -0
  221. aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
  222. aip_agents/examples/hitl_demo.py +724 -0
  223. aip_agents/examples/hitl_demo.pyi +67 -0
  224. aip_agents/examples/mcp_configs/configs.py +63 -0
  225. aip_agents/examples/mcp_servers/common.py +76 -0
  226. aip_agents/examples/mcp_servers/mcp_name.py +29 -0
  227. aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
  228. aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
  229. aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
  230. aip_agents/examples/mcp_servers/mcp_time.py +10 -0
  231. aip_agents/examples/pii_demo_langgraph_client.py +69 -0
  232. aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
  233. aip_agents/examples/pii_demo_langgraph_server.py +126 -0
  234. aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
  235. aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
  236. aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
  237. aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
  238. aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
  239. aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
  240. aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
  241. aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
  242. aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
  243. aip_agents/examples/tools/__init__.py +27 -0
  244. aip_agents/examples/tools/__init__.pyi +9 -0
  245. aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
  246. aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
  247. aip_agents/examples/tools/adk_weather_tool.py +60 -0
  248. aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
  249. aip_agents/examples/tools/data_generator_tool.py +103 -0
  250. aip_agents/examples/tools/data_generator_tool.pyi +15 -0
  251. aip_agents/examples/tools/data_visualization_tool.py +312 -0
  252. aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
  253. aip_agents/examples/tools/image_artifact_tool.py +136 -0
  254. aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
  255. aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
  256. aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
  257. aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
  258. aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
  259. aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
  260. aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
  261. aip_agents/examples/tools/langchain_weather_tool.py +48 -0
  262. aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
  263. aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
  264. aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
  265. aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
  266. aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
  267. aip_agents/examples/tools/pii_demo_tools.py +189 -0
  268. aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
  269. aip_agents/examples/tools/random_chart_tool.py +142 -0
  270. aip_agents/examples/tools/random_chart_tool.pyi +20 -0
  271. aip_agents/examples/tools/serper_tool.py +202 -0
  272. aip_agents/examples/tools/serper_tool.pyi +16 -0
  273. aip_agents/examples/tools/stock_tools.py +82 -0
  274. aip_agents/examples/tools/stock_tools.pyi +36 -0
  275. aip_agents/examples/tools/table_generator_tool.py +167 -0
  276. aip_agents/examples/tools/table_generator_tool.pyi +22 -0
  277. aip_agents/examples/tools/time_tool.py +82 -0
  278. aip_agents/examples/tools/time_tool.pyi +15 -0
  279. aip_agents/examples/tools/weather_forecast_tool.py +38 -0
  280. aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
  281. aip_agents/executor/agent_executor.py +473 -0
  282. aip_agents/executor/base.py +48 -0
  283. aip_agents/guardrails/__init__.py +83 -0
  284. aip_agents/guardrails/__init__.pyi +6 -0
  285. aip_agents/guardrails/engines/__init__.py +69 -0
  286. aip_agents/guardrails/engines/__init__.pyi +4 -0
  287. aip_agents/guardrails/engines/base.py +90 -0
  288. aip_agents/guardrails/engines/base.pyi +61 -0
  289. aip_agents/guardrails/engines/nemo.py +101 -0
  290. aip_agents/guardrails/engines/nemo.pyi +46 -0
  291. aip_agents/guardrails/engines/phrase_matcher.py +113 -0
  292. aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
  293. aip_agents/guardrails/exceptions.py +39 -0
  294. aip_agents/guardrails/exceptions.pyi +23 -0
  295. aip_agents/guardrails/manager.py +163 -0
  296. aip_agents/guardrails/manager.pyi +42 -0
  297. aip_agents/guardrails/middleware.py +199 -0
  298. aip_agents/guardrails/middleware.pyi +87 -0
  299. aip_agents/guardrails/schemas.py +63 -0
  300. aip_agents/guardrails/schemas.pyi +43 -0
  301. aip_agents/guardrails/utils.py +45 -0
  302. aip_agents/guardrails/utils.pyi +19 -0
  303. aip_agents/mcp/__init__.py +1 -0
  304. aip_agents/mcp/__init__.pyi +0 -0
  305. aip_agents/mcp/client/__init__.py +14 -0
  306. aip_agents/mcp/client/__init__.pyi +5 -0
  307. aip_agents/mcp/client/base_mcp_client.py +369 -0
  308. aip_agents/mcp/client/base_mcp_client.pyi +148 -0
  309. aip_agents/mcp/client/connection_manager.py +228 -0
  310. aip_agents/mcp/client/connection_manager.pyi +51 -0
  311. aip_agents/mcp/client/google_adk/__init__.py +11 -0
  312. aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
  313. aip_agents/mcp/client/google_adk/client.py +381 -0
  314. aip_agents/mcp/client/google_adk/client.pyi +75 -0
  315. aip_agents/mcp/client/langchain/__init__.py +11 -0
  316. aip_agents/mcp/client/langchain/__init__.pyi +3 -0
  317. aip_agents/mcp/client/langchain/client.py +265 -0
  318. aip_agents/mcp/client/langchain/client.pyi +48 -0
  319. aip_agents/mcp/client/persistent_session.py +612 -0
  320. aip_agents/mcp/client/persistent_session.pyi +122 -0
  321. aip_agents/mcp/client/session_pool.py +351 -0
  322. aip_agents/mcp/client/session_pool.pyi +101 -0
  323. aip_agents/mcp/client/transports.py +263 -0
  324. aip_agents/mcp/client/transports.pyi +132 -0
  325. aip_agents/mcp/utils/__init__.py +7 -0
  326. aip_agents/mcp/utils/__init__.pyi +0 -0
  327. aip_agents/mcp/utils/config_validator.py +139 -0
  328. aip_agents/mcp/utils/config_validator.pyi +82 -0
  329. aip_agents/memory/__init__.py +14 -0
  330. aip_agents/memory/__init__.pyi +5 -0
  331. aip_agents/memory/adapters/__init__.py +10 -0
  332. aip_agents/memory/adapters/__init__.pyi +4 -0
  333. aip_agents/memory/adapters/base_adapter.py +811 -0
  334. aip_agents/memory/adapters/base_adapter.pyi +176 -0
  335. aip_agents/memory/adapters/mem0.py +84 -0
  336. aip_agents/memory/adapters/mem0.pyi +22 -0
  337. aip_agents/memory/base.py +84 -0
  338. aip_agents/memory/base.pyi +60 -0
  339. aip_agents/memory/constants.py +49 -0
  340. aip_agents/memory/constants.pyi +25 -0
  341. aip_agents/memory/factory.py +86 -0
  342. aip_agents/memory/factory.pyi +24 -0
  343. aip_agents/memory/guidance.py +20 -0
  344. aip_agents/memory/guidance.pyi +3 -0
  345. aip_agents/memory/simple_memory.py +47 -0
  346. aip_agents/memory/simple_memory.pyi +23 -0
  347. aip_agents/middleware/__init__.py +17 -0
  348. aip_agents/middleware/__init__.pyi +5 -0
  349. aip_agents/middleware/base.py +96 -0
  350. aip_agents/middleware/base.pyi +75 -0
  351. aip_agents/middleware/manager.py +150 -0
  352. aip_agents/middleware/manager.pyi +84 -0
  353. aip_agents/middleware/todolist.py +274 -0
  354. aip_agents/middleware/todolist.pyi +125 -0
  355. aip_agents/ptc/__init__.py +48 -0
  356. aip_agents/ptc/__init__.pyi +10 -0
  357. aip_agents/ptc/doc_gen.py +122 -0
  358. aip_agents/ptc/doc_gen.pyi +40 -0
  359. aip_agents/ptc/exceptions.py +39 -0
  360. aip_agents/ptc/exceptions.pyi +22 -0
  361. aip_agents/ptc/executor.py +143 -0
  362. aip_agents/ptc/executor.pyi +73 -0
  363. aip_agents/ptc/mcp/__init__.py +45 -0
  364. aip_agents/ptc/mcp/__init__.pyi +7 -0
  365. aip_agents/ptc/mcp/sandbox_bridge.py +668 -0
  366. aip_agents/ptc/mcp/sandbox_bridge.pyi +47 -0
  367. aip_agents/ptc/mcp/templates/__init__.py +1 -0
  368. aip_agents/ptc/mcp/templates/__init__.pyi +0 -0
  369. aip_agents/ptc/mcp/templates/mcp_client.py.template +239 -0
  370. aip_agents/ptc/naming.py +184 -0
  371. aip_agents/ptc/naming.pyi +76 -0
  372. aip_agents/ptc/payload.py +26 -0
  373. aip_agents/ptc/payload.pyi +15 -0
  374. aip_agents/ptc/prompt_builder.py +571 -0
  375. aip_agents/ptc/prompt_builder.pyi +55 -0
  376. aip_agents/ptc/ptc_helper.py +16 -0
  377. aip_agents/ptc/ptc_helper.pyi +1 -0
  378. aip_agents/ptc/sandbox_bridge.py +58 -0
  379. aip_agents/ptc/sandbox_bridge.pyi +25 -0
  380. aip_agents/ptc/template_utils.py +33 -0
  381. aip_agents/ptc/template_utils.pyi +13 -0
  382. aip_agents/ptc/templates/__init__.py +1 -0
  383. aip_agents/ptc/templates/__init__.pyi +0 -0
  384. aip_agents/ptc/templates/ptc_helper.py.template +134 -0
  385. aip_agents/sandbox/__init__.py +43 -0
  386. aip_agents/sandbox/__init__.pyi +5 -0
  387. aip_agents/sandbox/defaults.py +9 -0
  388. aip_agents/sandbox/defaults.pyi +2 -0
  389. aip_agents/sandbox/e2b_runtime.py +267 -0
  390. aip_agents/sandbox/e2b_runtime.pyi +51 -0
  391. aip_agents/sandbox/template_builder.py +131 -0
  392. aip_agents/sandbox/template_builder.pyi +36 -0
  393. aip_agents/sandbox/types.py +24 -0
  394. aip_agents/sandbox/types.pyi +14 -0
  395. aip_agents/sandbox/validation.py +50 -0
  396. aip_agents/sandbox/validation.pyi +20 -0
  397. aip_agents/schema/__init__.py +69 -0
  398. aip_agents/schema/__init__.pyi +9 -0
  399. aip_agents/schema/a2a.py +56 -0
  400. aip_agents/schema/a2a.pyi +40 -0
  401. aip_agents/schema/agent.py +111 -0
  402. aip_agents/schema/agent.pyi +65 -0
  403. aip_agents/schema/hitl.py +157 -0
  404. aip_agents/schema/hitl.pyi +89 -0
  405. aip_agents/schema/langgraph.py +37 -0
  406. aip_agents/schema/langgraph.pyi +28 -0
  407. aip_agents/schema/model_id.py +97 -0
  408. aip_agents/schema/model_id.pyi +54 -0
  409. aip_agents/schema/step_limit.py +108 -0
  410. aip_agents/schema/step_limit.pyi +63 -0
  411. aip_agents/schema/storage.py +40 -0
  412. aip_agents/schema/storage.pyi +21 -0
  413. aip_agents/sentry/__init__.py +11 -0
  414. aip_agents/sentry/__init__.pyi +3 -0
  415. aip_agents/sentry/sentry.py +151 -0
  416. aip_agents/sentry/sentry.pyi +48 -0
  417. aip_agents/storage/__init__.py +41 -0
  418. aip_agents/storage/__init__.pyi +8 -0
  419. aip_agents/storage/base.py +85 -0
  420. aip_agents/storage/base.pyi +58 -0
  421. aip_agents/storage/clients/__init__.py +12 -0
  422. aip_agents/storage/clients/__init__.pyi +3 -0
  423. aip_agents/storage/clients/minio_client.py +318 -0
  424. aip_agents/storage/clients/minio_client.pyi +137 -0
  425. aip_agents/storage/config.py +62 -0
  426. aip_agents/storage/config.pyi +29 -0
  427. aip_agents/storage/providers/__init__.py +15 -0
  428. aip_agents/storage/providers/__init__.pyi +5 -0
  429. aip_agents/storage/providers/base.py +106 -0
  430. aip_agents/storage/providers/base.pyi +88 -0
  431. aip_agents/storage/providers/memory.py +114 -0
  432. aip_agents/storage/providers/memory.pyi +79 -0
  433. aip_agents/storage/providers/object_storage.py +214 -0
  434. aip_agents/storage/providers/object_storage.pyi +98 -0
  435. aip_agents/tools/__init__.py +64 -0
  436. aip_agents/tools/__init__.pyi +11 -0
  437. aip_agents/tools/browser_use/__init__.py +82 -0
  438. aip_agents/tools/browser_use/__init__.pyi +14 -0
  439. aip_agents/tools/browser_use/action_parser.py +103 -0
  440. aip_agents/tools/browser_use/action_parser.pyi +18 -0
  441. aip_agents/tools/browser_use/browser_use_tool.py +1120 -0
  442. aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
  443. aip_agents/tools/browser_use/llm_config.py +120 -0
  444. aip_agents/tools/browser_use/llm_config.pyi +52 -0
  445. aip_agents/tools/browser_use/minio_storage.py +198 -0
  446. aip_agents/tools/browser_use/minio_storage.pyi +109 -0
  447. aip_agents/tools/browser_use/schemas.py +119 -0
  448. aip_agents/tools/browser_use/schemas.pyi +32 -0
  449. aip_agents/tools/browser_use/session.py +76 -0
  450. aip_agents/tools/browser_use/session.pyi +4 -0
  451. aip_agents/tools/browser_use/session_errors.py +132 -0
  452. aip_agents/tools/browser_use/session_errors.pyi +53 -0
  453. aip_agents/tools/browser_use/steel_session_recording.py +317 -0
  454. aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
  455. aip_agents/tools/browser_use/streaming.py +815 -0
  456. aip_agents/tools/browser_use/streaming.pyi +81 -0
  457. aip_agents/tools/browser_use/structured_data_parser.py +257 -0
  458. aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
  459. aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
  460. aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
  461. aip_agents/tools/browser_use/types.py +78 -0
  462. aip_agents/tools/browser_use/types.pyi +45 -0
  463. aip_agents/tools/code_sandbox/__init__.py +26 -0
  464. aip_agents/tools/code_sandbox/__init__.pyi +3 -0
  465. aip_agents/tools/code_sandbox/constant.py +13 -0
  466. aip_agents/tools/code_sandbox/constant.pyi +4 -0
  467. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +306 -0
  468. aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
  469. aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
  470. aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
  471. aip_agents/tools/constants.py +177 -0
  472. aip_agents/tools/constants.pyi +138 -0
  473. aip_agents/tools/date_range_tool.py +554 -0
  474. aip_agents/tools/date_range_tool.pyi +21 -0
  475. aip_agents/tools/document_loader/__init__.py +44 -0
  476. aip_agents/tools/document_loader/__init__.pyi +7 -0
  477. aip_agents/tools/document_loader/base_reader.py +302 -0
  478. aip_agents/tools/document_loader/base_reader.pyi +75 -0
  479. aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
  480. aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
  481. aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
  482. aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
  483. aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
  484. aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
  485. aip_agents/tools/document_loader/pdf_splitter.py +169 -0
  486. aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
  487. aip_agents/tools/execute_ptc_code.py +308 -0
  488. aip_agents/tools/execute_ptc_code.pyi +90 -0
  489. aip_agents/tools/gl_connector/__init__.py +5 -0
  490. aip_agents/tools/gl_connector/__init__.pyi +3 -0
  491. aip_agents/tools/gl_connector/tool.py +383 -0
  492. aip_agents/tools/gl_connector/tool.pyi +74 -0
  493. aip_agents/tools/gl_connector_tools.py +119 -0
  494. aip_agents/tools/gl_connector_tools.pyi +39 -0
  495. aip_agents/tools/memory_search/__init__.py +29 -0
  496. aip_agents/tools/memory_search/__init__.pyi +5 -0
  497. aip_agents/tools/memory_search/base.py +200 -0
  498. aip_agents/tools/memory_search/base.pyi +69 -0
  499. aip_agents/tools/memory_search/mem0.py +365 -0
  500. aip_agents/tools/memory_search/mem0.pyi +29 -0
  501. aip_agents/tools/memory_search/schema.py +81 -0
  502. aip_agents/tools/memory_search/schema.pyi +25 -0
  503. aip_agents/tools/memory_search_tool.py +34 -0
  504. aip_agents/tools/memory_search_tool.pyi +3 -0
  505. aip_agents/tools/time_tool.py +117 -0
  506. aip_agents/tools/time_tool.pyi +16 -0
  507. aip_agents/tools/tool_config_injector.py +300 -0
  508. aip_agents/tools/tool_config_injector.pyi +26 -0
  509. aip_agents/tools/web_search/__init__.py +15 -0
  510. aip_agents/tools/web_search/__init__.pyi +3 -0
  511. aip_agents/tools/web_search/serper_tool.py +187 -0
  512. aip_agents/tools/web_search/serper_tool.pyi +19 -0
  513. aip_agents/types/__init__.py +70 -0
  514. aip_agents/types/__init__.pyi +36 -0
  515. aip_agents/types/a2a_events.py +13 -0
  516. aip_agents/types/a2a_events.pyi +3 -0
  517. aip_agents/utils/__init__.py +79 -0
  518. aip_agents/utils/__init__.pyi +11 -0
  519. aip_agents/utils/a2a_connector.py +1757 -0
  520. aip_agents/utils/a2a_connector.pyi +146 -0
  521. aip_agents/utils/artifact_helpers.py +502 -0
  522. aip_agents/utils/artifact_helpers.pyi +203 -0
  523. aip_agents/utils/constants.py +22 -0
  524. aip_agents/utils/constants.pyi +10 -0
  525. aip_agents/utils/datetime/__init__.py +34 -0
  526. aip_agents/utils/datetime/__init__.pyi +4 -0
  527. aip_agents/utils/datetime/normalization.py +231 -0
  528. aip_agents/utils/datetime/normalization.pyi +95 -0
  529. aip_agents/utils/datetime/timezone.py +206 -0
  530. aip_agents/utils/datetime/timezone.pyi +48 -0
  531. aip_agents/utils/env_loader.py +27 -0
  532. aip_agents/utils/env_loader.pyi +10 -0
  533. aip_agents/utils/event_handler_registry.py +58 -0
  534. aip_agents/utils/event_handler_registry.pyi +23 -0
  535. aip_agents/utils/file_prompt_utils.py +176 -0
  536. aip_agents/utils/file_prompt_utils.pyi +21 -0
  537. aip_agents/utils/final_response_builder.py +211 -0
  538. aip_agents/utils/final_response_builder.pyi +34 -0
  539. aip_agents/utils/formatter_llm_client.py +231 -0
  540. aip_agents/utils/formatter_llm_client.pyi +71 -0
  541. aip_agents/utils/langgraph/__init__.py +19 -0
  542. aip_agents/utils/langgraph/__init__.pyi +3 -0
  543. aip_agents/utils/langgraph/converter.py +128 -0
  544. aip_agents/utils/langgraph/converter.pyi +49 -0
  545. aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
  546. aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
  547. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
  548. aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
  549. aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
  550. aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
  551. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1096 -0
  552. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
  553. aip_agents/utils/langgraph/tool_output_management.py +1047 -0
  554. aip_agents/utils/langgraph/tool_output_management.pyi +329 -0
  555. aip_agents/utils/logger.py +195 -0
  556. aip_agents/utils/logger.pyi +60 -0
  557. aip_agents/utils/metadata/__init__.py +27 -0
  558. aip_agents/utils/metadata/__init__.pyi +5 -0
  559. aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
  560. aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
  561. aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
  562. aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
  563. aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
  564. aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
  565. aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
  566. aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
  567. aip_agents/utils/metadata/activity_narrative/context.py +49 -0
  568. aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
  569. aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
  570. aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
  571. aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
  572. aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
  573. aip_agents/utils/metadata/schemas/__init__.py +16 -0
  574. aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
  575. aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
  576. aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
  577. aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
  578. aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
  579. aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
  580. aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
  581. aip_agents/utils/metadata_helper.py +358 -0
  582. aip_agents/utils/metadata_helper.pyi +117 -0
  583. aip_agents/utils/name_preprocessor/__init__.py +17 -0
  584. aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
  585. aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
  586. aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
  587. aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
  588. aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
  589. aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
  590. aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
  591. aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
  592. aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
  593. aip_agents/utils/pii/__init__.py +25 -0
  594. aip_agents/utils/pii/__init__.pyi +5 -0
  595. aip_agents/utils/pii/pii_handler.py +397 -0
  596. aip_agents/utils/pii/pii_handler.pyi +96 -0
  597. aip_agents/utils/pii/pii_helper.py +207 -0
  598. aip_agents/utils/pii/pii_helper.pyi +78 -0
  599. aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
  600. aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
  601. aip_agents/utils/reference_helper.py +273 -0
  602. aip_agents/utils/reference_helper.pyi +81 -0
  603. aip_agents/utils/sse_chunk_transformer.py +831 -0
  604. aip_agents/utils/sse_chunk_transformer.pyi +166 -0
  605. aip_agents/utils/step_limit_manager.py +265 -0
  606. aip_agents/utils/step_limit_manager.pyi +112 -0
  607. aip_agents/utils/token_usage_helper.py +156 -0
  608. aip_agents/utils/token_usage_helper.pyi +60 -0
  609. aip_agents_binary-0.6.4.dist-info/METADATA +673 -0
  610. aip_agents_binary-0.6.4.dist-info/RECORD +612 -0
  611. aip_agents_binary-0.6.4.dist-info/WHEEL +5 -0
  612. aip_agents_binary-0.6.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2856 @@
1
+ """LangGraph ReAct Agent implementation.
2
+
3
+ A ReAct agent template built on LangGraph that can use either lm_invoker or LangChain BaseChatModel.
4
+
5
+ Authors:
6
+ Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
7
+ Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
8
+ Raymond Christopher (raymond.christopher@gdplabs.id)
9
+ Reinhart Linanda (reinhart.linanda@gdplabs.id)
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import asyncio
15
+ import time
16
+ import uuid
17
+ from collections.abc import Awaitable, Callable, Sequence
18
+ from dataclasses import asdict, dataclass
19
+ from functools import reduce
20
+ from textwrap import dedent
21
+ from typing import TYPE_CHECKING, Annotated, Any, cast
22
+
23
+ from deprecated import deprecated # type: ignore[import-untyped]
24
+
25
+ if TYPE_CHECKING:
26
+ from aip_agents.guardrails.manager import GuardrailManager
27
+ from gllm_core.event import EventEmitter # type: ignore[import-untyped]
28
+ from gllm_core.schema import Chunk # type: ignore[import-untyped]
29
+ from langchain_core.language_models import BaseChatModel
30
+ from langchain_core.messages import (
31
+ AIMessage,
32
+ BaseMessage,
33
+ HumanMessage,
34
+ SystemMessage,
35
+ ToolMessage,
36
+ )
37
+ from langchain_core.messages.ai import UsageMetadata
38
+ from langchain_core.tools import BaseTool
39
+ from langgraph.config import get_stream_writer
40
+ from langgraph.graph import END, StateGraph
41
+ from langgraph.graph.message import add_messages
42
+ from langgraph.graph.state import CompiledStateGraph
43
+ from langgraph.managed import IsLastStep, RemainingSteps
44
+ from langgraph.types import Command, StreamWriter
45
+ from typing_extensions import TypedDict
46
+
47
+ from aip_agents.agent.base_langgraph_agent import _THREAD_ID_CVAR, BaseLangGraphAgent
48
+ from aip_agents.agent.hitl.langgraph_hitl_mixin import LangGraphHitLMixin
49
+ from aip_agents.agent.hitl.manager import TOOL_EXECUTION_BLOCKING_DECISIONS
50
+ from aip_agents.middleware.base import AgentMiddleware, ModelRequest
51
+ from aip_agents.middleware.manager import MiddlewareManager
52
+ from aip_agents.middleware.todolist import TodoList, TodoListMiddleware
53
+ from aip_agents.schema.a2a import A2AStreamEventType
54
+ from aip_agents.schema.hitl import ApprovalDecision, HitlMetadata
55
+ from aip_agents.schema.langgraph import ToolCallResult, ToolStorageParams
56
+ from aip_agents.schema.step_limit import MaxStepsExceededError, StepLimitConfig
57
+ from aip_agents.tools.memory_search_tool import MEMORY_DELETE_TOOL_NAME, MEMORY_SEARCH_TOOL_NAME
58
+ from aip_agents.tools.tool_config_injector import TOOL_CONFIGS_KEY
59
+ from aip_agents.utils import add_references_chunks
60
+ from aip_agents.utils.langgraph import (
61
+ convert_langchain_messages_to_gllm_messages,
62
+ convert_lm_output_to_langchain_message,
63
+ )
64
+ from aip_agents.utils.langgraph.tool_output_management import (
65
+ StoreOutputParams,
66
+ ToolOutputManager,
67
+ ToolReferenceError,
68
+ ToolReferenceResolver,
69
+ )
70
+ from aip_agents.utils.logger import get_logger
71
+ from aip_agents.utils.metadata.activity_metadata_helper import create_tool_activity_info
72
+ from aip_agents.utils.metadata_helper import Kind, MetadataFieldKeys, Status
73
+ from aip_agents.utils.pii import ToolPIIHandler, add_pii_mappings, normalize_enable_pii
74
+ from aip_agents.utils.reference_helper import extract_references_from_tool
75
+ from aip_agents.utils.step_limit_manager import (
76
+ _DELEGATION_CHAIN_CVAR,
77
+ _DELEGATION_DEPTH_CVAR,
78
+ _REMAINING_STEP_BUDGET_CVAR,
79
+ _STEP_LIMIT_CONFIG_CVAR,
80
+ StepLimitManager,
81
+ )
82
+ from aip_agents.utils.token_usage_helper import (
83
+ TOTAL_USAGE_KEY,
84
+ USAGE_METADATA_KEY,
85
+ add_usage_metadata,
86
+ extract_and_update_token_usage_from_ai_message,
87
+ extract_token_usage_from_tool_output,
88
+ )
89
+
90
+ if TYPE_CHECKING:
91
+ from aip_agents.ptc import PTCSandboxConfig
92
+
93
logger = get_logger(__name__)

# Default instruction for ReAct agents when the caller supplies none.
DEFAULT_INSTRUCTION = "You are a helpful assistant. Use the available tools to help answer questions."

# Tool method constants.
# Name of the optional streaming execution method looked up on tools
# (presumably probed via getattr elsewhere in this module — usage outside this view).
TOOL_RUN_STREAMING_METHOD = "arun_streaming"

# Key Attributes: dictionary keys used in state/config payloads.
TOOL_OUTPUT_MANAGER_KEY = "tool_output_manager"
CALL_ID_KEY = "call_id"
104
+
105
+
106
@dataclass
class ToolCallContext:
    """Context information for executing a single tool call."""

    # LangGraph run configuration active for this call, if any.
    config: dict[str, Any] | None
    # Agent state available to the tool call.
    state: dict[str, Any]
    # Artifacts collected so far in this run; tools may append more.
    pending_artifacts: list[dict[str, Any]]
    # Human-in-the-loop approval decision for this call, when HITL is active.
    hitl_decision: ApprovalDecision | None = None
114
+
115
+
116
class ReactAgentState(TypedDict):
    """State schema for the ReAct agent.

    Includes messages, step tracking, optional event emission support, artifacts, references,
    metadata, tool output management, and deep agents middleware state (todos, filesystem).
    """

    # Conversation history; the add_messages reducer appends rather than replaces.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # LangGraph-managed recursion-limit flags.
    is_last_step: IsLastStep
    remaining_steps: RemainingSteps
    # Optional emitter for streaming progress events.
    event_emitter: EventEmitter | None
    # Artifacts produced by tools during this run.
    artifacts: list[dict[str, Any]] | None
    # Reference chunks, merged across updates by the add_references_chunks reducer.
    references: Annotated[list[Chunk], add_references_chunks]
    metadata: dict[str, Any] | None
    # Manager enabling tool output storage/sharing; None disables the feature.
    tool_output_manager: ToolOutputManager | None
    # Token usage accumulated via the add_usage_metadata reducer.
    total_usage: Annotated[UsageMetadata | None, add_usage_metadata]
    # PII anonymization mapping, merged across updates by add_pii_mappings.
    pii_mapping: Annotated[dict[str, str] | None, add_pii_mappings]
    thread_id: str

    # Deep Agents Middleware State
    todos: TodoList | None  # Planning middleware - task decomposition state

    # Step Limit State (Configurable Maximum Steps Feature)
    current_step: int  # Current step number (incremented after each LLM call or tool execution)
    delegation_depth: int  # Current depth in delegation chain (0 for root)
    delegation_chain: list[str]  # Agent names in delegation chain
    step_limit_config: StepLimitConfig | None  # Step and delegation limit configuration
143
+
144
+
145
+ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
146
+ """A ReAct agent template built on LangGraph.
147
+
148
+ This agent can use either:
149
+ - An LMInvoker (if self.lm_invoker is set by BaseAgent)
150
+ - A LangChain BaseChatModel (if self.model is set by BaseAgent)
151
+
152
+ The graph structure follows the standard ReAct pattern:
153
+ agent -> tools -> agent (loop) -> END
154
+ """
155
+
156
    def __init__(  # noqa: PLR0913
        self,
        name: str,
        instruction: str = DEFAULT_INSTRUCTION,
        model: BaseChatModel | str | Any | None = None,
        tools: Sequence[BaseTool] | None = None,
        agents: Sequence[Any] | None = None,
        description: str | None = None,
        thread_id_key: str = "thread_id",
        event_emitter: EventEmitter | None = None,
        tool_output_manager: ToolOutputManager | None = None,
        planning: bool = False,
        middlewares: Sequence[AgentMiddleware] | None = None,
        guardrail: GuardrailManager | None = None,
        step_limit_config: StepLimitConfig | None = None,
        ptc_config: PTCSandboxConfig | None = None,
        **kwargs: Any,
    ):
        """Initialize the LangGraph ReAct Agent.

        Args:
            name: The name of the agent.
            instruction: The system instruction for the agent.
            model: The model to use (lm_invoker, LangChain model, string, etc.).
            tools: Sequence of LangChain tools available to the agent.
            agents: Optional sequence of sub-agents for delegation (coordinator mode).
            description: Human-readable description of the agent.
            thread_id_key: Key for thread ID in configuration.
            event_emitter: Optional event emitter for streaming updates.
            tool_output_manager: Optional ToolOutputManager instance for tool output management.
                When provided, enables tool output storage, reference resolution, and sharing capabilities.
                This enables multi-agent workflows where agents can access each other's tool outputs.
                If None, tool output management is disabled for this agent.
            planning: Enable planning capabilities with TodoListMiddleware. Defaults to False.
            middlewares: Optional sequence of custom middleware to COMPOSE (not override) with built-in middleware.
                Execution order: [TodoListMiddleware (if planning=True),
                                  GuardrailMiddleware (if guardrail provided),
                                  ...custom middlewares in order provided]
                All middleware hooks execute - this extends capabilities, never replaces them.
            guardrail: Optional GuardrailManager for content filtering and safety checks.
                When provided, automatically wraps in GuardrailMiddleware for transparent
                input/output filtering during agent execution.
            enable_pii: Optional toggle to enable PII handling for tool inputs and outputs.
                Note: accepted via **kwargs (not a named parameter); popped and normalized below.
            step_limit_config: Optional configuration for step limits and delegation depth.
            ptc_config: Optional configuration for PTC sandbox execution. See PTCSandboxConfig
                for available options including enabled flag, sandbox timeout, and template settings.
                PTC is enabled when ptc_config is not None and ptc_config.enabled is True.
                When enabled, prompt guidance is automatically injected into the agent's instruction.
                PTC runs in a sandbox only; there is no in-process trusted PTC path.
            **kwargs: Additional keyword arguments passed to BaseLangGraphAgent.
        """
        # Use LangGraph's standard AgentState for ReAct unless the caller overrides it.
        state_schema = kwargs.pop("state_schema", ReactAgentState)
        # enable_pii is a kwargs-only option; pop it so it does not reach the base class.
        enable_pii = kwargs.pop("enable_pii", None)
        enable_pii = normalize_enable_pii(enable_pii)

        super().__init__(
            name=name,
            instruction=instruction,
            description=description,
            model=model,
            tools=tools,
            state_schema=state_schema,
            thread_id_key=thread_id_key,
            event_emitter=event_emitter,
            **kwargs,
        )

        if self.model is None and self.lm_invoker is None:
            logger.warning(
                "Agent '%s': Model and LM invoker are both unset. Calls that require a model will fail.",
                self.name,
            )

        # Handle tool output management
        self.tool_output_manager = tool_output_manager
        # Per-thread PII handlers; populated lazily per conversation thread.
        self._pii_handlers_by_thread: dict[str, ToolPIIHandler] = {}
        self._enable_pii = enable_pii

        # Initialize middleware tools list (populated by _setup_middleware)
        self._middleware_tools: list[BaseTool] = []

        # Setup middleware
        self._middleware_manager = self._setup_middleware(
            planning=planning,
            guardrail=guardrail,
            custom_middlewares=middlewares,
        )

        # Handle delegation agents (coordinator mode) - following legacy pattern
        if agents:
            self.register_delegation_agents(list(agents))

        self.step_limit_config = step_limit_config

        # Initialize PTC state (Programmatic Tool Calling)
        self._ptc_config: PTCSandboxConfig | None = None
        self._ptc_tool_synced = False
        self._ptc_tool: BaseTool | None = None
        self._ptc_prompt_hash: str = ""
        # Capture instruction after middleware setup so middleware prompts are preserved
        self._original_instruction: str = self.instruction

        # Enable PTC if requested via constructor
        if ptc_config is not None and ptc_config.enabled:
            self.enable_ptc(ptc_config)
262
+
263
+ def _setup_middleware(
264
+ self,
265
+ planning: bool,
266
+ guardrail: GuardrailManager | None,
267
+ custom_middlewares: Sequence[AgentMiddleware] | None,
268
+ ) -> MiddlewareManager | None:
269
+ """Setup middleware based on configuration.
270
+
271
+ Creates auto-configured middleware (planning, guardrails) and composes
272
+ with custom middleware if provided.
273
+
274
+ Args:
275
+ planning: Whether to enable TodoListMiddleware.
276
+ guardrail: Optional GuardrailManager to wrap in GuardrailMiddleware.
277
+ custom_middlewares: Optional custom middlewares to append.
278
+
279
+ Returns:
280
+ MiddlewareManager if any middleware configured, None otherwise.
281
+ """
282
+ middleware_list: list[AgentMiddleware] = []
283
+
284
+ # Auto-configure TodoListMiddleware if planning enabled
285
+ if planning:
286
+ middleware_list.append(cast(AgentMiddleware, TodoListMiddleware()))
287
+
288
+ # Auto-configure GuardrailMiddleware if guardrail provided
289
+ if guardrail:
290
+ from aip_agents.guardrails.middleware import GuardrailMiddleware
291
+
292
+ middleware_list.append(GuardrailMiddleware(guardrail))
293
+
294
+ # Append custom middlewares
295
+ if custom_middlewares:
296
+ middleware_list.extend(custom_middlewares)
297
+
298
+ # Return manager if any middleware configured
299
+ if middleware_list:
300
+ manager = MiddlewareManager(middleware_list)
301
+ # Store middleware tools separately for proper rebuild support
302
+ middleware_tools = manager.get_all_tools()
303
+ if middleware_tools:
304
+ self._middleware_tools = list(middleware_tools)
305
+ # Add to resolved_tools for immediate use
306
+ self.resolved_tools = list(self.resolved_tools) + self._middleware_tools
307
+ # Enhance instruction with middleware prompt additions
308
+ self.instruction = manager.build_system_prompt(self.instruction)
309
+ return manager
310
+
311
+ return None
312
+
313
+ async def _get_effective_writer(self, writer: StreamWriter | None = None) -> StreamWriter | None:
314
+ """Get the effective stream writer, falling back to ContextVar if needed.
315
+
316
+ Args:
317
+ writer: Optional stream writer to use.
318
+
319
+ Returns:
320
+ The effective stream writer or None if retrieval fails.
321
+ """
322
+ try:
323
+ return writer or get_stream_writer()
324
+ except Exception:
325
+ return None
326
+
327
+ def _get_step_limit_manager(
328
+ self,
329
+ state: dict[str, Any],
330
+ node_type: str,
331
+ writer: StreamWriter | None = None,
332
+ count: int = 1,
333
+ manager: StepLimitManager | None = None,
334
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
335
+ """Return initialized StepLimitManager or early state update.
336
+
337
+ Args:
338
+ state: Current LangGraph state dictionary.
339
+ node_type: `"agent"` or `"tool"`; determines the fallback message format when limits are exceeded.
340
+ writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
341
+ count: Number of steps to check.
342
+ manager: Optional existing manager to reuse.
343
+
344
+ Returns:
345
+ Tuple where the first element is a state update dict when execution should stop, and the second element is the active `StepLimitManager` when limits allow the node to proceed.
346
+ """
347
+ limit_error_update, manager = self._check_step_limits_helper(
348
+ state, node_type, writer=writer, count=count, manager=manager
349
+ )
350
+ if limit_error_update:
351
+ return limit_error_update, None
352
+ if manager is None:
353
+ return {}, None
354
+ manager.set_context()
355
+ return None, manager
356
+
357
+ def _emit_step_limit_event(
358
+ self,
359
+ event_type: A2AStreamEventType,
360
+ metadata: dict[str, Any],
361
+ writer: StreamWriter | None = None,
362
+ ) -> None:
363
+ """Emit a step limit event via LangGraph stream writer or EventEmitter.
364
+
365
+ Args:
366
+ event_type: The type of event to emit.
367
+ metadata: Metadata to include in the event.
368
+ writer: Optional LangGraph `StreamWriter` used when limit events need to be emitted in the absence of an event emitter.
369
+ """
370
+ enriched_metadata = dict(metadata)
371
+ enriched_metadata.setdefault("status", "error")
372
+ enriched_metadata.setdefault("kind", "agent_default")
373
+
374
+ event_payload = self._create_a2a_event(
375
+ event_type=event_type,
376
+ content=enriched_metadata.get("message", ""),
377
+ metadata=enriched_metadata,
378
+ )
379
+
380
+ try:
381
+ effective_writer = writer or get_stream_writer()
382
+ except Exception:
383
+ effective_writer = None
384
+
385
+ if effective_writer:
386
+ effective_writer(event_payload)
387
+ return
388
+
389
+ if self.event_emitter:
390
+ self.event_emitter.emit(event_payload["event_type"], event_payload["metadata"])
391
+
392
+ def _check_step_limits_helper(
393
+ self,
394
+ state: dict[str, Any],
395
+ node_type: str,
396
+ writer: StreamWriter | None = None,
397
+ count: int = 1,
398
+ manager: StepLimitManager | None = None,
399
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
400
+ """Check step limits and return state update if limit exceeded.
401
+
402
+ Centralized logic to avoid duplication between agent_node and tool_node.
403
+
404
+ Args:
405
+ state: Current agent state.
406
+ node_type: Either 'agent' or 'tool' to determine return message types.
407
+ writer: Optional stream writer for emitting custom events if event_emitter is missing.
408
+ count: Number of steps to check.
409
+ manager: Optional existing manager to reuse.
410
+
411
+ Returns:
412
+ Tuple of (state update dict if limit exceeded else None, active StepLimitManager instance).
413
+ """
414
+ try:
415
+ if manager is None:
416
+ manager = StepLimitManager.from_state(state)
417
+ manager.check_step_limit(agent_name=self.name, count=count)
418
+
419
+ return None, manager
420
+
421
+ except MaxStepsExceededError as e:
422
+ logger.warning(f"Agent '{self.name}': {e.error_response.message}")
423
+ metadata = {
424
+ "message": e.error_response.message,
425
+ "agent_name": e.error_response.agent_name,
426
+ "current_value": e.error_response.current_value,
427
+ "configured_limit": e.error_response.configured_limit,
428
+ }
429
+ self._emit_step_limit_event(
430
+ A2AStreamEventType.STEP_LIMIT_EXCEEDED,
431
+ metadata,
432
+ writer,
433
+ )
434
+ if node_type == "tool":
435
+ return (
436
+ {
437
+ "messages": [ToolMessage(content=f"⚠️ {e.error_response.message}", tool_call_id="step_limit")],
438
+ },
439
+ None,
440
+ )
441
+ return (
442
+ {
443
+ "messages": [AIMessage(content=f"⚠️ {e.error_response.message}")],
444
+ },
445
+ None,
446
+ )
447
+
448
+ def _rebuild_resolved_tools(self) -> None:
449
+ """Rebuild resolved tools including middleware and PTC tools.
450
+
451
+ Overrides base class to ensure middleware tools and the PTC tool are preserved
452
+ when tools are rebuilt (e.g., after update_regular_tools).
453
+ """
454
+ # Call base class to rebuild with regular, a2a, delegation, and mcp tools
455
+ super()._rebuild_resolved_tools()
456
+
457
+ # Add middleware tools if present
458
+ if hasattr(self, "_middleware_tools") and self._middleware_tools:
459
+ self.resolved_tools.extend(self._middleware_tools)
460
+
461
+ # Add PTC tool if synced
462
+ if hasattr(self, "_ptc_tool") and self._ptc_tool is not None:
463
+ self.resolved_tools.append(self._ptc_tool)
464
+
465
+ def _handle_tool_artifacts(
466
+ self, tool_output: Any, pending_artifacts: list[dict[str, Any]]
467
+ ) -> tuple[str, list[dict[str, Any]]]:
468
+ """Handle artifact extraction from tool output.
469
+
470
+ Args:
471
+ tool_output: The output from the tool execution.
472
+ pending_artifacts: Current list of pending artifacts.
473
+
474
+ Returns:
475
+ Tuple of (agent_result_text, updated_pending_artifacts).
476
+ """
477
+ if isinstance(tool_output, dict) and "artifacts" in tool_output:
478
+ artifacts = tool_output["artifacts"]
479
+ if isinstance(artifacts, list):
480
+ pending_artifacts.extend(artifacts)
481
+ return tool_output.get("result", ""), pending_artifacts
482
+ else:
483
+ return str(tool_output), pending_artifacts
484
+
485
+ # ruff: noqa: PLR0915
486
+ def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
487
+ """Define the ReAct agent graph structure.
488
+
489
+ Args:
490
+ graph_builder: The StateGraph builder to define the graph structure.
491
+
492
+ Returns:
493
+ Compiled LangGraph ready for execution.
494
+ """
495
+ # Create node functions using helper methods
496
+ agent_node = self._create_agent_node()
497
+ tool_node_logic = self._create_tool_node_logic()
498
+ should_continue = self._create_should_continue_logic(END)
499
+
500
+ # Add memory node if memory is enabled
501
+ if self._memory_enabled():
502
+ memory_enhancer_agent = self._create_memory_enhancer_agent()
503
+ graph_builder.add_node("memory_enhancer", self._create_memory_node(memory_enhancer_agent))
504
+ graph_builder.set_entry_point("memory_enhancer")
505
+ graph_builder.add_edge("memory_enhancer", "agent")
506
+ else:
507
+ graph_builder.set_entry_point("agent")
508
+
509
+ graph_builder.add_node("agent", agent_node)
510
+
511
+ if self.resolved_tools:
512
+ graph_builder.add_node("tools", tool_node_logic)
513
+ graph_builder.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
514
+ graph_builder.add_edge("tools", "agent")
515
+ else:
516
+ graph_builder.add_edge("agent", END)
517
+
518
+ return graph_builder.compile(
519
+ checkpointer=self.checkpointer,
520
+ )
521
+
522
+ def _create_memory_enhancer_agent(self) -> Any:
523
+ """Create dedicated LangGraphMemoryEnhancerAgent instance for memory enhancement.
524
+
525
+ Returns:
526
+ LangGraphMemoryEnhancerAgent: Configured mini-agent for automatic memory retrieval.
527
+ """
528
+ # Lazy import to avoid circular dependency: LangGraphReactAgent imports
529
+ # LangGraphMemoryEnhancerAgent which inherits from LangGraphReactAgent.
530
+ from aip_agents.agent.langgraph_memory_enhancer_agent import ( # noqa: PLC0415
531
+ LangGraphMemoryEnhancerAgent,
532
+ )
533
+
534
+ model_id = getattr(self.lm_invoker, "model_id", None)
535
+ model = self.model or model_id
536
+ return LangGraphMemoryEnhancerAgent(
537
+ memory=self.memory,
538
+ model=model,
539
+ memory_agent_id=self.memory_agent_id,
540
+ memory_retrieval_limit=self.memory_retrieval_limit,
541
+ )
542
+
543
+ def _create_memory_node(self, memory_enhancer_agent: Any) -> Any:
544
+ """Create memory enhancement node that delegates to LangGraphMemoryEnhancerAgent.
545
+
546
+ Args:
547
+ memory_enhancer_agent: The LangGraphMemoryEnhancerAgent instance to use for enhancement.
548
+
549
+ Returns:
550
+ Callable: Async function that enhances user query with memory context.
551
+ """
552
+
553
+ async def memory_node(state: dict[str, Any], config: dict[str, Any] | None = None) -> dict[str, Any]:
554
+ """Enhance user query with memory context via LangGraphMemoryEnhancerAgent.
555
+
556
+ Args:
557
+ state: LangGraph state containing conversation messages.
558
+ config: Optional LangGraph configuration.
559
+
560
+ Returns:
561
+ State update with potentially enhanced last message.
562
+ """
563
+ user_query = self._extract_user_query_from_messages(state.get("messages", []))
564
+ if not user_query:
565
+ return {}
566
+
567
+ try:
568
+ metadata = state.get("metadata", {})
569
+ enhanced_result = await memory_enhancer_agent.arun(query=user_query, metadata=metadata)
570
+ enhanced_query = enhanced_result.get("output", user_query)
571
+
572
+ if enhanced_query == user_query:
573
+ logger.debug(f"Agent '{self.name}': No memory enhancement needed")
574
+ return {}
575
+
576
+ logger.info(f"Agent '{self.name}': Memory enhancement completed")
577
+ enhanced_message = HumanMessage(content=enhanced_query)
578
+ # Append enhanced message (with add_messages reducer, this creates: original + enhanced)
579
+ return {"messages": [enhanced_message]}
580
+
581
+ except Exception as e:
582
+ logger.warning(f"Agent '{self.name}': Memory enhancement failed: {e}")
583
+ return {}
584
+
585
+ return memory_node
586
+
587
+ def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
588
+ """Return True when interaction should be saved to memory."""
589
+ if self._contains_memory_delete_action(final_state):
590
+ logger.info("Memory: Skipping save_interaction due to memory delete action in state.")
591
+ return False
592
+ return True
593
+
594
+ @staticmethod
595
+ def _contains_memory_delete_action(final_state: dict[str, Any] | None) -> bool:
596
+ """Return True when final state includes a delete memory action block."""
597
+ if not isinstance(final_state, dict):
598
+ return False
599
+ messages = final_state.get("messages")
600
+ if not isinstance(messages, list):
601
+ return False
602
+ for message in messages:
603
+ content = getattr(message, "content", None)
604
+ if not isinstance(content, str):
605
+ continue
606
+ if "<MEMORY_ACTION>" in content and "action=delete" in content:
607
+ return True
608
+ return False
609
+
610
+ def _extract_user_query_from_messages(self, messages: list[Any]) -> str | None:
611
+ """Get latest user query string from a list of messages.
612
+
613
+ Args:
614
+ messages: List of LangChain messages to search through.
615
+
616
+ Returns:
617
+ The content string from the most recent HumanMessage if valid, None otherwise.
618
+ """
619
+ if not messages:
620
+ return None
621
+ for i in range(len(messages) - 1, -1, -1):
622
+ msg = messages[i]
623
+ if isinstance(msg, HumanMessage) and hasattr(msg, "content"):
624
+ content = msg.content
625
+ if isinstance(content, str) and content.strip():
626
+ return content
627
+ return None
628
+ return None
629
+
630
def _create_agent_node(self) -> Callable[..., Awaitable[dict[str, Any]]]:
    """Build the async graph node that performs one LLM step."""

    async def agent_node(
        state: dict[str, Any], config: dict[str, Any] | None = None, *, writer: StreamWriter = None
    ) -> dict[str, Any]:
        """Invoke the configured LLM and merge its output into the state.

        Args:
            state: Current agent state with the running message history.
            config: Optional configuration containing thread_id and execution parameters.
            writer: Optional stream writer for emitting custom events.

        Returns:
            State update containing new AI messages and step bookkeeping.
        """
        writer = await self._get_effective_writer(writer)
        limit_update, limit_manager = self._get_step_limit_manager(state, "agent", writer=writer)
        if limit_update:
            return limit_update
        if limit_manager is None:
            return {}

        history = state["messages"]

        try:
            # Prefer the LM invoker path; otherwise fall back to a LangChain chat model.
            if self.lm_invoker:
                node_update = await self._handle_lm_invoker_call(history, state, config)
            elif isinstance(self.model, BaseChatModel):
                node_update = await self._handle_langchain_model_call(history, state, config)
            else:
                raise ValueError(
                    f"Agent '{self.name}': No valid LMInvoker or LangChain model configured for ReAct agent node."
                )
        except Exception as exc:
            # Imported lazily because guardrails are an optional dependency.
            from aip_agents.guardrails.exceptions import GuardrailViolationError

            if isinstance(exc, GuardrailViolationError):
                return {
                    "messages": [
                        AIMessage(
                            content=f"⚠️ Guardrail violation: {exc.result.reason}",
                            response_metadata={"finish_reason": "stop"},
                        )
                    ]
                }
            raise

        # Count the step only after the LLM call succeeded.
        limit_manager.increment_step()
        node_update.update(limit_manager.to_state_update())
        return node_update

    return agent_node
688
+
689
+ def _extract_tool_calls_from_state(self, state: dict[str, Any]) -> tuple[AIMessage | None, int]:
690
+ """Extract the last AI message and tool call count from state.
691
+
692
+ Args:
693
+ state: Current agent state.
694
+
695
+ Returns:
696
+ Tuple of (last AI message or None, count of tool calls).
697
+ """
698
+ messages = state.get("messages", [])
699
+ last_message = messages[-1] if messages else None
700
+ if not self.resolved_tools or not isinstance(last_message, AIMessage) or not last_message.tool_calls:
701
+ return None, 0
702
+ return last_message, len(last_message.tool_calls)
703
+
704
+ def _check_tool_batch_limits(
705
+ self,
706
+ state: dict[str, Any],
707
+ tool_call_count: int,
708
+ manager: StepLimitManager,
709
+ writer: StreamWriter | None,
710
+ ) -> tuple[dict[str, Any] | None, StepLimitManager | None]:
711
+ """Check if tool batch exceeds limits.
712
+
713
+ Args:
714
+ state: Current LangGraph state dictionary.
715
+ tool_call_count: Number of tools in the current batch.
716
+ manager: Initialized StepLimitManager.
717
+ writer: Optional stream writer for events.
718
+
719
+ Returns:
720
+ Tuple of (limit update dict or None, manager instance).
721
+ """
722
+ if tool_call_count <= 1:
723
+ return None, manager
724
+ return self._get_step_limit_manager(state, "tool", writer=writer, count=tool_call_count, manager=manager)
725
+
726
def _create_tool_node_logic(self) -> Callable[..., Awaitable[dict[str, Any]]]:
    """Build the async graph node that executes the pending tool-call batch."""

    async def tool_node_logic(
        state: dict[str, Any],
        config: dict[str, Any] | None = None,
        *,
        writer: StreamWriter = None,
    ) -> dict[str, Any]:
        """Run the tools requested by the last AI message.

        Args:
            state: Current agent state.
            config: Optional execution configuration.
            writer: Optional stream writer.

        Returns:
            State update with tool results, or a limit-error update.
        """
        writer = await self._get_effective_writer(writer)
        limit_update, limit_manager = self._get_step_limit_manager(state, "tool", writer=writer)
        if limit_update or limit_manager is None:
            return limit_update or {}

        ai_message, batch_size = self._extract_tool_calls_from_state(state)
        if not ai_message:
            return {}

        # Re-validate limits against the actual batch size (Spec-3).
        limit_update, limit_manager = self._check_tool_batch_limits(state, batch_size, limit_manager, writer)
        if limit_update or limit_manager is None:
            return limit_update or {}

        outcome = await self._execute_tool_calls(ai_message, state, config)

        # Every executed tool in the batch counts toward the step budget.
        limit_manager.increment_step(count=batch_size)
        outcome.update(limit_manager.to_state_update())
        return outcome

    return tool_node_logic
768
+
769
async def _execute_tool_calls(
    self, last_message: AIMessage, state: dict[str, Any], config: dict[str, Any] | None
) -> dict[str, Any]:
    """Execute tool calls and aggregate results.

    Runs multiple tool calls concurrently for better parallelism.

    Args:
        last_message: The AI message containing tool calls to execute.
        state: Current agent state containing messages, artifacts, and metadata.
        config: Optional configuration containing thread_id and other execution context.

    Returns:
        Updated state dictionary with tool execution results including messages,
        artifacts, references, and metadata updates.

    Note:
        Results are folded in completion order (``asyncio.as_completed``), so the
        relative order of tool messages is not guaranteed to match the order of
        ``last_message.tool_calls``. ToolMessages stay linked via ``tool_call_id``.
    """
    tool_messages: list[ToolMessage] = []
    # Seeded with artifacts already in state; the run_tool closures below
    # share and extend this list in place via ToolCallContext.
    pending_artifacts: list[dict[str, Any]] = state.get("artifacts") or []
    reference_updates: list[Chunk] = []
    tool_map = {tool.name: tool for tool in self.resolved_tools}
    pii_mapping: dict[str, str] = {}

    aggregated_metadata_delta: dict[str, Any] = {}
    total_tools_token_usage: list[UsageMetadata] = []

    async def run_tool(tool_call: dict[str, Any]):
        """Run a single tool call asynchronously.

        Args:
            tool_call: Tool call dictionary.

        Returns:
            Tool result from execution.
        """
        return await self._run_single_tool_call(
            tool_map=tool_map,
            tool_call=tool_call,
            context=ToolCallContext(
                config=config,
                state=state,
                pending_artifacts=pending_artifacts,
            ),
        )

    # Normalize call shapes (dict / pydantic / attribute objects) before scheduling.
    normalized_tool_calls = [self._normalize_tool_call(tc) for tc in last_message.tool_calls]
    tasks = [asyncio.create_task(run_tool(tc)) for tc in normalized_tool_calls]

    # Accumulate each result as soon as its task finishes.
    for coro in asyncio.as_completed(tasks):
        tool_result = await coro
        self._accumulate_tool_result(
            tool_result,
            tool_messages,
            pending_artifacts,
            aggregated_metadata_delta,
            reference_updates,
            total_tools_token_usage,
            pii_mapping,
        )

    return self._build_tool_state_updates(
        tool_messages,
        pending_artifacts,
        reference_updates,
        aggregated_metadata_delta,
        total_tools_token_usage,
        pii_mapping,
    )
836
+
837
+ def _normalize_tool_call(self, tool_call: Any) -> dict[str, Any]:
838
+ """Normalize tool call inputs into a dict with required keys."""
839
+ if isinstance(tool_call, dict):
840
+ normalized = dict(tool_call)
841
+ elif hasattr(tool_call, "model_dump"):
842
+ normalized = tool_call.model_dump()
843
+ elif hasattr(tool_call, "dict"):
844
+ normalized = tool_call.dict()
845
+ elif hasattr(tool_call, "name") and hasattr(tool_call, "args"):
846
+ normalized = {
847
+ "id": getattr(tool_call, "id", None),
848
+ "name": getattr(tool_call, "name", None),
849
+ "args": getattr(tool_call, "args", None),
850
+ }
851
+ else:
852
+ raise TypeError("Tool call must be a dict-like object or ToolCall instance.")
853
+
854
+ if not isinstance(normalized, dict):
855
+ raise TypeError("Tool call normalization did not produce a dict.")
856
+
857
+ if "name" not in normalized or "args" not in normalized:
858
+ raise TypeError("Tool call must include 'name' and 'args' fields.")
859
+
860
+ return normalized
861
+
862
+ def _accumulate_tool_result( # noqa: PLR0913
863
+ self,
864
+ tool_result: Any,
865
+ tool_messages: list[ToolMessage],
866
+ pending_artifacts: list[dict[str, Any]],
867
+ aggregated_metadata_delta: dict[str, Any],
868
+ reference_updates: list[Chunk],
869
+ total_tools_token_usage: list[UsageMetadata],
870
+ pii_mapping: dict[str, str],
871
+ ) -> None: # noqa: PLR0913
872
+ """Accumulate results from a single tool call.
873
+
874
+ Args:
875
+ tool_result: The result object from a single tool execution containing messages,
876
+ artifacts, metadata_delta, references, usage information, and PII mapping.
877
+ tool_messages: List to accumulate tool messages into.
878
+ pending_artifacts: List to accumulate artifacts into.
879
+ aggregated_metadata_delta: Dictionary to accumulate metadata updates into.
880
+ reference_updates: List to accumulate reference chunks into.
881
+ total_tools_token_usage: List to accumulate token usage metadata into.
882
+ pii_mapping: Dictionary to accumulate PII mappings into (mutated in place).
883
+ """
884
+ if tool_result.messages:
885
+ tool_messages.extend(tool_result.messages)
886
+ if tool_result.artifacts:
887
+ pending_artifacts.extend(tool_result.artifacts)
888
+ if tool_result.metadata_delta:
889
+ aggregated_metadata_delta.update(tool_result.metadata_delta)
890
+ if tool_result.references:
891
+ reference_updates.extend(tool_result.references)
892
+ if tool_result.step_usage:
893
+ total_tools_token_usage.append(tool_result.step_usage)
894
+ if tool_result.pii_mapping:
895
+ pii_mapping.update(tool_result.pii_mapping)
896
+
897
+ def _build_tool_state_updates(
898
+ self,
899
+ tool_messages: list[ToolMessage],
900
+ pending_artifacts: list[dict[str, Any]],
901
+ reference_updates: list[Chunk],
902
+ aggregated_metadata_delta: dict[str, Any],
903
+ total_tools_token_usage: list[UsageMetadata],
904
+ pii_mapping: dict[str, str] | None = None,
905
+ ) -> dict[str, Any]:
906
+ """Build state updates from accumulated tool results.
907
+
908
+ Args:
909
+ tool_messages: List of tool messages to include in state updates.
910
+ pending_artifacts: List of artifacts to include in state updates.
911
+ reference_updates: List of reference chunks to include in state updates.
912
+ aggregated_metadata_delta: Metadata changes to include in state updates.
913
+ total_tools_token_usage: List of token usage metadata from all tool executions.
914
+ pii_mapping: Current PII mapping to include in state updates.
915
+
916
+ Returns:
917
+ Dictionary containing state updates with messages, artifacts, references,
918
+ metadata, token usage, and PII mapping information.
919
+ """
920
+ state_updates: dict[str, Any] = {"messages": tool_messages, "artifacts": pending_artifacts}
921
+
922
+ if reference_updates:
923
+ state_updates["references"] = reference_updates
924
+
925
+ # Clean metadata delta to avoid leaking linkage-only fields
926
+ if "previous_step_ids" in aggregated_metadata_delta:
927
+ aggregated_metadata_delta = {k: v for k, v in aggregated_metadata_delta.items() if k != "previous_step_ids"}
928
+
929
+ if aggregated_metadata_delta:
930
+ state_updates["metadata"] = aggregated_metadata_delta
931
+
932
+ # Process accumulated tool usage
933
+ total_tool_usage = self._process_tool_usage(total_tools_token_usage)
934
+ if total_tool_usage:
935
+ state_updates[TOTAL_USAGE_KEY] = total_tool_usage
936
+
937
+ # Include PII mapping in state updates if present
938
+ if pii_mapping:
939
+ state_updates["pii_mapping"] = pii_mapping
940
+
941
+ return state_updates
942
+
943
+ def _create_should_continue_logic(self, end_node: str) -> Callable[[dict[str, Any]], str]:
944
+ """Create the should_continue function for conditional edges.
945
+
946
+ Args:
947
+ end_node: The name of the end node to return when execution should stop.
948
+
949
+ Returns:
950
+ Function that determines the next node based on the current state.
951
+ """
952
+
953
+ def should_continue(state: dict[str, Any]) -> str:
954
+ """Determine whether to continue to tools or end.
955
+
956
+ Args:
957
+ state: Current agent state containing messages and execution status.
958
+
959
+ Returns:
960
+ Either "tools" to continue tool execution or the end_node to stop execution.
961
+ """
962
+ messages = state.get("messages", [])
963
+ if not messages:
964
+ return end_node
965
+
966
+ last_message = messages[-1]
967
+
968
+ # Check if this is the last step
969
+ if state.get("is_last_step", False):
970
+ logger.debug(f"Agent '{self.name}': Reached last step, ending execution")
971
+ return end_node
972
+
973
+ if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
974
+ return end_node
975
+
976
+ return "tools"
977
+
978
+ return should_continue
979
+
980
+ def _add_usage_metadata_to_tool_message(
981
+ self, messages: list[ToolMessage], usage_metadata: UsageMetadata | None
982
+ ) -> None:
983
+ """Add usage metadata to a tool message's response metadata.
984
+
985
+ Args:
986
+ messages: List of tool messages to potentially update.
987
+ usage_metadata: The usage metadata to add to the first tool message, if any.
988
+
989
+ Note:
990
+ - Used for streaming purposes only, to show token usage by tool via ToolMessage response_metadata.
991
+ - Tool message that are coming from Command with single message or a dictionary will have exactly 1 message.
992
+ - For those cases, we will add usage_metadata to the response_metadata of the first message.
993
+ """
994
+ if len(messages) == 1 and isinstance(messages[0], ToolMessage) and usage_metadata is not None:
995
+ messages[0].response_metadata[USAGE_METADATA_KEY] = usage_metadata
996
+
997
+ def _process_tool_usage(self, total_tools_token_usage: list[UsageMetadata]) -> UsageMetadata | None:
998
+ """Process accumulated tool usage metadata.
999
+
1000
+ Args:
1001
+ total_tools_token_usage: List of UsageMetadata objects to process.
1002
+
1003
+ Returns:
1004
+ UsageMetadata: The accumulated token usage metadata.
1005
+ """
1006
+ if not total_tools_token_usage:
1007
+ return None
1008
+
1009
+ # More concise and functional
1010
+ return reduce(add_usage_metadata, total_tools_token_usage, None)
1011
+
1012
def _process_command_tool_output(
    self,
    tool_output: Command,
    tool_call: dict[str, Any],
    execution_time: float,
) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
    """Convert a Command tool output into messages, artifacts, and metadata deltas.

    Args:
        tool_output: The Command returned by the tool.
        tool_call: The tool call info (id, name, args) for ToolMessage context.
        execution_time: Execution time to include in ToolMessage tool_calls.

    Returns:
        A tuple of (messages, artifacts, metadata_delta).
    """
    # getattr guards against Command-like objects that lack an `update` mapping.
    update: dict[str, Any] = getattr(tool_output, "update", {}) or {}

    out_messages: list[ToolMessage] = []
    out_artifacts: list[dict[str, Any]] = []
    metadata_delta: dict[str, Any] = {}

    # Artifacts
    artifacts_update = update.get("artifacts")
    if isinstance(artifacts_update, list):
        out_artifacts.extend(artifacts_update)

    # Metadata
    md_update = update.get("metadata")
    if isinstance(md_update, dict):
        metadata_delta.update(md_update)

    # Messages or fallback to result
    messages_update = update.get("messages")
    if isinstance(messages_update, list):
        out_messages.extend(messages_update)
    else:
        # No explicit messages: synthesize a single ToolMessage from `result`
        # so the model always sees an answer for this tool_call_id.
        agent_result = str(update.get("result", ""))
        out_messages.append(
            ToolMessage(
                content=agent_result,
                tool_call_id=tool_call["id"],
                tool_calls={
                    "name": tool_call["name"],
                    "args": tool_call["args"],
                    "output": agent_result,
                    "time": execution_time,
                },
            )
        )

    # If metadata contains linkage info, attach to first ToolMessage response_metadata
    md = update.get("metadata")
    if isinstance(md, dict):
        prev_ids = md.get("previous_step_ids")
        if isinstance(prev_ids, list) and prev_ids and out_messages:
            try:
                # dict.fromkeys de-duplicates while preserving first-seen order.
                out_messages[0].response_metadata.setdefault("previous_step_ids", [])
                existing = out_messages[0].response_metadata.get("previous_step_ids", [])
                combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
                out_messages[0].response_metadata["previous_step_ids"] = combined
            except Exception:
                # Best-effort linkage only: malformed response_metadata must
                # never fail the tool call itself.
                pass

    return out_messages, out_artifacts, metadata_delta
1077
+
1078
def _process_simple_tool_output(
    self,
    agent_result_text: str,
    tool_call: dict[str, Any],
    execution_time: float,
) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
    """Wrap a plain string tool output in a ToolMessage, with no artifacts.

    Args:
        agent_result_text: The string result from tool execution.
        tool_call: The tool call information containing id, name, and args.
        execution_time: Time taken to execute the tool.

    Returns:
        Tuple of (tool_messages, artifacts) where artifacts is always an empty list.
    """
    message = ToolMessage(
        content=agent_result_text,
        tool_call_id=tool_call["id"],
        tool_calls={
            "name": tool_call["name"],
            "args": tool_call["args"],
            "output": agent_result_text,
            "time": execution_time,
        },
    )
    return [message], []
1107
+
1108
@deprecated(version="0.5.0", reason="Use _process_command_tool_output instead")
def _process_legacy_tool_output(
    self,
    tool_output: dict[str, Any],
    tool_call: dict[str, Any],
    execution_time: float,
    pending_artifacts: list[dict[str, Any]],
) -> tuple[list[ToolMessage], list[dict[str, Any]]]:
    """Normalize legacy dict outputs into ToolMessages and artifacts.

    Supports legacy tools that return a mapping possibly containing
    'artifacts' and 'result' keys.

    Args:
        tool_output: The legacy dict output from tool execution.
        tool_call: The tool call information containing id, name, and args.
        execution_time: Time taken to execute the tool.
        pending_artifacts: Current list of pending artifacts to extend with new ones.

    Returns:
        Tuple of (tool_messages, updated_pending_artifacts).
    """
    # Collect any artifacts the legacy tool bundled into its output.
    if isinstance(tool_output.get("artifacts"), list):
        pending_artifacts.extend(tool_output["artifacts"])

    # Fall back to stringifying the whole payload when 'result' is absent.
    agent_result = str(tool_output.get("result", tool_output))

    # Surface any tool-provided metadata on the ToolMessage.
    response_metadata: dict[str, Any] = {}
    if isinstance(tool_output, dict) and isinstance(tool_output.get("metadata"), dict):
        response_metadata.update(tool_output["metadata"])

    message = ToolMessage(
        content=agent_result,
        tool_call_id=tool_call["id"],
        tool_calls={
            "name": tool_call["name"],
            "args": tool_call["args"],
            "output": agent_result,
            "time": execution_time,
        },
        response_metadata=response_metadata,
    )
    return [message], pending_artifacts
1154
+
1155
async def _run_single_tool_call(
    self,
    tool_map: dict[str, BaseTool],
    tool_call: dict[str, Any],
    context: ToolCallContext,
) -> ToolCallResult:
    """Execute a single tool call with tool output management and reference resolution.

    This method handles the complete lifecycle of a tool call including:
    - Reference resolution for tool arguments
    - Tool execution with enhanced configuration
    - Automatic and manual tool output storage
    - Error handling for reference and execution failures

    Args:
        tool_map: Mapping of tool name to tool instance.
        tool_call: The tool call information from the AI message.
        context: Tool call context containing config, state, pending artifacts, and HITL decision.

    Returns:
        ToolCallResult containing messages, artifacts, metadata_delta, references, and usage_metadata.
    """
    tool = tool_map.get(tool_call["name"])  # type: ignore[index]
    # Synthesize a short call id when the model did not supply one.
    tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")

    # Check for HITL approval if configured
    if context.hitl_decision is None:
        try:
            context.hitl_decision = await self._check_hitl_approval(
                tool_call=tool_call, tool_name=tool_call["name"], state=context.state
            )

            if context.hitl_decision and context.hitl_decision.decision in TOOL_EXECUTION_BLOCKING_DECISIONS:
                # Return sentinel result for pending/rejected/skipped tools
                return self._create_hitl_blocking_result(tool_call, context.hitl_decision)
        except Exception as e:
            # Log HITL failure but continue with normal tool execution
            logger.warning(
                "HITL approval check failed for tool '%s' (error: %s: %s). Proceeding with tool execution.",
                tool_call["name"],
                type(e).__name__,
                e,
            )

    # Execute tool and handle errors
    tool_output, execution_time, references, updated_pii_mapping = await self._execute_tool_with_management(
        tool=tool,
        tool_call=tool_call,
        tool_call_id=tool_call_id,
        config=context.config,
        state=context.state,
    )

    # Process tool output into messages and artifacts
    messages, artifacts, metadata_delta = self._process_tool_output_result(
        tool_output=tool_output,
        tool_call=tool_call,
        execution_time=execution_time,
        pending_artifacts=context.pending_artifacts,
    )

    # Capture and merge new PII mapping from subagent
    updated_pii_mapping = self._merge_tool_pii_mapping(metadata_delta, updated_pii_mapping)

    # If HITL was required, annotate the first ToolMessage with HITL metadata
    try:
        if context.hitl_decision and messages:
            first_msg = messages[0]
            if isinstance(first_msg, ToolMessage):
                # Copy before mutating so a shared metadata dict is not modified in place.
                response_metadata = getattr(first_msg, "response_metadata", None) or {}
                response_metadata = dict(response_metadata)
                hitl_model = HitlMetadata.from_decision(context.hitl_decision)
                response_metadata["hitl"] = hitl_model.as_payload()
                first_msg.response_metadata = response_metadata
    except Exception as e:
        # Non-fatal: continue even if metadata injection fails
        logger.warning(f"Failed to inject HITL metadata into tool message: {e}")

    # Extract and add usage metadata
    tool_usage_metadata = extract_token_usage_from_tool_output(tool_output)
    self._add_usage_metadata_to_tool_message(messages, tool_usage_metadata)

    return ToolCallResult(
        messages=messages,
        artifacts=artifacts,
        metadata_delta=metadata_delta,
        references=references,
        step_usage=tool_usage_metadata,
        pii_mapping=updated_pii_mapping,
    )
1245
+
1246
+ def _merge_tool_pii_mapping(
1247
+ self,
1248
+ metadata_delta: dict[str, Any],
1249
+ updated_pii_mapping: dict[str, str] | None,
1250
+ ) -> dict[str, str] | None:
1251
+ """Merge PII mapping from metadata delta into existing mapping.
1252
+
1253
+ Args:
1254
+ metadata_delta: Metadata delta returned from tool execution.
1255
+ updated_pii_mapping: PII mapping produced during tool execution, if any.
1256
+
1257
+ Returns:
1258
+ New merged PII mapping or None if no PII information is present.
1259
+ """
1260
+ if "pii_mapping" not in metadata_delta:
1261
+ return updated_pii_mapping
1262
+
1263
+ metadata_pii_mapping = metadata_delta.get("pii_mapping") or {}
1264
+ if not isinstance(metadata_pii_mapping, dict) or not metadata_pii_mapping:
1265
+ return updated_pii_mapping
1266
+
1267
+ if updated_pii_mapping:
1268
+ return {**updated_pii_mapping, **metadata_pii_mapping}
1269
+
1270
+ return metadata_pii_mapping
1271
+
1272
async def _execute_tool_with_management(
    self,
    tool: BaseTool | None,
    tool_call: dict[str, Any],
    tool_call_id: str,
    config: dict[str, Any] | None,
    state: dict[str, Any],
) -> tuple[Any, float, list[Chunk], dict[str, str] | None]:
    """Execute tool with output management, reference resolution, and error handling.

    Args:
        tool: The tool instance to execute, or None if not found.
        tool_call: The tool call information from the AI message.
        tool_call_id: Unique identifier for this tool call.
        config: Optional configuration passed down to the tool.
        state: Current agent state containing tool output manager.

    Returns:
        Tuple of (tool_output, execution_time, references, updated_pii_mapping).
        Failures are never re-raised: tool_output is an error string instead,
        so the model sees the failure as the tool's answer.
    """
    execution_time = 0.0
    references: list[Chunk] = []
    updated_pii_mapping: dict[str, str] | None = None

    if not tool:
        return f"Error: Tool '{tool_call['name']}' not found.", execution_time, references, updated_pii_mapping

    start_time = time.time()
    try:
        # Resolve tool argument references
        resolved_args = self._resolve_tool_arguments(tool_call, state, config)
        predefined_pii_mapping = self._get_predefined_pii_mapping(state, config)

        # With PII explicitly disabled, still honor any predefined mapping
        # via a mapping-only handler; otherwise create/reuse a full handler.
        enable_pii = self._enable_pii
        if enable_pii is False:
            pii_handler = ToolPIIHandler.create_mapping_only(predefined_pii_mapping)
        else:
            pii_handler = self._create_pii_handler(predefined_pii_mapping, config)

        # Deanonymize tool arguments if PII handler is enabled
        resolved_args = self._deanonymize_tool_args(pii_handler, resolved_args)

        # Create enhanced tool configuration with output management
        tool_config = self._create_enhanced_tool_config(config, state, tool_call["name"], tool_call_id)
        if not isinstance(tool_config, dict):
            raise TypeError("Tool configuration must be a dictionary.")
        tool_config_runnable = tool_config

        arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)

        # Prefer the tool's streaming entry point when it exposes one.
        if arun_streaming_method and callable(arun_streaming_method):
            tool_output = await self._execute_tool_with_streaming(tool, tool_call, tool_config)
        else:
            tool_output = await tool.ainvoke(resolved_args, tool_config_runnable)

        references = extract_references_from_tool(tool, tool_output)

        # Anonymize tool output if PII handler is enabled
        tool_output, updated_pii_mapping = self._anonymize_tool_output(pii_handler, tool_output)

        # Handle automatic storage if enabled
        self._handle_automatic_tool_storage(
            ToolStorageParams(
                tool=tool,
                tool_output=tool_output,
                tool_call=tool_call,
                tool_call_id=tool_call_id,
                resolved_args=resolved_args,
                state=state,
            ),
            config=config,
        )

        return tool_output, time.time() - start_time, references, updated_pii_mapping

    except ToolReferenceError as ref_error:
        # Reference failures are reported to the model as the tool's output.
        tool_output = f"Reference error in tool '{tool_call['name']}': {str(ref_error)}"
        logger.error(f"Tool reference error: {ref_error}", exc_info=True)
        return tool_output, time.time() - start_time, references, updated_pii_mapping
    except Exception as e:  # noqa: BLE001
        # Any other failure is likewise surfaced as an error string, never raised.
        tool_output = f"Error executing tool '{tool_call['name']}': {str(e)}"
        logger.error(f"Tool execution error: {e}", exc_info=True)
        return tool_output, time.time() - start_time, references, updated_pii_mapping
1355
+
1356
+ def _get_predefined_pii_mapping(
1357
+ self,
1358
+ state: dict[str, Any],
1359
+ config: dict[str, Any] | None,
1360
+ ) -> dict[str, str] | None:
1361
+ """Get predefined PII mapping from state or configuration.
1362
+
1363
+ This helper centralizes the logic for resolving an existing PII mapping,
1364
+ first checking the agent state metadata, then falling back to the config
1365
+ metadata if available.
1366
+
1367
+ Args:
1368
+ state: Current LangGraph agent state.
1369
+ config: Optional LangGraph configuration dictionary.
1370
+
1371
+ Returns:
1372
+ The resolved PII mapping dictionary if found, otherwise None.
1373
+ """
1374
+ metadata_from_state = state.get("metadata") or {}
1375
+ mapping_from_state = metadata_from_state.get("pii_mapping")
1376
+ if isinstance(mapping_from_state, dict) and mapping_from_state:
1377
+ return mapping_from_state # type: ignore[return-value]
1378
+
1379
+ if not config:
1380
+ return None
1381
+
1382
+ metadata_from_config = config.get("metadata") or {}
1383
+ mapping_from_config = metadata_from_config.get("pii_mapping")
1384
+ if isinstance(mapping_from_config, dict) and mapping_from_config:
1385
+ return mapping_from_config # type: ignore[return-value]
1386
+
1387
+ return None
1388
+
1389
def _create_pii_handler(
    self, predefined_pii_mapping: dict[str, str] | None, config: dict[str, Any] | None
) -> ToolPIIHandler | None:
    """Create (or reuse) a PII handler scoped to the current thread.

    Thin wrapper around ToolPIIHandler.create_if_enabled that keeps
    _execute_tool_with_management focused on orchestration. The handler can
    operate in mapping-only mode when no NER credentials are configured.

    Args:
        predefined_pii_mapping: Existing PII mapping to seed the handler with.
        config: LangGraph configuration needed to scope handlers per thread.

    Returns:
        A ToolPIIHandler instance when mapping/NER config is available, otherwise None.
    """
    thread_id: str | None = None
    if config:
        try:
            thread_id = self._extract_thread_id_from_config(config)
        except Exception:
            # Without a thread id the handler simply is not cached.
            thread_id = None

    # Reuse the handler already bound to this thread, if any.
    if thread_id:
        cached_handler = self._pii_handlers_by_thread.get(thread_id)
        if cached_handler:
            return cached_handler

    handler = ToolPIIHandler.create_if_enabled(predefined_pii_mapping)
    if handler and thread_id:
        self._pii_handlers_by_thread[thread_id] = handler
    return handler
1420
+
1421
+ def _deanonymize_tool_args(
1422
+ self,
1423
+ pii_handler: ToolPIIHandler | None,
1424
+ resolved_args: dict[str, Any],
1425
+ ) -> dict[str, Any]:
1426
+ """Deanonymize tool arguments using the provided PII handler.
1427
+
1428
+ Args:
1429
+ pii_handler: Optional ToolPIIHandler instance.
1430
+ resolved_args: Tool arguments after reference resolution.
1431
+
1432
+ Returns:
1433
+ Tool arguments with PII tags replaced by real values when a handler
1434
+ is available, otherwise the original arguments.
1435
+ """
1436
+ if not pii_handler:
1437
+ return resolved_args
1438
+ return pii_handler.deanonymize_tool_args(resolved_args)
1439
+
1440
+ def _anonymize_tool_output(
1441
+ self,
1442
+ pii_handler: ToolPIIHandler | None,
1443
+ tool_output: Any,
1444
+ ) -> tuple[Any, dict[str, str] | None]:
1445
+ """Anonymize tool output and return updated PII mapping when enabled.
1446
+
1447
+ Args:
1448
+ pii_handler: Optional ToolPIIHandler instance.
1449
+ tool_output: Raw output returned by the tool.
1450
+
1451
+ Returns:
1452
+ Tuple of (possibly anonymized tool_output, updated PII mapping or None).
1453
+ """
1454
+ if not pii_handler:
1455
+ return tool_output, None
1456
+
1457
+ anonymized_output, updated_mapping = pii_handler.anonymize_tool_output(tool_output)
1458
+ return anonymized_output, updated_mapping
1459
+
1460
def _resolve_tool_arguments(
    self, tool_call: dict[str, Any], state: dict[str, Any], config: dict[str, Any] | None = None
) -> dict[str, Any]:
    """Resolve tool argument references using the tool output manager.

    Args:
        tool_call: The tool call information containing arguments.
        state: Current agent state containing tool output manager.
        config: Optional configuration containing thread_id information.

    Returns:
        Resolved arguments dictionary (the raw args when resolution is not applicable).

    Raises:
        ToolReferenceError: If reference resolution fails.
    """
    args = tool_call["args"]

    # Resolution requires both the state-held manager and the agent-level one.
    manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
    if not (manager and self.tool_output_manager):
        return args

    thread_id = self._extract_thread_id_from_config(config)
    if not manager.has_outputs(thread_id):
        return args

    resolver = ToolReferenceResolver(self.tool_output_manager.config)
    args = resolver.resolve_references(args, manager, thread_id)
    logger.debug(
        f"Resolved references for tool '{tool_call['name']}' in thread '{thread_id}', "
        f"Resolved args: {args}"
    )
    return args
1491
+
1492
def _create_enhanced_tool_config(
    self, config: dict[str, Any] | None, state: dict[str, Any], tool_name: str, tool_call_id: str
) -> dict[str, Any]:
    """Create enhanced tool configuration with output management capabilities.

    Args:
        config: Base configuration passed down to the tool.
        state: Current agent state containing tool output manager.
        tool_name: Name of the tool being executed.
        tool_call_id: Unique identifier for this tool call.

    Returns:
        Enhanced tool configuration dictionary.
    """
    tool_config = self._create_tool_config(config, state, tool_name=tool_name)

    # Add tool output management capabilities
    manager = state.get(TOOL_OUTPUT_MANAGER_KEY)
    if manager and self.tool_output_manager:
        tool_config[TOOL_OUTPUT_MANAGER_KEY] = manager
        tool_config[CALL_ID_KEY] = tool_call_id

    # Attach coordinator parent step id so delegated sub-agents can link their start step properly
    try:
        thread_id = self._extract_thread_id_from_config(config)
        parent_map = self._tool_parent_map_by_thread.get(thread_id, {})
        parent_step_id = parent_map.get(str(tool_call_id))
        if parent_step_id:
            # Expose the parent id both at top level and under "configurable"
            # so either lookup style used downstream finds it.
            tool_config["parent_step_id"] = parent_step_id
            cfg = tool_config.get("configurable")
            if not isinstance(cfg, dict):
                cfg = {}
            cfg["parent_step_id"] = parent_step_id
            tool_config["configurable"] = cfg
    except Exception:
        # Best-effort linkage only; never fail tool-config creation over it.
        pass

    return tool_config
1530
+
1531
+ def _extract_thread_id_from_config(self, config: dict[str, Any] | None) -> str:
1532
+ """Extract thread_id from LangGraph configuration.
1533
+
1534
+ Since BaseLangGraphAgent._create_graph_config() guarantees a thread ID is always present,
1535
+ this method should always find a valid thread ID. If config is somehow None (which
1536
+ should never happen), creates a new UUID.
1537
+
1538
+ Args:
1539
+ config: LangGraph configuration dictionary.
1540
+
1541
+ Returns:
1542
+ Thread ID string from the configuration.
1543
+ """
1544
+ # This should never happen since _create_graph_config always creates config
1545
+ if not config:
1546
+ thread_id = str(uuid.uuid4())
1547
+ logger.warning(f"Agent '{self.name}': No config provided, generated new thread_id: {thread_id}")
1548
+ return thread_id
1549
+
1550
+ configurable = config["configurable"]
1551
+ thread_key = self.thread_id_key or "thread_id"
1552
+ return str(configurable[thread_key])
1553
+
1554
+ def _handle_automatic_tool_storage(
1555
+ self,
1556
+ params: ToolStorageParams,
1557
+ config: dict[str, Any] | None = None,
1558
+ ) -> None:
1559
+ """Handle automatic storage for tools with store_final_output enabled.
1560
+
1561
+ Args:
1562
+ params: ToolStorageParams containing all necessary parameters.
1563
+ config: Optional configuration containing thread_id information.
1564
+ """
1565
+ manager = params.state.get(TOOL_OUTPUT_MANAGER_KEY)
1566
+
1567
+ if (
1568
+ manager
1569
+ and self.tool_output_manager
1570
+ and params.tool_output is not None
1571
+ and getattr(params.tool, "store_final_output", False)
1572
+ ):
1573
+ # Extract thread_id from config
1574
+ thread_id = self._extract_thread_id_from_config(config)
1575
+
1576
+ storable_data = self._extract_storable_data(params.tool_output)
1577
+ store_params = StoreOutputParams(
1578
+ call_id=params.tool_call_id,
1579
+ tool_name=params.tool_call["name"],
1580
+ data=storable_data,
1581
+ tool_args=params.resolved_args,
1582
+ thread_id=thread_id,
1583
+ description=None, # No automatic description
1584
+ tags=None,
1585
+ agent_name=self.name,
1586
+ )
1587
+ manager.store_output(store_params)
1588
+ logger.debug(
1589
+ f"Auto-stored output for tool '{params.tool_call['name']}' with call_id: {params.tool_call_id} "
1590
+ f"in thread: {thread_id}"
1591
+ )
1592
+
1593
+ def _process_tool_output_result(
1594
+ self,
1595
+ tool_output: Any,
1596
+ tool_call: dict[str, Any],
1597
+ execution_time: float,
1598
+ pending_artifacts: list[dict[str, Any]],
1599
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
1600
+ """Process tool output into messages, artifacts, and metadata.
1601
+
1602
+ Args:
1603
+ tool_output: The output returned by the tool.
1604
+ tool_call: The tool call information from the AI message.
1605
+ execution_time: Time taken to execute the tool.
1606
+ pending_artifacts: List of artifacts to be updated with new artifacts from this tool call.
1607
+
1608
+ Returns:
1609
+ Tuple of (messages, artifacts, metadata_delta).
1610
+ """
1611
+ metadata_delta: dict[str, Any] = {}
1612
+
1613
+ # Handle Command outputs
1614
+ if isinstance(tool_output, Command):
1615
+ return self._handle_command_output(tool_output, tool_call, execution_time, metadata_delta)
1616
+
1617
+ if isinstance(tool_output, dict):
1618
+ return self._handle_legacy_output(tool_output, tool_call, execution_time, pending_artifacts, metadata_delta)
1619
+
1620
+ # Handle string outputs, coercing other simple types
1621
+ if not isinstance(tool_output, str):
1622
+ tool_output = str(tool_output)
1623
+ return self._handle_string_output(tool_output, tool_call, execution_time)
1624
+
1625
+ def _handle_command_output(
1626
+ self, tool_output: Command, tool_call: dict[str, Any], execution_time: float, metadata_delta: dict[str, Any]
1627
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
1628
+ """Handle Command type tool outputs.
1629
+
1630
+ Args:
1631
+ tool_output: The Command object returned by the tool.
1632
+ tool_call: The tool call information containing id, name, and args.
1633
+ execution_time: Time taken to execute the tool.
1634
+ metadata_delta: Dictionary to accumulate metadata updates into.
1635
+
1636
+ Returns:
1637
+ Tuple of (messages, artifacts, updated_metadata_delta).
1638
+ """
1639
+ messages, artifacts, md_delta = self._process_command_tool_output(
1640
+ tool_output=tool_output,
1641
+ tool_call=tool_call,
1642
+ execution_time=execution_time,
1643
+ )
1644
+ if md_delta:
1645
+ metadata_delta.update(md_delta)
1646
+
1647
+ update: dict[str, Any] = getattr(tool_output, "update", {}) or {}
1648
+ pii_mapping = update.get("pii_mapping")
1649
+ if isinstance(pii_mapping, dict) and pii_mapping:
1650
+ metadata_delta["pii_mapping"] = pii_mapping
1651
+
1652
+ return messages, artifacts, metadata_delta
1653
+
1654
+ def _handle_string_output(
1655
+ self, tool_output: str, tool_call: dict[str, Any], execution_time: float
1656
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
1657
+ """Handle string type tool outputs.
1658
+
1659
+ Args:
1660
+ tool_output: The string output from tool execution.
1661
+ tool_call: The tool call information containing id, name, and args.
1662
+ execution_time: Time taken to execute the tool.
1663
+
1664
+ Returns:
1665
+ Tuple of (messages, artifacts, metadata_delta) where artifacts is empty
1666
+ and metadata_delta is empty dict.
1667
+ """
1668
+ messages, artifacts = self._process_simple_tool_output(
1669
+ agent_result_text=tool_output,
1670
+ tool_call=tool_call,
1671
+ execution_time=execution_time,
1672
+ )
1673
+ return messages, artifacts, {}
1674
+
1675
+ def _handle_legacy_output(
1676
+ self,
1677
+ tool_output: Any,
1678
+ tool_call: dict[str, Any],
1679
+ execution_time: float,
1680
+ pending_artifacts: list[dict[str, Any]],
1681
+ metadata_delta: dict[str, Any],
1682
+ ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
1683
+ """Handle legacy dict and other tool outputs.
1684
+
1685
+ Args:
1686
+ tool_output: The output from tool execution (typically a dict).
1687
+ tool_call: The tool call information containing id, name, and args.
1688
+ execution_time: Time taken to execute the tool.
1689
+ pending_artifacts: Current list of pending artifacts to extend with new ones.
1690
+ metadata_delta: Dictionary to accumulate metadata updates into.
1691
+
1692
+ Returns:
1693
+ Tuple of (messages, updated_pending_artifacts, updated_metadata_delta).
1694
+ """
1695
+ messages, artifacts = self._process_legacy_tool_output(
1696
+ tool_output=tool_output, # type: ignore[arg-type]
1697
+ tool_call=tool_call,
1698
+ execution_time=execution_time,
1699
+ pending_artifacts=pending_artifacts,
1700
+ )
1701
+
1702
+ # Process metadata from legacy dict outputs
1703
+ if isinstance(tool_output, dict):
1704
+ self._process_legacy_metadata(tool_output, messages, metadata_delta)
1705
+
1706
+ return messages, artifacts, metadata_delta
1707
+
1708
+ def _process_legacy_metadata(
1709
+ self, tool_output: dict[str, Any], messages: list[BaseMessage], metadata_delta: dict[str, Any]
1710
+ ) -> None:
1711
+ """Process metadata from legacy dict tool outputs.
1712
+
1713
+ Args:
1714
+ tool_output: The dict tool output containing metadata
1715
+ messages: List of messages to potentially update with metadata
1716
+ metadata_delta: Metadata delta to update
1717
+ """
1718
+ md = tool_output.get("metadata")
1719
+ if not isinstance(md, dict):
1720
+ return
1721
+
1722
+ prev_ids = md.get("previous_step_ids")
1723
+ if isinstance(prev_ids, list):
1724
+ metadata_delta["previous_step_ids"] = list(prev_ids)
1725
+ self._attach_previous_step_ids_to_message(messages, prev_ids)
1726
+
1727
+ def _attach_previous_step_ids_to_message(self, messages: list[BaseMessage], prev_ids: list[Any]) -> None:
1728
+ """Attach previous step IDs to the first ToolMessage's response metadata.
1729
+
1730
+ Args:
1731
+ messages: List of messages to update
1732
+ prev_ids: Previous step IDs to attach
1733
+ """
1734
+ if not messages or not isinstance(messages[0], ToolMessage):
1735
+ return
1736
+
1737
+ try:
1738
+ tool_message = messages[0]
1739
+ tool_message.response_metadata.setdefault("previous_step_ids", [])
1740
+ existing = tool_message.response_metadata.get("previous_step_ids", [])
1741
+ combined = list(dict.fromkeys(list(existing) + list(prev_ids)))
1742
+ tool_message.response_metadata["previous_step_ids"] = combined
1743
+ except Exception:
1744
+ pass
1745
+
1746
+ async def _execute_tool_with_streaming(
1747
+ self,
1748
+ tool: BaseTool,
1749
+ tool_call: dict[str, Any],
1750
+ tool_config: dict[str, Any] | None = None,
1751
+ ) -> str:
1752
+ """Execute a tool with streaming support and emit streaming chunks.
1753
+
1754
+ This method dynamically passes all tool arguments to the streaming method
1755
+ using **kwargs, making it flexible for tools with different parameter structures.
1756
+
1757
+ Args:
1758
+ tool: The tool instance to execute.
1759
+ tool_call: The tool call information from the AI message.
1760
+ tool_config: Optional configuration passed down to the tool.
1761
+
1762
+ Returns:
1763
+ The final output from the tool execution.
1764
+ """
1765
+ writer: StreamWriter = get_stream_writer()
1766
+ final_output: Any = None
1767
+ saw_tool_result = False
1768
+ start_time = time.time()
1769
+
1770
+ tool_call_id = tool_call.get("id", f"tool_call_{uuid.uuid4().hex[:8]}")
1771
+ tool_name = tool_call.get("name", "")
1772
+ tool_args = self._normalize_tool_args(tool_call.get("args"))
1773
+
1774
+ logger.info("Streaming tool start detected: agent=%s tool=%s call_id=%s", self.name, tool_name, tool_call_id)
1775
+
1776
+ try:
1777
+ self._emit_default_tool_call_event(writer, tool_name, tool_call_id, tool_args)
1778
+
1779
+ streaming_kwargs = self._build_streaming_kwargs(tool_args, tool_config)
1780
+ arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)
1781
+ if not callable(arun_streaming_method):
1782
+ raise RuntimeError(f"Tool '{tool_name}' does not implement streaming.")
1783
+
1784
+ async for chunk in arun_streaming_method(**streaming_kwargs):
1785
+ final_output, saw_tool_result = self._handle_streaming_chunk(
1786
+ chunk=chunk,
1787
+ writer=writer,
1788
+ tool_name=tool_call["name"],
1789
+ current_output=final_output,
1790
+ saw_tool_result=saw_tool_result,
1791
+ )
1792
+
1793
+ final_output = self._finalize_streaming_tool(
1794
+ writer=writer,
1795
+ tool_name=tool_name,
1796
+ tool_call_id=tool_call_id,
1797
+ tool_args=tool_args,
1798
+ final_output=final_output,
1799
+ saw_tool_result=saw_tool_result,
1800
+ start_time=start_time,
1801
+ )
1802
+ logger.info(
1803
+ "Streaming tool completed: agent=%s tool=%s call_id=%s",
1804
+ self.name,
1805
+ tool_name,
1806
+ tool_call_id,
1807
+ )
1808
+
1809
+ except Exception as e:
1810
+ final_output = f"Error during streaming execution of tool '{tool_call['name']}': {str(e)}"
1811
+ logger.error(f"Tool streaming error: {final_output}", exc_info=True)
1812
+ self._emit_tool_error_event(writer, tool_call["name"], final_output)
1813
+
1814
+ return final_output
1815
+
1816
+ @staticmethod
1817
+ def _normalize_tool_args(raw_tool_args: Any) -> dict[str, Any]:
1818
+ """Normalize raw tool arguments into a dictionary.
1819
+
1820
+ Args:
1821
+ raw_tool_args: The raw tool arguments to normalize.
1822
+
1823
+ Returns:
1824
+ A dictionary containing the normalized tool arguments.
1825
+ """
1826
+ if isinstance(raw_tool_args, dict):
1827
+ return raw_tool_args
1828
+ if raw_tool_args is None:
1829
+ return {}
1830
+ return {"value": raw_tool_args}
1831
+
1832
+ @staticmethod
1833
+ def _build_streaming_kwargs(tool_args: dict[str, Any], tool_config: dict[str, Any] | None) -> dict[str, Any]:
1834
+ """Create kwargs payload for streaming execution.
1835
+
1836
+ Args:
1837
+ tool_args: The tool arguments to include in the streaming kwargs.
1838
+ tool_config: Optional tool configuration to include.
1839
+
1840
+ Returns:
1841
+ A dictionary containing the streaming kwargs.
1842
+ """
1843
+ streaming_kwargs = tool_args.copy()
1844
+ if tool_config:
1845
+ streaming_kwargs["config"] = tool_config
1846
+ return streaming_kwargs
1847
+
1848
    def _handle_streaming_chunk(
        self,
        *,
        chunk: Any,
        writer: StreamWriter,
        tool_name: str,
        current_output: Any,
        saw_tool_result: bool,
    ) -> tuple[Any, bool]:
        """Process a single streaming chunk and update output/result flag.

        Non-dict chunks are ignored. TOOL_CALL chunks are suppressed (a default
        TOOL_CALL event is emitted elsewhere); every other chunk is forwarded to
        the stream writer. The result flag flips to True on TOOL_RESULT events
        and on STATUS_UPDATE events whose metadata kind is the final thinking step.

        Args:
            chunk: The streaming chunk to process.
            writer: The stream writer for output.
            tool_name: The name of the tool being executed.
            current_output: The current accumulated output.
            saw_tool_result: Whether a tool result has been seen.

        Returns:
            A tuple of (updated_output, saw_tool_result).
        """
        if not isinstance(chunk, dict):
            return current_output, saw_tool_result

        event_type_raw = chunk.get("event_type")
        event_type = self._resolve_tool_event_type(event_type_raw)
        # Drop TOOL_CALL chunks, whether resolved to the enum or only matching
        # the enum value as a case-insensitive raw string.
        if event_type == A2AStreamEventType.TOOL_CALL or (
            event_type is None
            and isinstance(event_type_raw, str)
            and event_type_raw.lower() == A2AStreamEventType.TOOL_CALL.value
        ):
            return current_output, saw_tool_result

        self._create_tool_streaming_event(chunk, writer, tool_name)
        new_output = self._extract_output_from_chunk(chunk, current_output)
        if event_type == A2AStreamEventType.STATUS_UPDATE:
            metadata = chunk.get("metadata")
            kind = None
            if isinstance(metadata, dict):
                kind = metadata.get(MetadataFieldKeys.KIND)
            # `kind` may be an enum member or a raw value; compare by value either way.
            if getattr(kind, "value", kind) == Kind.FINAL_THINKING_STEP.value:
                return new_output, True
        if event_type == A2AStreamEventType.TOOL_RESULT:
            return new_output, True
        return new_output, saw_tool_result
1893
+
1894
+ def _emit_default_tool_call_event(
1895
+ self,
1896
+ writer: StreamWriter,
1897
+ tool_name: str,
1898
+ tool_call_id: str,
1899
+ tool_args: dict[str, Any],
1900
+ ) -> None:
1901
+ """Emit a standardized TOOL_CALL event for streaming tools.
1902
+
1903
+ Args:
1904
+ writer: The stream writer to emit events to.
1905
+ tool_name: Name of the tool being called.
1906
+ tool_call_id: Unique identifier for the tool call.
1907
+ tool_args: Arguments passed to the tool.
1908
+ """
1909
+ thread_id = _THREAD_ID_CVAR.get()
1910
+ if thread_id:
1911
+ emitted = self._emitted_tool_calls_by_thread.get(thread_id, set())
1912
+ if tool_call_id in emitted:
1913
+ logger.info(
1914
+ "Skipping fallback tool call event: agent=%s tool=%s call_id=%s",
1915
+ self.name,
1916
+ tool_name,
1917
+ tool_call_id,
1918
+ )
1919
+ return
1920
+
1921
+ tool_call_info = {
1922
+ "tool_calls": [
1923
+ {
1924
+ "id": tool_call_id,
1925
+ "name": tool_name,
1926
+ "args": tool_args,
1927
+ }
1928
+ ],
1929
+ "status": "running",
1930
+ }
1931
+ metadata = {
1932
+ MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
1933
+ MetadataFieldKeys.STATUS: Status.RUNNING,
1934
+ MetadataFieldKeys.TOOL_INFO: tool_call_info,
1935
+ }
1936
+ activity_info = create_tool_activity_info({"tool_info": tool_call_info})
1937
+ event = {
1938
+ "event_type": A2AStreamEventType.TOOL_CALL,
1939
+ "content": f"Processing with tools: {tool_name}",
1940
+ "metadata": metadata,
1941
+ "tool_info": tool_call_info,
1942
+ "thinking_and_activity_info": activity_info,
1943
+ }
1944
+ self._create_tool_streaming_event(event, writer, tool_name)
1945
+
1946
+ @staticmethod
1947
+ def _extract_output_from_chunk(chunk: dict[str, Any], current_output: Any) -> Any:
1948
+ """Return most recent tool output derived from streaming chunk.
1949
+
1950
+ Args:
1951
+ chunk: The streaming chunk containing tool information.
1952
+ current_output: The current output value to fall back to.
1953
+
1954
+ Returns:
1955
+ The extracted output from the chunk or the current_output if not found.
1956
+ """
1957
+ tool_info = chunk.get("tool_info")
1958
+ if isinstance(tool_info, dict):
1959
+ return tool_info.get("output", current_output)
1960
+ return current_output
1961
+
1962
+ def _finalize_streaming_tool(
1963
+ self,
1964
+ *,
1965
+ writer: StreamWriter,
1966
+ tool_name: str,
1967
+ tool_call_id: str,
1968
+ tool_args: dict[str, Any],
1969
+ final_output: Any,
1970
+ saw_tool_result: bool,
1971
+ start_time: float,
1972
+ ) -> str:
1973
+ """Emit final tool event when needed and return final output as string.
1974
+
1975
+ Args:
1976
+ writer: The stream writer to emit events to.
1977
+ tool_name: Name of the tool being called.
1978
+ tool_call_id: Unique identifier for the tool call.
1979
+ tool_args: Arguments passed to the tool.
1980
+ final_output: The final output from the tool execution.
1981
+ saw_tool_result: Whether a TOOL_RESULT event was observed during streaming.
1982
+ start_time: Timestamp when the tool execution started.
1983
+
1984
+ Returns:
1985
+ The final output as a string.
1986
+ """
1987
+ output_text = final_output
1988
+ if output_text is None:
1989
+ output_text = f"Tool '{tool_name}' completed successfully"
1990
+ if not isinstance(output_text, str):
1991
+ output_text = str(output_text)
1992
+
1993
+ logger.debug(
1994
+ "Streaming tool finalize check: agent=%s tool=%s call_id=%s saw_tool_result=%s",
1995
+ self.name,
1996
+ tool_name,
1997
+ tool_call_id,
1998
+ saw_tool_result,
1999
+ )
2000
+ if not saw_tool_result:
2001
+ logger.debug(
2002
+ "Streaming tool finalize emitting default result: agent=%s tool=%s call_id=%s",
2003
+ self.name,
2004
+ tool_name,
2005
+ tool_call_id,
2006
+ )
2007
+ self._emit_default_tool_result_event(
2008
+ writer=writer,
2009
+ tool_name=tool_name,
2010
+ tool_call_id=tool_call_id,
2011
+ tool_args=tool_args,
2012
+ output_text=output_text,
2013
+ start_time=start_time,
2014
+ )
2015
+
2016
+ return output_text
2017
+
2018
+ def _emit_default_tool_result_event(
2019
+ self,
2020
+ *,
2021
+ writer: StreamWriter,
2022
+ tool_name: str,
2023
+ tool_call_id: str,
2024
+ tool_args: dict[str, Any],
2025
+ output_text: str,
2026
+ start_time: float,
2027
+ ) -> None:
2028
+ """Emit a standardized TOOL_RESULT event for streaming tools.
2029
+
2030
+ Args:
2031
+ writer: The stream writer to emit events to.
2032
+ tool_name: Name of the tool that was executed.
2033
+ tool_call_id: Unique identifier for the tool call.
2034
+ tool_args: Arguments passed to the tool.
2035
+ output_text: The output text from the tool execution.
2036
+ start_time: Timestamp when the tool execution started.
2037
+ """
2038
+ execution_time = time.time() - start_time
2039
+ tool_result_info = {
2040
+ "name": tool_name,
2041
+ "args": tool_args,
2042
+ "output": output_text,
2043
+ "execution_time": execution_time,
2044
+ }
2045
+ metadata = {
2046
+ MetadataFieldKeys.KIND: Kind.AGENT_THINKING_STEP,
2047
+ MetadataFieldKeys.STATUS: Status.FINISHED,
2048
+ MetadataFieldKeys.TOOL_INFO: tool_result_info,
2049
+ }
2050
+ activity_info = create_tool_activity_info({"tool_info": tool_result_info})
2051
+ event = {
2052
+ "event_type": A2AStreamEventType.TOOL_RESULT,
2053
+ "content": output_text,
2054
+ "metadata": metadata,
2055
+ "tool_info": tool_result_info,
2056
+ "thinking_and_activity_info": activity_info,
2057
+ }
2058
+ self._create_tool_streaming_event(event, writer, tool_name)
2059
+
2060
+ def _emit_tool_error_event(self, writer: StreamWriter, tool_name: str, error_msg: str) -> None:
2061
+ """Emit a tool error event to the stream.
2062
+
2063
+ Args:
2064
+ writer: Stream writer to emit events.
2065
+ tool_name: Name of the tool that encountered an error.
2066
+ error_msg: The error message.
2067
+ """
2068
+ a2a_event = self._create_a2a_event(
2069
+ event_type=A2AStreamEventType.ERROR,
2070
+ content=f"Error in {tool_name}: {error_msg}",
2071
+ tool_info={
2072
+ "name": tool_name,
2073
+ "error": error_msg,
2074
+ },
2075
+ )
2076
+ writer(a2a_event)
2077
+
2078
+ async def _execute_abefore_model_hook(self, state: dict[str, Any]) -> None:
2079
+ """Asynchronously execute abefore_model middleware hook and update state.
2080
+
2081
+ Args:
2082
+ state: Current agent state to potentially update.
2083
+ """
2084
+ if self._middleware_manager:
2085
+ try:
2086
+ before_updates = await self._middleware_manager.abefore_model(state)
2087
+ if before_updates:
2088
+ state.update(before_updates)
2089
+ except Exception as e:
2090
+ # Lazy import to support optional guardrails dependency
2091
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2092
+
2093
+ if isinstance(e, GuardrailViolationError):
2094
+ # Re-raise guardrail violations to be caught by the agent node
2095
+ raise
2096
+ logger.error(f"Agent '{self.name}': Middleware abefore_model hook failed: {e}")
2097
+
2098
+ async def _execute_aafter_model_hook(self, state_updates: dict[str, Any], state: dict[str, Any]) -> None:
2099
+ """Asynchronously execute aafter_model middleware hook.
2100
+
2101
+ Args:
2102
+ state_updates: Updates to be merged into state.
2103
+ state: Current agent state for context.
2104
+ """
2105
+ if self._middleware_manager:
2106
+ try:
2107
+ after_updates = await self._middleware_manager.aafter_model(state)
2108
+ if after_updates:
2109
+ state_updates.update(after_updates)
2110
+ except Exception as e:
2111
+ # Lazy import to support optional guardrails dependency
2112
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2113
+
2114
+ if isinstance(e, GuardrailViolationError):
2115
+ # Re-raise guardrail violations
2116
+ raise
2117
+ logger.error(f"Agent '{self.name}': Middleware aafter_model hook failed: {e}")
2118
+
2119
+ def _execute_before_model_hook(self, state: dict[str, Any]) -> None:
2120
+ """Execute before_model middleware hook and update state.
2121
+
2122
+ Args:
2123
+ state: Current agent state to potentially update.
2124
+ """
2125
+ if self._middleware_manager:
2126
+ try:
2127
+ before_updates = self._middleware_manager.before_model(state)
2128
+ if before_updates:
2129
+ state.update(before_updates)
2130
+ except Exception as e:
2131
+ # Lazy import to support optional guardrails dependency
2132
+ from aip_agents.guardrails.exceptions import GuardrailViolationError
2133
+
2134
+ if isinstance(e, GuardrailViolationError):
2135
+ # Re-raise guardrail violations to be caught by the agent node
2136
+ raise
2137
+ logger.error(f"Agent '{self.name}': Middleware before_model hook failed: {e}")
2138
+
2139
+ def _execute_modify_model_request_hook(
2140
+ self, messages: list[Any], enhanced_instruction: str, state: dict[str, Any]
2141
+ ) -> tuple[list[Any], str]:
2142
+ """Execute modify_model_request middleware hook.
2143
+
2144
+ Args:
2145
+ messages: Current messages to potentially modify.
2146
+ enhanced_instruction: Current system prompt to potentially modify.
2147
+ state: Current agent state for context.
2148
+
2149
+ Returns:
2150
+ Tuple of (potentially modified messages, potentially modified system prompt).
2151
+ """
2152
+ if not self._middleware_manager:
2153
+ return messages, enhanced_instruction
2154
+
2155
+ try:
2156
+ model_request: ModelRequest = {
2157
+ "messages": messages,
2158
+ "tools": self.resolved_tools or [],
2159
+ "system_prompt": enhanced_instruction,
2160
+ }
2161
+ model_request = self._middleware_manager.modify_model_request(model_request, state)
2162
+
2163
+ modified_messages = model_request.get("messages", messages)
2164
+ modified_prompt = model_request.get("system_prompt", enhanced_instruction)
2165
+
2166
+ return modified_messages, modified_prompt
2167
+ except Exception as e:
2168
+ logger.error(f"Agent '{self.name}': Middleware modify_model_request hook failed: {e}")
2169
+ return messages, enhanced_instruction
2170
+
2171
+ def _execute_after_model_hook(self, state_updates: dict[str, Any], state: dict[str, Any]) -> None:
2172
+ """Execute after_model middleware hook and update state_updates.
2173
+
2174
+ Args:
2175
+ state_updates: Dictionary to update with middleware changes.
2176
+ state: Current agent state for context.
2177
+ """
2178
+ if self._middleware_manager:
2179
+ try:
2180
+ after_updates = self._middleware_manager.after_model(state)
2181
+ if after_updates:
2182
+ state_updates.update(after_updates)
2183
+ except Exception as e:
2184
+ logger.error(f"Agent '{self.name}': Middleware after_model hook failed: {e}")
2185
+
2186
    async def _handle_lm_invoker_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LMInvoker model calls with bridge conversion and tool output context.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.

        Raises:
            RuntimeError: If no LM invoker is configured on this agent.
        """
        # Execute before_model middleware hook
        await self._execute_abefore_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        # Execute modify_model_request middleware hook.
        # NOTE(review): only the (possibly modified) system prompt is kept — the
        # middleware's modified messages are discarded and `current_messages` is
        # converted below instead. Confirm whether that is intentional.
        _, enhanced_instruction = self._execute_modify_model_request_hook(
            list(current_messages), enhanced_instruction, state
        )

        messages = convert_langchain_messages_to_gllm_messages(list(current_messages), enhanced_instruction)

        # Prefer a per-run emitter carried in state over the agent-level one.
        effective_event_emitter = state.get("event_emitter") or self.event_emitter

        if self.lm_invoker is None:
            raise RuntimeError("LM invoker is required for this execution path.")

        if self.resolved_tools:
            self.lm_invoker.set_tools(self.resolved_tools)

        # Debug timing for LLM invocation
        _t0 = time.perf_counter()
        logger.info(f"Agent '{self.name}': LLM invoke start (tools={len(self.resolved_tools)})")
        lm_output = await self.lm_invoker.invoke(messages=messages, event_emitter=effective_event_emitter)
        _dt = time.perf_counter() - _t0
        logger.info(f"Agent '{self.name}': LLM invoke finished in {_dt:.3f}s")

        # Bridge the invoker's output back into a LangChain message.
        ai_message = convert_lm_output_to_langchain_message(lm_output)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        await self._execute_aafter_model_hook(state_updates, state)

        return state_updates
2240
+
2241
    async def _handle_langchain_model_call(
        self, current_messages: Sequence[BaseMessage], state: dict[str, Any], config: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Handle LangChain BaseChatModel calls with tool output context.

        Args:
            current_messages: The current messages in the agent.
            state: The current state of the agent.
            config: The configuration for the agent.

        Returns:
            dict[str, Any]: A dictionary containing the new messages and updated token usage.

        Raises:
            RuntimeError: If no LangChain model is configured on this agent.
        """
        # Execute before_model middleware hook
        await self._execute_abefore_model_hook(state)

        # Build tool output aware instruction
        enhanced_instruction = self._build_tool_output_aware_instruction(self.instruction, state, config)

        langchain_prompt: list[BaseMessage] = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        # Execute modify_model_request middleware hook
        langchain_prompt, enhanced_instruction = self._execute_modify_model_request_hook(
            langchain_prompt, enhanced_instruction, state
        )

        # Rebuild prompt if needed (invalid structure or system prompt was modified).
        # NOTE(review): when middleware changed BOTH the messages and the system
        # prompt, this rebuild discards the middleware's message edits and keeps
        # only the new system prompt — confirm this is intentional.
        if (
            not langchain_prompt
            or not isinstance(langchain_prompt[0], SystemMessage)
            or langchain_prompt[0].content != enhanced_instruction
        ):
            langchain_prompt = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

        if self.model is None:
            raise RuntimeError("Model is required for this execution path.")

        # Bind tools only when the agent actually resolved any.
        model_with_tools = self.model.bind_tools(self.resolved_tools) if self.resolved_tools else self.model

        ai_message = await model_with_tools.ainvoke(langchain_prompt, config)

        # Update token usage if available in the message
        state_updates = {"messages": [ai_message]}

        # Extract and accumulate token usage from the message
        token_usage_updates = extract_and_update_token_usage_from_ai_message(ai_message)
        state_updates.update(token_usage_updates)

        # Execute after_model middleware hook
        await self._execute_aafter_model_hook(state_updates, state)

        return state_updates
2293
+
2294
+ def _add_user_id_memory_tool_config(self, metadata: dict[str, Any], memory_user_id: str) -> None:
2295
+ """Add user ID to memory tool config.
2296
+
2297
+ Args:
2298
+ metadata: The metadata to add the user ID to.
2299
+ memory_user_id: The user ID to add.
2300
+ """
2301
+ try:
2302
+ tool_cfgs = metadata.get(TOOL_CONFIGS_KEY, {})
2303
+ for tool_name in (MEMORY_SEARCH_TOOL_NAME, MEMORY_DELETE_TOOL_NAME):
2304
+ per_tool_config = tool_cfgs.get(tool_name)
2305
+ if not isinstance(per_tool_config, dict):
2306
+ per_tool_config = {}
2307
+ per_tool_config["user_id"] = memory_user_id
2308
+ tool_cfgs[tool_name] = per_tool_config
2309
+ metadata[TOOL_CONFIGS_KEY] = tool_cfgs
2310
+ except Exception as e:
2311
+ # Non-fatal; metadata injection is best-effort
2312
+ logger.warning("Failed to add user ID to memory tool config: %s", e)
2313
+
2314
def _prepare_graph_input(self, input_data: str | dict[str, Any], **kwargs: Any) -> dict[str, Any]:
    """Convert user input into the initial graph state dictionary.

    Builds the message list, resolves request metadata (including optional
    Mem0 per-tool user-id injection), inherits delegation/step-budget context
    from ContextVars, and publishes the step limit config so delegation tools
    can read it.

    Args:
        input_data: The user's input, either a plain query string or a dict
            carrying a "query" key.
        **kwargs: Additional keyword arguments including optional metadata.
            - thread_id: Thread identifier passed from _create_graph_config.

    Returns:
        Dictionary representing the initial graph state with messages,
        metadata, artifacts, and tool output management components.

    Raises:
        TypeError: If input_data is neither a string nor a dict with "query".
    """
    # Accept a raw string or a dict that explicitly carries the query.
    if isinstance(input_data, str):
        query = input_data
    elif isinstance(input_data, dict) and "query" in input_data:
        query = input_data["query"]
    else:
        raise TypeError(f"Unsupported input type for LangGraphReactAgent: {type(input_data)}")

    prior_messages = kwargs.get("messages", []) or []
    messages: list[BaseMessage] = prior_messages + [HumanMessage(content=query)]

    # Extract metadata for tools and agent context.
    metadata = self._extract_metadata_from_kwargs(**kwargs)

    # If caller specified memory_user_id, inject it as per-tool config for the Mem0 tool.
    memory_user_id: str | None = kwargs.get("memory_user_id")
    if memory_user_id and self._memory_enabled():
        self._add_user_id_memory_tool_config(metadata, memory_user_id)

    # thread_id is passed explicitly from the caller after _create_graph_config.
    thread_id = kwargs.get("thread_id")

    # Request-level step limit config wins over the agent default.
    step_limit_config = kwargs.get("step_limit_config") or self.step_limit_config

    _unset = object()

    def _read_cvar(cvar: Any) -> Any:
        # ContextVar.get() raises LookupError when no value was ever set.
        try:
            return cvar.get()
        except LookupError:
            return _unset

    # Step limit context inheritance (Spec-2).
    depth_value = _read_cvar(_DELEGATION_DEPTH_CVAR)
    inherited_depth = 0 if depth_value is _unset else depth_value

    chain_value = _read_cvar(_DELEGATION_CHAIN_CVAR)
    inherited_chain = [] if chain_value is _unset else list(chain_value)

    budget_value = _read_cvar(_REMAINING_STEP_BUDGET_CVAR)
    inherited_budget = None if budget_value is _unset else budget_value

    # Publish step_limit_config via ContextVar so delegation tools can access it.
    if step_limit_config:
        _STEP_LIMIT_CONFIG_CVAR.set(step_limit_config)

    return {
        "messages": messages,
        "event_emitter": kwargs.get("event_emitter"),
        "artifacts": [],
        "metadata": metadata,
        "tool_output_manager": self.tool_output_manager,
        "thread_id": thread_id,
        # Step limit state initialization
        "current_step": 0,  # Start at step 0
        "delegation_depth": inherited_depth,
        "delegation_chain": inherited_chain,
        "step_limit_config": asdict(step_limit_config) if step_limit_config else None,
        "remaining_step_budget": inherited_budget,
    }
+
2391
+ def _resolve_tool_metadata(self, tool_name: str, metadata: dict[str, Any] | None) -> dict[str, Any]:
2392
+ """Resolve effective metadata for a specific tool given the mixed schema.
2393
+
2394
+ Metadata Resolution Hierarchy (lowest to highest precedence):
2395
+
2396
+ 1. Agent-level flat defaults: Apply to all tools from self.tool_configs
2397
+ - Skips 'tool_configs' key and dict values (per-tool configs)
2398
+
2399
+ 2. Agent-level per-tool defaults: From self.tool_configs[tool_name] or
2400
+ self.tool_configs['tool_configs'][tool_name]
2401
+
2402
+ 3. Request-level global metadata: From metadata kwargs, excluding 'tool_configs' key
2403
+
2404
+ 4. Request-level per-tool metadata: From metadata['tool_configs'][tool_name]
2405
+ - Highest precedence, overrides all previous layers
2406
+
2407
+ Tool names are sanitized for consistent lookup across all layers.
2408
+
2409
+ Args:
2410
+ tool_name: Sanitized runtime tool name (e.g., 'delegate_to_report_generator')
2411
+ metadata: Raw metadata from kwargs (flat dict or mixed schema)
2412
+
2413
+ Returns:
2414
+ Merged metadata for this tool with proper precedence hierarchy applied.
2415
+ """
2416
+ effective_metadata: dict[str, Any] = {}
2417
+
2418
+ # Layer 1: Agent-level defaults (lowest precedence)
2419
+ self._apply_agent_defaults(effective_metadata, tool_name)
2420
+
2421
+ # Layer 2: Request-level global metadata (middle precedence)
2422
+ self._apply_global_metadata(effective_metadata, metadata)
2423
+
2424
+ # Layer 3: Request-level per-tool metadata (highest precedence)
2425
+ self._apply_per_tool_metadata(effective_metadata, tool_name, metadata)
2426
+
2427
+ return effective_metadata
2428
+
2429
def _apply_agent_defaults(self, effective_metadata: dict[str, Any], tool_name: str) -> None:
    """Apply agent-level default configurations to the effective metadata.

    Three sub-layers are merged, later ones overriding earlier ones:

    1. Flat defaults for all tools: non-dict top-level values of
       self.tool_configs (TOOL_CONFIGS_KEY excluded). Dict values are
       skipped here because they are per-tool configurations.
    2. Per-tool defaults keyed directly by tool name:
       self.tool_configs[tool_name].
    3. Per-tool defaults nested under TOOL_CONFIGS_KEY, with keys
       sanitized for consistent lookup:
       self.tool_configs[TOOL_CONFIGS_KEY][tool_name].

    Args:
        effective_metadata: The metadata dict to update in place.
        tool_name: The sanitized tool name to apply configurations for.
    """
    configs = self.tool_configs
    if not isinstance(configs, dict):
        return

    # Sub-layer 1: flat scalar defaults shared by every tool.
    effective_metadata.update(
        {key: value for key, value in configs.items() if key != TOOL_CONFIGS_KEY and not isinstance(value, dict)}
    )

    # Sub-layer 2: per-tool config keyed directly by the tool name.
    direct_config = configs.get(tool_name)
    if isinstance(direct_config, dict):
        effective_metadata.update(direct_config)

    # Sub-layer 3: per-tool config nested under the TOOL_CONFIGS_KEY section.
    nested_section = configs.get(TOOL_CONFIGS_KEY)
    if isinstance(nested_section, dict):
        nested_config = self._sanitize_tool_names_map(nested_section).get(tool_name)
        if isinstance(nested_config, dict):
            effective_metadata.update(nested_config)
+
2476
def _apply_global_metadata(self, effective_metadata: dict[str, Any], metadata: dict[str, Any] | None) -> None:
    """Apply request-level global metadata to the effective metadata.

    The per-tool section (TOOL_CONFIGS_KEY) is excluded; it is merged
    separately by _apply_per_tool_metadata at higher precedence.

    Args:
        effective_metadata: The metadata dict to update in place.
        metadata: Raw metadata from the request; ignored unless it is a
            non-empty dict.
    """
    if not isinstance(metadata, dict) or not metadata:
        return

    # Merge everything except the per-tool configuration section.
    effective_metadata.update({key: value for key, value in metadata.items() if key != TOOL_CONFIGS_KEY})
+
2490
def _apply_per_tool_metadata(
    self, effective_metadata: dict[str, Any], tool_name: str, metadata: dict[str, Any] | None
) -> None:
    """Apply request-level per-tool metadata to the effective metadata.

    Looks up metadata[TOOL_CONFIGS_KEY][tool_name] (keys sanitized) and
    merges it in; this is the highest-precedence layer.

    Args:
        effective_metadata: The metadata dict to update in place.
        tool_name: The sanitized tool name.
        metadata: Raw metadata from the request.
    """
    if not (metadata and isinstance(metadata, dict)):
        return

    per_tool_section = metadata.get(TOOL_CONFIGS_KEY, {})
    if not isinstance(per_tool_section, dict):
        return

    tool_specific = self._sanitize_tool_names_map(per_tool_section).get(tool_name, {})
    if isinstance(tool_specific, dict):
        effective_metadata.update(tool_specific)
+
2508
+ def _sanitize_tool_names_map(self, tools_map: dict[str, Any]) -> dict[str, Any]:
2509
+ """Sanitize tool names in a mapping for consistent lookup.
2510
+
2511
+ Args:
2512
+ tools_map: Dictionary with potentially unsanitized tool names as keys
2513
+
2514
+ Returns:
2515
+ Dictionary with sanitized tool names as keys
2516
+ """
2517
+ sanitized_map = {}
2518
+ for user_key, tool_meta in tools_map.items():
2519
+ sanitized_key = self.name_preprocessor.sanitize_tool_name(user_key)
2520
+ sanitized_map[sanitized_key] = tool_meta
2521
+ return sanitized_map
2522
+
2523
+ def _create_tool_config(
2524
+ self, base_config: dict[str, Any] | None, state: dict[str, Any], tool_name: str | None = None
2525
+ ) -> dict[str, Any]:
2526
+ """Create enriched tool configuration with metadata and context.
2527
+
2528
+ Args:
2529
+ base_config: The base configuration passed to the tool node.
2530
+ state: The current agent state containing metadata and other context.
2531
+ tool_name: Optional tool name for per-tool metadata resolution.
2532
+
2533
+ Returns:
2534
+ dict[str, Any]: Enriched configuration for tool execution.
2535
+ """
2536
+ tool_config = base_config.copy() if base_config else {}
2537
+
2538
+ state_metadata = state.get("metadata")
2539
+ if tool_name:
2540
+ effective_metadata = self._resolve_tool_metadata(tool_name, state_metadata)
2541
+ else:
2542
+ effective_metadata = state_metadata if isinstance(state_metadata, dict) else {}
2543
+
2544
+ if effective_metadata:
2545
+ if "metadata" not in tool_config:
2546
+ tool_config["metadata"] = effective_metadata
2547
+ else:
2548
+ tool_config["metadata"].update(effective_metadata)
2549
+ logger.debug(f"Agent '{self.name}': Passing metadata to tool '{tool_name}': {effective_metadata}")
2550
+
2551
+ return tool_config
2552
+
2553
def _extract_storable_data(self, tool_output: Any) -> Any:
    """Pick the portion of a tool's output worth persisting for later reference.

    Used by the tool output management system when a tool has
    `store_final_output=True`; the stored value can then be referenced by
    other tools via the `$tool_output.<call_id>` syntax.

    Extraction rules by type:
        * Command: the 'result' entry of its update dict, or the whole
          update dict when no 'result' key exists.
        * str: returned unchanged.
        * dict: the 'result' entry if present, otherwise the whole dict.
        * anything else: its string representation.

    Example:
        A Command whose update is {"result": "success", "data": [1, 2, 3]}
        yields "success"; a dict {"status": "ok"} (no "result" key) yields
        the dict itself.

    Args:
        tool_output: The raw output from a tool execution (Command, str,
            dict, or any other object).

    Returns:
        The value to store in the tool output management system, per the
        rules above.
    """
    if isinstance(tool_output, Command):
        # Command.update may be absent or None; normalize to an empty dict.
        update_payload = getattr(tool_output, "update", {}) or {}
        return update_payload.get("result", update_payload)
    if isinstance(tool_output, str):
        return tool_output
    if isinstance(tool_output, dict):
        return tool_output.get("result", tool_output)
    return str(tool_output)
+
2602
def _build_tool_output_aware_instruction(
    self, base_instruction: str, state: dict[str, Any], config: dict[str, Any] | None = None
) -> str:
    """Build LLM instruction that includes context about available tool outputs.

    This method enhances the base instruction with information about previously
    stored tool outputs, allowing the LLM to reference them in subsequent tool
    calls via the "$tool_output.<call_id>" syntax instead of copying payloads.

    Args:
        base_instruction: The original system instruction for the agent.
        state: Current agent state; the tool output manager is read from it
            under TOOL_OUTPUT_MANAGER_KEY.
        config: Optional configuration containing thread_id information.

    Returns:
        The base instruction unchanged when no manager or no stored outputs
        exist; otherwise the instruction followed by a
        <TOOL_OUTPUT_REFERENCES> guidance block.
    """
    manager = state.get(TOOL_OUTPUT_MANAGER_KEY)

    # Both the state-carried manager and the agent-level manager must be
    # present; otherwise tool output referencing is disabled for this run.
    if not manager or not self.tool_output_manager:
        return base_instruction

    thread_id = self._extract_thread_id_from_config(config)

    # No stored outputs for this thread yet -> nothing to advertise.
    if not manager.has_outputs(thread_id):
        return base_instruction
    # Summary is capped at 10 entries to bound prompt growth.
    outputs_summary = manager.generate_summary(max_entries=10, thread_id=thread_id)

    # Build enhanced instruction
    # NOTE(review): dedent() runs AFTER f-string interpolation, so a
    # multi-line base_instruction or outputs_summary whose lines lack this
    # leading indent will defeat the common-prefix stripping — confirm
    # callers pass single-block or pre-indented text.
    prompt = dedent(f"""
        {base_instruction}

        <TOOL_OUTPUT_REFERENCES>

        # Goal
        - Use the most relevant stored tool output via "$tool_output.<call_id>" to avoid copying large data.

        # Usage
        - Syntax: "$tool_output.<call_id>" in any tool argument; returns the full stored output.
        - IDs: Use only those listed below; do not invent or modify.
        - Selection: Pick the most relevant (usually most recent).
        - Don’ts: Don’t paste raw output or expand references.
        - Errors: Invalid/missing IDs fail—ask for the correct call_id or run the prerequisite tool.

        # Example
        - tool_name.run(tool_argument="$tool_output.abc123")

        # User Output Schema
        - "reference": "$tool_output.<call_id>", "tool": "<tool_name>", "agent": "<agent_name>", "data_preview": "<truncated preview>"

        Available Outputs
        {outputs_summary}
        </TOOL_OUTPUT_REFERENCES>
        """)  # noqa: E501
    return prompt
+
2658
def _cleanup_thread_context(self, current_thread_id: str | None, token: Any) -> None:
    """Run base cleanup, then drop any PII handler cached for this thread.

    Args:
        current_thread_id: ID of the thread whose context is being cleaned up.
        token: Cancellation or execution token passed from the caller.

    Returns:
        None. This method performs cleanup side effects only.
    """
    super()._cleanup_thread_context(current_thread_id, token)
    if not current_thread_id:
        return
    # pop() with a default never raises, so a missing handler is a no-op.
    self._pii_handlers_by_thread.pop(current_thread_id, None)
+
2672
+ # ==========================================================================
2673
+ # Programmatic Tool Calling (PTC) Methods
2674
+ # ==========================================================================
2675
+
2676
def add_mcp_server(self, mcp_config: dict[str, dict[str, Any]]) -> None:
    """Register MCP servers and flag the PTC tool for resync when enabled."""
    super().add_mcp_server(mcp_config)

    ptc_config = self._ptc_config
    if not ptc_config or not ptc_config.enabled:
        return

    # Discard any previously built tool; it will be rebuilt against the
    # updated server set on the next sync.
    self._ptc_tool = None
    self._ptc_tool_synced = False
    logger.debug(f"Agent '{self.name}': PTC tool will resync after MCP changes")
+
2689
def enable_ptc(self, config: PTCSandboxConfig | None = None) -> None:
    """Enable Programmatic Tool Calling (PTC) for this agent.

    With PTC enabled, the LLM can run Python code that invokes MCP tools
    programmatically inside a sandbox — useful for chaining several tool
    calls with local data processing in between.

    The execute_ptc_code tool is added automatically once MCP servers are
    configured; when none are configured yet, the tool sync is deferred
    until servers are added.

    Args:
        config: Optional configuration for PTC sandbox execution (see
            PTCSandboxConfig for options such as the enabled flag and
            sandbox_timeout). When None, a default config with enabled=True
            is created. The enabled flag is forced on either way.

    Example:
        agent.enable_ptc(PTCSandboxConfig(enabled=True))
        agent.add_mcp_server({"yfinance": {...}})
        # execute_ptc_code tool is now available

    Note:
        PTC can also be enabled via the constructor by passing
        ptc_config=PTCSandboxConfig(enabled=True, ...).
    """
    # Imported lazily to avoid circular dependencies at module import time.
    from aip_agents.ptc.executor import PTCSandboxConfig

    self._ptc_config = config or PTCSandboxConfig()
    self._ptc_config.enabled = True
    self._ptc_tool_synced = False

    logger.info(f"Agent '{self.name}': PTC enabled")

    # Sync immediately when an MCP client is already available.
    self._sync_ptc_tool()
+
2726
def _sync_ptc_tool(self) -> None:
    """Build and register the execute_ptc_code tool once MCP servers are ready.

    Called after enable_ptc() and again after MCP servers are added. Creates
    the execute_ptc_code tool from the current MCP client configuration and
    rebuilds the graph so the tool becomes available.

    The tool is only created once; subsequent calls are no-ops after a
    successful sync.
    """
    if not self._ptc_config or not self._ptc_config.enabled:
        return
    if self._ptc_tool_synced:
        return

    # Defer until the MCP layer is fully configured and initialized.
    deferral_reason = None
    if not self.mcp_config:
        deferral_reason = "no MCP servers configured"
    elif not self.mcp_client:
        deferral_reason = "no MCP client yet"
    elif not self.mcp_client.is_initialized:
        deferral_reason = "MCP client not initialized"
    if deferral_reason:
        logger.debug(f"Agent '{self.name}': PTC tool sync deferred - {deferral_reason}")
        return

    # Imported lazily to avoid circular dependencies at module import time.
    from aip_agents.tools.execute_ptc_code import create_execute_ptc_code_tool

    logger.info(f"Agent '{self.name}': Syncing PTC tool with MCP client")

    # Create the execute_ptc_code tool with the agent's tool configs.
    self._ptc_tool = create_execute_ptc_code_tool(
        self.mcp_client, self._ptc_config, agent_tool_configs=self.tool_configs
    )

    # Rebuild the graph so it includes the new PTC tool.
    self._rebuild_graph()

    self._ptc_tool_synced = True
    logger.info(f"Agent '{self.name}': PTC tool synced successfully")

    # Refresh the PTC prompt guidance to match the new tool set.
    self._sync_ptc_prompt()
+
2774
def _sync_ptc_prompt(self) -> None:
    """Inject PTC usage guidance into the agent instruction.

    When PTC is enabled and an MCP client exists, builds a PTC usage block
    and appends it to the original instruction. A hash of the MCP
    configuration is tracked so the prompt is only rebuilt when the
    configuration actually changes.
    """
    if not self._ptc_config or not self._ptc_config.enabled:
        return
    if not self.mcp_client:
        return

    # Imported lazily to avoid circular dependencies at module import time.
    from aip_agents.ptc.prompt_builder import build_ptc_prompt, compute_ptc_prompt_hash

    # _ptc_config is guaranteed truthy by the guard above.
    prompt_config = self._ptc_config.prompt

    # Skip the rebuild when the MCP configuration is unchanged.
    current_hash = compute_ptc_prompt_hash(self.mcp_client, config=prompt_config)
    if current_hash == self._ptc_prompt_hash:
        logger.debug(f"Agent '{self.name}': PTC prompt unchanged, skipping refresh")
        return

    ptc_prompt = build_ptc_prompt(self.mcp_client, config=prompt_config)

    # Always compose from the original instruction so repeated refreshes
    # never stack multiple PTC blocks.
    self.instruction = f"{self._original_instruction}\n\n{ptc_prompt}"
    self._ptc_prompt_hash = current_hash

    logger.info(f"Agent '{self.name}': PTC prompt guidance injected")
+
2808
async def _register_mcp_tools(self) -> None:
    """Register MCP tools, then sync the PTC tool if a sync is pending.

    Extends the base implementation so the execute_ptc_code tool is added
    once MCP servers have been initialized.
    """
    await super()._register_mcp_tools()

    ptc_config = self._ptc_config
    if ptc_config and ptc_config.enabled and not self._ptc_tool_synced:
        self._sync_ptc_tool()
+
2820
async def cleanup(self) -> None:
    """Cleanup agent resources including the PTC sandbox.

    Extends the base cleanup: if an execute_ptc_code tool was created, its
    sandbox runtime is shut down first (best-effort), then the parent
    cleanup handles the MCP client.
    """
    import inspect

    if self._ptc_tool is not None:
        try:
            cleanup_method = getattr(self._ptc_tool, "cleanup", None)
            if callable(cleanup_method):
                # The tool's cleanup may be sync or async; only await
                # awaitables so a synchronous cleanup no longer raises a
                # TypeError (previously swallowed as a warning).
                result = cleanup_method()
                if inspect.isawaitable(result):
                    await result
                logger.debug(f"Agent '{self.name}': PTC sandbox cleanup completed")
        except Exception as e:
            # Best-effort: sandbox teardown failures must not block base cleanup.
            logger.warning(f"Agent '{self.name}': Error during PTC sandbox cleanup: {e}")

    # Call parent cleanup for MCP client
    await super().cleanup()
+
2839
+ def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
2840
+ """Convert final graph state to user-friendly output.
2841
+
2842
+ Args:
2843
+ final_state_result: The final state from graph execution.
2844
+
2845
+ Returns:
2846
+ Formatted output dictionary.
2847
+ """
2848
+ return self._extract_output_from_final_state(final_state_result)
2849
+
2850
+
2851
class LangGraphAgent(LangGraphReactAgent):
    # NOTE(review): no behavior added — presumably retained as a
    # backward-compatible import name; confirm before removing.
    """Alias for LangGraphReactAgent."""
+
2854
+
2855
class LangChainAgent(LangGraphReactAgent):
    # NOTE(review): no behavior added — presumably retained as a
    # backward-compatible import name; confirm before removing.
    """Alias for LangGraphReactAgent."""