@illuma-ai/agents 1.0.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (558) hide show
  1. package/README.md +485 -0
  2. package/dist/cjs/agents/AgentContext.cjs +734 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +190 -0
  5. package/dist/cjs/common/enum.cjs.map +1 -0
  6. package/dist/cjs/events.cjs +172 -0
  7. package/dist/cjs/events.cjs.map +1 -0
  8. package/dist/cjs/graphs/Graph.cjs +1615 -0
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +890 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/instrumentation.cjs +21 -0
  13. package/dist/cjs/instrumentation.cjs.map +1 -0
  14. package/dist/cjs/llm/anthropic/index.cjs +292 -0
  15. package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
  16. package/dist/cjs/llm/anthropic/types.cjs +50 -0
  17. package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
  18. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +630 -0
  19. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  20. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
  21. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  22. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  23. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  24. package/dist/cjs/llm/bedrock/index.cjs +282 -0
  25. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  26. package/dist/cjs/llm/fake.cjs +97 -0
  27. package/dist/cjs/llm/fake.cjs.map +1 -0
  28. package/dist/cjs/llm/google/index.cjs +216 -0
  29. package/dist/cjs/llm/google/index.cjs.map +1 -0
  30. package/dist/cjs/llm/google/utils/common.cjs +647 -0
  31. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  32. package/dist/cjs/llm/openai/index.cjs +1028 -0
  33. package/dist/cjs/llm/openai/index.cjs.map +1 -0
  34. package/dist/cjs/llm/openai/utils/index.cjs +765 -0
  35. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  36. package/dist/cjs/llm/openrouter/index.cjs +212 -0
  37. package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
  38. package/dist/cjs/llm/providers.cjs +43 -0
  39. package/dist/cjs/llm/providers.cjs.map +1 -0
  40. package/dist/cjs/llm/text.cjs +69 -0
  41. package/dist/cjs/llm/text.cjs.map +1 -0
  42. package/dist/cjs/llm/vertexai/index.cjs +329 -0
  43. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  44. package/dist/cjs/main.cjs +240 -0
  45. package/dist/cjs/main.cjs.map +1 -0
  46. package/dist/cjs/messages/cache.cjs +387 -0
  47. package/dist/cjs/messages/cache.cjs.map +1 -0
  48. package/dist/cjs/messages/content.cjs +53 -0
  49. package/dist/cjs/messages/content.cjs.map +1 -0
  50. package/dist/cjs/messages/core.cjs +367 -0
  51. package/dist/cjs/messages/core.cjs.map +1 -0
  52. package/dist/cjs/messages/format.cjs +761 -0
  53. package/dist/cjs/messages/format.cjs.map +1 -0
  54. package/dist/cjs/messages/ids.cjs +23 -0
  55. package/dist/cjs/messages/ids.cjs.map +1 -0
  56. package/dist/cjs/messages/prune.cjs +398 -0
  57. package/dist/cjs/messages/prune.cjs.map +1 -0
  58. package/dist/cjs/messages/tools.cjs +96 -0
  59. package/dist/cjs/messages/tools.cjs.map +1 -0
  60. package/dist/cjs/run.cjs +328 -0
  61. package/dist/cjs/run.cjs.map +1 -0
  62. package/dist/cjs/schemas/validate.cjs +324 -0
  63. package/dist/cjs/schemas/validate.cjs.map +1 -0
  64. package/dist/cjs/splitStream.cjs +210 -0
  65. package/dist/cjs/splitStream.cjs.map +1 -0
  66. package/dist/cjs/stream.cjs +620 -0
  67. package/dist/cjs/stream.cjs.map +1 -0
  68. package/dist/cjs/tools/BrowserTools.cjs +248 -0
  69. package/dist/cjs/tools/BrowserTools.cjs.map +1 -0
  70. package/dist/cjs/tools/Calculator.cjs +66 -0
  71. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  72. package/dist/cjs/tools/CodeExecutor.cjs +234 -0
  73. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
  74. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +636 -0
  75. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -0
  76. package/dist/cjs/tools/ToolNode.cjs +548 -0
  77. package/dist/cjs/tools/ToolNode.cjs.map +1 -0
  78. package/dist/cjs/tools/ToolSearch.cjs +909 -0
  79. package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
  80. package/dist/cjs/tools/handlers.cjs +255 -0
  81. package/dist/cjs/tools/handlers.cjs.map +1 -0
  82. package/dist/cjs/tools/schema.cjs +31 -0
  83. package/dist/cjs/tools/schema.cjs.map +1 -0
  84. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  85. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  86. package/dist/cjs/tools/search/content.cjs +140 -0
  87. package/dist/cjs/tools/search/content.cjs.map +1 -0
  88. package/dist/cjs/tools/search/firecrawl.cjs +179 -0
  89. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  90. package/dist/cjs/tools/search/format.cjs +203 -0
  91. package/dist/cjs/tools/search/format.cjs.map +1 -0
  92. package/dist/cjs/tools/search/highlights.cjs +245 -0
  93. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  94. package/dist/cjs/tools/search/rerankers.cjs +174 -0
  95. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  96. package/dist/cjs/tools/search/schema.cjs +117 -0
  97. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  98. package/dist/cjs/tools/search/search.cjs +566 -0
  99. package/dist/cjs/tools/search/search.cjs.map +1 -0
  100. package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
  101. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
  102. package/dist/cjs/tools/search/tool.cjs +456 -0
  103. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  104. package/dist/cjs/tools/search/utils.cjs +66 -0
  105. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  106. package/dist/cjs/types/graph.cjs +29 -0
  107. package/dist/cjs/types/graph.cjs.map +1 -0
  108. package/dist/cjs/utils/contextAnalytics.cjs +66 -0
  109. package/dist/cjs/utils/contextAnalytics.cjs.map +1 -0
  110. package/dist/cjs/utils/events.cjs +31 -0
  111. package/dist/cjs/utils/events.cjs.map +1 -0
  112. package/dist/cjs/utils/graph.cjs +16 -0
  113. package/dist/cjs/utils/graph.cjs.map +1 -0
  114. package/dist/cjs/utils/handlers.cjs +70 -0
  115. package/dist/cjs/utils/handlers.cjs.map +1 -0
  116. package/dist/cjs/utils/llm.cjs +27 -0
  117. package/dist/cjs/utils/llm.cjs.map +1 -0
  118. package/dist/cjs/utils/misc.cjs +56 -0
  119. package/dist/cjs/utils/misc.cjs.map +1 -0
  120. package/dist/cjs/utils/run.cjs +73 -0
  121. package/dist/cjs/utils/run.cjs.map +1 -0
  122. package/dist/cjs/utils/schema.cjs +27 -0
  123. package/dist/cjs/utils/schema.cjs.map +1 -0
  124. package/dist/cjs/utils/title.cjs +125 -0
  125. package/dist/cjs/utils/title.cjs.map +1 -0
  126. package/dist/cjs/utils/tokens.cjs +125 -0
  127. package/dist/cjs/utils/tokens.cjs.map +1 -0
  128. package/dist/cjs/utils/toonFormat.cjs +388 -0
  129. package/dist/cjs/utils/toonFormat.cjs.map +1 -0
  130. package/dist/esm/agents/AgentContext.mjs +732 -0
  131. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  132. package/dist/esm/common/enum.mjs +190 -0
  133. package/dist/esm/common/enum.mjs.map +1 -0
  134. package/dist/esm/events.mjs +164 -0
  135. package/dist/esm/events.mjs.map +1 -0
  136. package/dist/esm/graphs/Graph.mjs +1612 -0
  137. package/dist/esm/graphs/Graph.mjs.map +1 -0
  138. package/dist/esm/graphs/MultiAgentGraph.mjs +888 -0
  139. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  140. package/dist/esm/instrumentation.mjs +19 -0
  141. package/dist/esm/instrumentation.mjs.map +1 -0
  142. package/dist/esm/llm/anthropic/index.mjs +290 -0
  143. package/dist/esm/llm/anthropic/index.mjs.map +1 -0
  144. package/dist/esm/llm/anthropic/types.mjs +48 -0
  145. package/dist/esm/llm/anthropic/types.mjs.map +1 -0
  146. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +627 -0
  147. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  148. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
  149. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  150. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  151. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  152. package/dist/esm/llm/bedrock/index.mjs +280 -0
  153. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  154. package/dist/esm/llm/fake.mjs +94 -0
  155. package/dist/esm/llm/fake.mjs.map +1 -0
  156. package/dist/esm/llm/google/index.mjs +214 -0
  157. package/dist/esm/llm/google/index.mjs.map +1 -0
  158. package/dist/esm/llm/google/utils/common.mjs +638 -0
  159. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  160. package/dist/esm/llm/openai/index.mjs +1018 -0
  161. package/dist/esm/llm/openai/index.mjs.map +1 -0
  162. package/dist/esm/llm/openai/utils/index.mjs +759 -0
  163. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  164. package/dist/esm/llm/openrouter/index.mjs +210 -0
  165. package/dist/esm/llm/openrouter/index.mjs.map +1 -0
  166. package/dist/esm/llm/providers.mjs +39 -0
  167. package/dist/esm/llm/providers.mjs.map +1 -0
  168. package/dist/esm/llm/text.mjs +67 -0
  169. package/dist/esm/llm/text.mjs.map +1 -0
  170. package/dist/esm/llm/vertexai/index.mjs +327 -0
  171. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  172. package/dist/esm/main.mjs +37 -0
  173. package/dist/esm/main.mjs.map +1 -0
  174. package/dist/esm/messages/cache.mjs +382 -0
  175. package/dist/esm/messages/cache.mjs.map +1 -0
  176. package/dist/esm/messages/content.mjs +51 -0
  177. package/dist/esm/messages/content.mjs.map +1 -0
  178. package/dist/esm/messages/core.mjs +359 -0
  179. package/dist/esm/messages/core.mjs.map +1 -0
  180. package/dist/esm/messages/format.mjs +752 -0
  181. package/dist/esm/messages/format.mjs.map +1 -0
  182. package/dist/esm/messages/ids.mjs +21 -0
  183. package/dist/esm/messages/ids.mjs.map +1 -0
  184. package/dist/esm/messages/prune.mjs +393 -0
  185. package/dist/esm/messages/prune.mjs.map +1 -0
  186. package/dist/esm/messages/tools.mjs +93 -0
  187. package/dist/esm/messages/tools.mjs.map +1 -0
  188. package/dist/esm/run.mjs +325 -0
  189. package/dist/esm/run.mjs.map +1 -0
  190. package/dist/esm/schemas/validate.mjs +317 -0
  191. package/dist/esm/schemas/validate.mjs.map +1 -0
  192. package/dist/esm/splitStream.mjs +207 -0
  193. package/dist/esm/splitStream.mjs.map +1 -0
  194. package/dist/esm/stream.mjs +616 -0
  195. package/dist/esm/stream.mjs.map +1 -0
  196. package/dist/esm/tools/BrowserTools.mjs +244 -0
  197. package/dist/esm/tools/BrowserTools.mjs.map +1 -0
  198. package/dist/esm/tools/Calculator.mjs +41 -0
  199. package/dist/esm/tools/Calculator.mjs.map +1 -0
  200. package/dist/esm/tools/CodeExecutor.mjs +226 -0
  201. package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
  202. package/dist/esm/tools/ProgrammaticToolCalling.mjs +622 -0
  203. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -0
  204. package/dist/esm/tools/ToolNode.mjs +545 -0
  205. package/dist/esm/tools/ToolNode.mjs.map +1 -0
  206. package/dist/esm/tools/ToolSearch.mjs +870 -0
  207. package/dist/esm/tools/ToolSearch.mjs.map +1 -0
  208. package/dist/esm/tools/handlers.mjs +250 -0
  209. package/dist/esm/tools/handlers.mjs.map +1 -0
  210. package/dist/esm/tools/schema.mjs +28 -0
  211. package/dist/esm/tools/schema.mjs.map +1 -0
  212. package/dist/esm/tools/search/anthropic.mjs +37 -0
  213. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  214. package/dist/esm/tools/search/content.mjs +119 -0
  215. package/dist/esm/tools/search/content.mjs.map +1 -0
  216. package/dist/esm/tools/search/firecrawl.mjs +176 -0
  217. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  218. package/dist/esm/tools/search/format.mjs +201 -0
  219. package/dist/esm/tools/search/format.mjs.map +1 -0
  220. package/dist/esm/tools/search/highlights.mjs +243 -0
  221. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  222. package/dist/esm/tools/search/rerankers.mjs +168 -0
  223. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  224. package/dist/esm/tools/search/schema.mjs +104 -0
  225. package/dist/esm/tools/search/schema.mjs.map +1 -0
  226. package/dist/esm/tools/search/search.mjs +563 -0
  227. package/dist/esm/tools/search/search.mjs.map +1 -0
  228. package/dist/esm/tools/search/serper-scraper.mjs +129 -0
  229. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
  230. package/dist/esm/tools/search/tool.mjs +454 -0
  231. package/dist/esm/tools/search/tool.mjs.map +1 -0
  232. package/dist/esm/tools/search/utils.mjs +61 -0
  233. package/dist/esm/tools/search/utils.mjs.map +1 -0
  234. package/dist/esm/types/graph.mjs +26 -0
  235. package/dist/esm/types/graph.mjs.map +1 -0
  236. package/dist/esm/utils/contextAnalytics.mjs +64 -0
  237. package/dist/esm/utils/contextAnalytics.mjs.map +1 -0
  238. package/dist/esm/utils/events.mjs +29 -0
  239. package/dist/esm/utils/events.mjs.map +1 -0
  240. package/dist/esm/utils/graph.mjs +13 -0
  241. package/dist/esm/utils/graph.mjs.map +1 -0
  242. package/dist/esm/utils/handlers.mjs +68 -0
  243. package/dist/esm/utils/handlers.mjs.map +1 -0
  244. package/dist/esm/utils/llm.mjs +24 -0
  245. package/dist/esm/utils/llm.mjs.map +1 -0
  246. package/dist/esm/utils/misc.mjs +53 -0
  247. package/dist/esm/utils/misc.mjs.map +1 -0
  248. package/dist/esm/utils/run.mjs +70 -0
  249. package/dist/esm/utils/run.mjs.map +1 -0
  250. package/dist/esm/utils/schema.mjs +24 -0
  251. package/dist/esm/utils/schema.mjs.map +1 -0
  252. package/dist/esm/utils/title.mjs +122 -0
  253. package/dist/esm/utils/title.mjs.map +1 -0
  254. package/dist/esm/utils/tokens.mjs +121 -0
  255. package/dist/esm/utils/tokens.mjs.map +1 -0
  256. package/dist/esm/utils/toonFormat.mjs +381 -0
  257. package/dist/esm/utils/toonFormat.mjs.map +1 -0
  258. package/dist/types/agents/AgentContext.d.ts +293 -0
  259. package/dist/types/common/enum.d.ts +155 -0
  260. package/dist/types/common/index.d.ts +1 -0
  261. package/dist/types/events.d.ts +31 -0
  262. package/dist/types/graphs/Graph.d.ts +216 -0
  263. package/dist/types/graphs/MultiAgentGraph.d.ts +104 -0
  264. package/dist/types/graphs/index.d.ts +2 -0
  265. package/dist/types/index.d.ts +21 -0
  266. package/dist/types/instrumentation.d.ts +1 -0
  267. package/dist/types/llm/anthropic/index.d.ts +39 -0
  268. package/dist/types/llm/anthropic/types.d.ts +37 -0
  269. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  270. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
  271. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
  272. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  273. package/dist/types/llm/bedrock/index.d.ts +141 -0
  274. package/dist/types/llm/bedrock/types.d.ts +27 -0
  275. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  276. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  277. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  278. package/dist/types/llm/fake.d.ts +31 -0
  279. package/dist/types/llm/google/index.d.ts +24 -0
  280. package/dist/types/llm/google/types.d.ts +42 -0
  281. package/dist/types/llm/google/utils/common.d.ts +34 -0
  282. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  283. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  284. package/dist/types/llm/openai/index.d.ts +127 -0
  285. package/dist/types/llm/openai/types.d.ts +10 -0
  286. package/dist/types/llm/openai/utils/index.d.ts +29 -0
  287. package/dist/types/llm/openrouter/index.d.ts +15 -0
  288. package/dist/types/llm/providers.d.ts +5 -0
  289. package/dist/types/llm/text.d.ts +21 -0
  290. package/dist/types/llm/vertexai/index.d.ts +293 -0
  291. package/dist/types/messages/cache.d.ts +54 -0
  292. package/dist/types/messages/content.d.ts +7 -0
  293. package/dist/types/messages/core.d.ts +14 -0
  294. package/dist/types/messages/format.d.ts +137 -0
  295. package/dist/types/messages/ids.d.ts +3 -0
  296. package/dist/types/messages/index.d.ts +7 -0
  297. package/dist/types/messages/prune.d.ts +52 -0
  298. package/dist/types/messages/reducer.d.ts +9 -0
  299. package/dist/types/messages/tools.d.ts +17 -0
  300. package/dist/types/mockStream.d.ts +32 -0
  301. package/dist/types/prompts/collab.d.ts +1 -0
  302. package/dist/types/prompts/index.d.ts +2 -0
  303. package/dist/types/prompts/taskmanager.d.ts +41 -0
  304. package/dist/types/run.d.ts +41 -0
  305. package/dist/types/schemas/index.d.ts +1 -0
  306. package/dist/types/schemas/validate.d.ts +59 -0
  307. package/dist/types/splitStream.d.ts +37 -0
  308. package/dist/types/stream.d.ts +15 -0
  309. package/dist/types/test/mockTools.d.ts +28 -0
  310. package/dist/types/tools/BrowserTools.d.ts +87 -0
  311. package/dist/types/tools/Calculator.d.ts +34 -0
  312. package/dist/types/tools/CodeExecutor.d.ts +57 -0
  313. package/dist/types/tools/ProgrammaticToolCalling.d.ts +138 -0
  314. package/dist/types/tools/ToolNode.d.ts +51 -0
  315. package/dist/types/tools/ToolSearch.d.ts +219 -0
  316. package/dist/types/tools/handlers.d.ts +22 -0
  317. package/dist/types/tools/schema.d.ts +12 -0
  318. package/dist/types/tools/search/anthropic.d.ts +16 -0
  319. package/dist/types/tools/search/content.d.ts +4 -0
  320. package/dist/types/tools/search/firecrawl.d.ts +54 -0
  321. package/dist/types/tools/search/format.d.ts +5 -0
  322. package/dist/types/tools/search/highlights.d.ts +13 -0
  323. package/dist/types/tools/search/index.d.ts +3 -0
  324. package/dist/types/tools/search/rerankers.d.ts +38 -0
  325. package/dist/types/tools/search/schema.d.ts +103 -0
  326. package/dist/types/tools/search/search.d.ts +8 -0
  327. package/dist/types/tools/search/serper-scraper.d.ts +59 -0
  328. package/dist/types/tools/search/test.d.ts +1 -0
  329. package/dist/types/tools/search/tool.d.ts +3 -0
  330. package/dist/types/tools/search/types.d.ts +575 -0
  331. package/dist/types/tools/search/utils.d.ts +10 -0
  332. package/dist/types/types/graph.d.ts +399 -0
  333. package/dist/types/types/index.d.ts +5 -0
  334. package/dist/types/types/llm.d.ts +105 -0
  335. package/dist/types/types/messages.d.ts +4 -0
  336. package/dist/types/types/run.d.ts +112 -0
  337. package/dist/types/types/stream.d.ts +308 -0
  338. package/dist/types/types/tools.d.ts +296 -0
  339. package/dist/types/utils/contextAnalytics.d.ts +37 -0
  340. package/dist/types/utils/events.d.ts +6 -0
  341. package/dist/types/utils/graph.d.ts +2 -0
  342. package/dist/types/utils/handlers.d.ts +34 -0
  343. package/dist/types/utils/index.d.ts +9 -0
  344. package/dist/types/utils/llm.d.ts +3 -0
  345. package/dist/types/utils/llmConfig.d.ts +3 -0
  346. package/dist/types/utils/logging.d.ts +1 -0
  347. package/dist/types/utils/misc.d.ts +7 -0
  348. package/dist/types/utils/run.d.ts +27 -0
  349. package/dist/types/utils/schema.d.ts +8 -0
  350. package/dist/types/utils/title.d.ts +4 -0
  351. package/dist/types/utils/tokens.d.ts +28 -0
  352. package/dist/types/utils/toonFormat.d.ts +111 -0
  353. package/package.json +190 -0
  354. package/src/agents/AgentContext.test.ts +458 -0
  355. package/src/agents/AgentContext.ts +972 -0
  356. package/src/agents/__tests__/AgentContext.test.ts +805 -0
  357. package/src/agents/__tests__/resolveStructuredOutputMode.test.ts +137 -0
  358. package/src/common/enum.ts +203 -0
  359. package/src/common/index.ts +2 -0
  360. package/src/events.ts +223 -0
  361. package/src/graphs/Graph.ts +2228 -0
  362. package/src/graphs/MultiAgentGraph.ts +1063 -0
  363. package/src/graphs/__tests__/structured-output.integration.test.ts +809 -0
  364. package/src/graphs/__tests__/structured-output.test.ts +183 -0
  365. package/src/graphs/index.ts +2 -0
  366. package/src/index.ts +34 -0
  367. package/src/instrumentation.ts +22 -0
  368. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  369. package/src/llm/anthropic/index.ts +413 -0
  370. package/src/llm/anthropic/llm.spec.ts +1442 -0
  371. package/src/llm/anthropic/types.ts +140 -0
  372. package/src/llm/anthropic/utils/message_inputs.ts +757 -0
  373. package/src/llm/anthropic/utils/message_outputs.ts +289 -0
  374. package/src/llm/anthropic/utils/output_parsers.ts +133 -0
  375. package/src/llm/anthropic/utils/tools.ts +29 -0
  376. package/src/llm/bedrock/__tests__/bedrock-caching.test.ts +495 -0
  377. package/src/llm/bedrock/index.ts +411 -0
  378. package/src/llm/bedrock/llm.spec.ts +616 -0
  379. package/src/llm/bedrock/types.ts +51 -0
  380. package/src/llm/bedrock/utils/index.ts +18 -0
  381. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  382. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  383. package/src/llm/fake.ts +133 -0
  384. package/src/llm/google/data/gettysburg10.wav +0 -0
  385. package/src/llm/google/data/hotdog.jpg +0 -0
  386. package/src/llm/google/index.ts +337 -0
  387. package/src/llm/google/llm.spec.ts +934 -0
  388. package/src/llm/google/types.ts +56 -0
  389. package/src/llm/google/utils/common.ts +873 -0
  390. package/src/llm/google/utils/tools.ts +160 -0
  391. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  392. package/src/llm/openai/index.ts +1366 -0
  393. package/src/llm/openai/types.ts +24 -0
  394. package/src/llm/openai/utils/index.ts +1035 -0
  395. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  396. package/src/llm/openrouter/index.ts +291 -0
  397. package/src/llm/providers.ts +52 -0
  398. package/src/llm/text.ts +94 -0
  399. package/src/llm/vertexai/index.ts +359 -0
  400. package/src/messages/__tests__/tools.test.ts +473 -0
  401. package/src/messages/cache.test.ts +1261 -0
  402. package/src/messages/cache.ts +518 -0
  403. package/src/messages/content.test.ts +362 -0
  404. package/src/messages/content.ts +63 -0
  405. package/src/messages/core.ts +473 -0
  406. package/src/messages/ensureThinkingBlock.test.ts +468 -0
  407. package/src/messages/format.ts +1029 -0
  408. package/src/messages/formatAgentMessages.test.ts +1513 -0
  409. package/src/messages/formatAgentMessages.tools.test.ts +419 -0
  410. package/src/messages/formatMessage.test.ts +693 -0
  411. package/src/messages/ids.ts +26 -0
  412. package/src/messages/index.ts +7 -0
  413. package/src/messages/labelContentByAgent.test.ts +887 -0
  414. package/src/messages/prune.ts +568 -0
  415. package/src/messages/reducer.ts +80 -0
  416. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
  417. package/src/messages/tools.ts +108 -0
  418. package/src/mockStream.ts +99 -0
  419. package/src/prompts/collab.ts +6 -0
  420. package/src/prompts/index.ts +2 -0
  421. package/src/prompts/taskmanager.ts +61 -0
  422. package/src/run.ts +467 -0
  423. package/src/schemas/index.ts +2 -0
  424. package/src/schemas/schema-preparation.test.ts +500 -0
  425. package/src/schemas/validate.test.ts +358 -0
  426. package/src/schemas/validate.ts +454 -0
  427. package/src/scripts/abort.ts +157 -0
  428. package/src/scripts/ant_web_search.ts +158 -0
  429. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  430. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  431. package/src/scripts/args.ts +48 -0
  432. package/src/scripts/caching.ts +132 -0
  433. package/src/scripts/cli.ts +172 -0
  434. package/src/scripts/cli2.ts +133 -0
  435. package/src/scripts/cli3.ts +184 -0
  436. package/src/scripts/cli4.ts +191 -0
  437. package/src/scripts/cli5.ts +191 -0
  438. package/src/scripts/code_exec.ts +213 -0
  439. package/src/scripts/code_exec_files.ts +236 -0
  440. package/src/scripts/code_exec_multi_session.ts +241 -0
  441. package/src/scripts/code_exec_ptc.ts +334 -0
  442. package/src/scripts/code_exec_session.ts +282 -0
  443. package/src/scripts/code_exec_simple.ts +147 -0
  444. package/src/scripts/content.ts +138 -0
  445. package/src/scripts/empty_input.ts +137 -0
  446. package/src/scripts/handoff-test.ts +135 -0
  447. package/src/scripts/image.ts +178 -0
  448. package/src/scripts/memory.ts +97 -0
  449. package/src/scripts/multi-agent-chain.ts +331 -0
  450. package/src/scripts/multi-agent-conditional.ts +221 -0
  451. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  452. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  453. package/src/scripts/multi-agent-parallel-start.ts +265 -0
  454. package/src/scripts/multi-agent-parallel.ts +394 -0
  455. package/src/scripts/multi-agent-sequence.ts +217 -0
  456. package/src/scripts/multi-agent-supervisor.ts +365 -0
  457. package/src/scripts/multi-agent-test.ts +186 -0
  458. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  459. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  460. package/src/scripts/parallel-tools-test.ts +340 -0
  461. package/src/scripts/programmatic_exec.ts +396 -0
  462. package/src/scripts/programmatic_exec_agent.ts +231 -0
  463. package/src/scripts/search.ts +146 -0
  464. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  465. package/src/scripts/simple.ts +225 -0
  466. package/src/scripts/single-agent-metadata-test.ts +198 -0
  467. package/src/scripts/stream.ts +140 -0
  468. package/src/scripts/test-custom-prompt-key.ts +145 -0
  469. package/src/scripts/test-handoff-input.ts +170 -0
  470. package/src/scripts/test-handoff-preamble.ts +277 -0
  471. package/src/scripts/test-multi-agent-list-handoff.ts +417 -0
  472. package/src/scripts/test-parallel-agent-labeling.ts +325 -0
  473. package/src/scripts/test-parallel-handoffs.ts +291 -0
  474. package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
  475. package/src/scripts/test-thinking-handoff.ts +155 -0
  476. package/src/scripts/test-tools-before-handoff.ts +226 -0
  477. package/src/scripts/test_code_api.ts +361 -0
  478. package/src/scripts/thinking-bedrock.ts +159 -0
  479. package/src/scripts/thinking.ts +171 -0
  480. package/src/scripts/tool_search.ts +162 -0
  481. package/src/scripts/tools.ts +177 -0
  482. package/src/specs/agent-handoffs.test.ts +888 -0
  483. package/src/specs/anthropic.simple.test.ts +387 -0
  484. package/src/specs/azure.simple.test.ts +364 -0
  485. package/src/specs/cache.simple.test.ts +396 -0
  486. package/src/specs/deepseek.simple.test.ts +283 -0
  487. package/src/specs/emergency-prune.test.ts +407 -0
  488. package/src/specs/moonshot.simple.test.ts +358 -0
  489. package/src/specs/openai.simple.test.ts +311 -0
  490. package/src/specs/openrouter.simple.test.ts +107 -0
  491. package/src/specs/prune.test.ts +901 -0
  492. package/src/specs/reasoning.test.ts +201 -0
  493. package/src/specs/spec.utils.ts +3 -0
  494. package/src/specs/thinking-handoff.test.ts +620 -0
  495. package/src/specs/thinking-prune.test.ts +703 -0
  496. package/src/specs/token-distribution-edge-case.test.ts +316 -0
  497. package/src/specs/token-memoization.test.ts +32 -0
  498. package/src/specs/tool-error.test.ts +198 -0
  499. package/src/splitStream.test.ts +691 -0
  500. package/src/splitStream.ts +234 -0
  501. package/src/stream.test.ts +94 -0
  502. package/src/stream.ts +801 -0
  503. package/src/test/mockTools.ts +386 -0
  504. package/src/tools/BrowserTools.ts +393 -0
  505. package/src/tools/Calculator.test.ts +278 -0
  506. package/src/tools/Calculator.ts +46 -0
  507. package/src/tools/CodeExecutor.ts +270 -0
  508. package/src/tools/ProgrammaticToolCalling.ts +785 -0
  509. package/src/tools/ToolNode.ts +674 -0
  510. package/src/tools/ToolSearch.ts +1095 -0
  511. package/src/tools/__tests__/BrowserTools.test.ts +265 -0
  512. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.ts +319 -0
  513. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +1006 -0
  514. package/src/tools/__tests__/ToolSearch.integration.test.ts +162 -0
  515. package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
  516. package/src/tools/handlers.ts +363 -0
  517. package/src/tools/schema.ts +37 -0
  518. package/src/tools/search/anthropic.ts +51 -0
  519. package/src/tools/search/content.test.ts +173 -0
  520. package/src/tools/search/content.ts +147 -0
  521. package/src/tools/search/firecrawl.ts +210 -0
  522. package/src/tools/search/format.ts +250 -0
  523. package/src/tools/search/highlights.ts +320 -0
  524. package/src/tools/search/index.ts +3 -0
  525. package/src/tools/search/jina-reranker.test.ts +130 -0
  526. package/src/tools/search/output.md +2775 -0
  527. package/src/tools/search/rerankers.ts +242 -0
  528. package/src/tools/search/schema.ts +113 -0
  529. package/src/tools/search/search.ts +768 -0
  530. package/src/tools/search/serper-scraper.ts +155 -0
  531. package/src/tools/search/test.html +884 -0
  532. package/src/tools/search/test.md +643 -0
  533. package/src/tools/search/test.ts +159 -0
  534. package/src/tools/search/tool.ts +657 -0
  535. package/src/tools/search/types.ts +665 -0
  536. package/src/tools/search/utils.ts +79 -0
  537. package/src/types/graph.test.ts +218 -0
  538. package/src/types/graph.ts +533 -0
  539. package/src/types/index.ts +6 -0
  540. package/src/types/llm.ts +140 -0
  541. package/src/types/messages.ts +4 -0
  542. package/src/types/run.ts +128 -0
  543. package/src/types/stream.ts +417 -0
  544. package/src/types/tools.ts +355 -0
  545. package/src/utils/contextAnalytics.ts +103 -0
  546. package/src/utils/events.ts +32 -0
  547. package/src/utils/graph.ts +11 -0
  548. package/src/utils/handlers.ts +107 -0
  549. package/src/utils/index.ts +9 -0
  550. package/src/utils/llm.ts +26 -0
  551. package/src/utils/llmConfig.ts +208 -0
  552. package/src/utils/logging.ts +48 -0
  553. package/src/utils/misc.ts +57 -0
  554. package/src/utils/run.ts +106 -0
  555. package/src/utils/schema.ts +35 -0
  556. package/src/utils/title.ts +177 -0
  557. package/src/utils/tokens.ts +142 -0
  558. package/src/utils/toonFormat.ts +475 -0
@@ -0,0 +1,1612 @@
1
+ import { nanoid } from 'nanoid';
2
+ import { concat } from '@langchain/core/utils/stream';
3
+ import { ChatVertexAI } from '@langchain/google-vertexai';
4
+ import { Annotation, messagesStateReducer, StateGraph, START, END } from '@langchain/langgraph';
5
+ import { RunnableLambda } from '@langchain/core/runnables';
6
+ import { SystemMessage, HumanMessage, AIMessageChunk } from '@langchain/core/messages';
7
+ import { GraphNodeKeys, ContentTypes, Providers, GraphEvents, MessageTypes, StepTypes, Constants } from '../common/enum.mjs';
8
+ import { convertMessagesToContent, modifyDeltaProperties, formatAnthropicArtifactContent, formatArtifactPayload } from '../messages/core.mjs';
9
+ import { createPruneMessages } from '../messages/prune.mjs';
10
+ import { ensureThinkingBlockInMessages } from '../messages/format.mjs';
11
+ import { addCacheControl, addBedrockCacheControl } from '../messages/cache.mjs';
12
+ import { formatContentStrings } from '../messages/content.mjs';
13
+ import { extractToolDiscoveries } from '../messages/tools.mjs';
14
+ import { resetIfNotEmpty, joinKeys } from '../utils/graph.mjs';
15
+ import { isOpenAILike, isGoogleLike } from '../utils/llm.mjs';
16
+ import { sleep } from '../utils/run.mjs';
17
+ import 'js-tiktoken';
18
+ import '../utils/toonFormat.mjs';
19
+ import { buildContextAnalytics } from '../utils/contextAnalytics.mjs';
20
+ import 'zod-to-json-schema';
21
+ import { getChatModelClass, manualToolStreamProviders } from '../llm/providers.mjs';
22
+ import { ToolNode, toolsCondition } from '../tools/ToolNode.mjs';
23
+ import { ChatOpenAI, AzureChatOpenAI } from '../llm/openai/index.mjs';
24
+ import { safeDispatchCustomEvent } from '../utils/events.mjs';
25
+ import { createSchemaOnlyTools } from '../tools/schema.mjs';
26
+ import { prepareSchemaForProvider } from '../schemas/validate.mjs';
27
+ import { AgentContext } from '../agents/AgentContext.mjs';
28
+ import { StructuredOutputTruncatedError, StructuredOutputRefusalError } from '../types/graph.mjs';
29
+ import { createFakeStreamingLLM } from '../llm/fake.mjs';
30
+
31
+ /* eslint-disable no-console */
32
+ // src/graphs/Graph.ts
33
+ const { AGENT, TOOLS } = GraphNodeKeys;
34
/**
 * Base graph state shared by graph implementations.
 * Tracks run-step bookkeeping (step keys/ids, content indices) and per-tool
 * session state for a single run.
 */
class Graph {
    /** Map of message step key -> whether that step emitted tool calls */
    messageStepHasToolCalls = new Map();
    /** Map of step key -> finalized message ID */
    messageIdsByStepKey = new Map();
    /** Map of step key -> preliminary message ID (assigned before finalization) */
    prelimMessageIdsByStepKey = new Map();
    /** Runnable config captured from the latest model invocation */
    config;
    /** Ordered run-step content parts accumulated during the run */
    contentData = [];
    /** Map of step key -> ordered list of generated step IDs */
    stepKeyIds = new Map();
    /** Map of step ID -> index into `contentData` */
    contentIndexMap = new Map();
    /** Map of tool call ID -> step ID that invoked it */
    toolCallStepIds = new Map();
    /** Abort signal for the run, if any */
    signal;
    /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
    invokedToolIds;
    /** Event-handler registry — NOTE(review): not referenced in this chunk; confirm usage elsewhere */
    handlerRegistry;
    /**
     * Tool session contexts for automatic state persistence across tool invocations.
     * Keyed by tool name (e.g., Constants.EXECUTE_CODE).
     * Currently supports code execution session tracking (session_id, files).
     */
    sessions = new Map();
}
54
/**
 * Standard (single-workflow) graph built on LangGraph.
 * Holds per-agent contexts, accumulated run messages, and the normalized
 * finish reason from the most recent LLM invocation.
 */
class StandardGraph extends Graph {
    /** Fake/override model injected by tests (see `overrideTestModel`) */
    overrideModel;
    /** Optional compile options passed into workflow.compile() */
    compileOptions;
    /** All messages for the run (input history plus generated output) */
    messages = [];
    /** ID of the current run */
    runId;
    /** Index into `messages` where the current run's output begins */
    startIndex = 0;
    /** Abort signal for the run, if any */
    signal;
    /** Map of agent contexts by agent ID */
    agentContexts = new Map();
    /** Default agent ID to use */
    defaultAgentId;
    /** Normalized finish/stop reason from the last LLM invocation */
    lastFinishReason;
68
+ constructor({
69
+ // parent-level graph inputs
70
+ runId, signal, agents, tokenCounter, indexTokenCountMap, }) {
71
+ super();
72
+ this.runId = runId;
73
+ this.signal = signal;
74
+ if (agents.length === 0) {
75
+ throw new Error('At least one agent configuration is required');
76
+ }
77
+ for (const agentConfig of agents) {
78
+ const agentContext = AgentContext.fromConfig(agentConfig, tokenCounter, indexTokenCountMap);
79
+ this.agentContexts.set(agentConfig.agentId, agentContext);
80
+ }
81
+ this.defaultAgentId = agents[0].agentId;
82
+ }
83
    /* Init */
    /**
     * Clears per-run state so the graph instance can be reused for a fresh run.
     * @param {boolean} [keepContent] - When true, preserves the accumulated
     *   content parts (`contentData` / `contentIndexMap`) across the reset.
     */
    resetValues(keepContent) {
        this.messages = [];
        this.lastFinishReason = undefined;
        this.config = resetIfNotEmpty(this.config, undefined);
        if (keepContent !== true) {
            this.contentData = resetIfNotEmpty(this.contentData, []);
            this.contentIndexMap = resetIfNotEmpty(this.contentIndexMap, new Map());
        }
        this.stepKeyIds = resetIfNotEmpty(this.stepKeyIds, new Map());
        this.toolCallStepIds = resetIfNotEmpty(this.toolCallStepIds, new Map());
        this.messageIdsByStepKey = resetIfNotEmpty(this.messageIdsByStepKey, new Map());
        this.messageStepHasToolCalls = resetIfNotEmpty(this.messageStepHasToolCalls, new Map());
        this.prelimMessageIdsByStepKey = resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
        this.invokedToolIds = resetIfNotEmpty(this.invokedToolIds, undefined);
        // Each agent context owns additional per-run state (usage, pruning state, etc.)
        for (const context of this.agentContexts.values()) {
            context.reset();
        }
    }
102
    /**
     * Returns the normalized finish/stop reason from the last LLM invocation.
     * Used by callers to detect when the response was truncated due to max_tokens.
     * @returns {string|undefined} The last finish reason, or undefined before any invocation.
     */
    getLastFinishReason() {
        return this.lastFinishReason;
    }
109
+ /**
110
+ * Estimates a human-friendly description of the conversation timeframe based on message count.
111
+ * Uses rough heuristics to provide context about how much history is available.
112
+ *
113
+ * @param messageCount - Number of messages in the remaining context
114
+ * @returns A friendly description like "the last few minutes", "the past hour", etc.
115
+ */
116
+ getContextTimeframeDescription(messageCount) {
117
+ // Rough heuristics based on typical conversation patterns:
118
+ // - Very active chat: ~20-30 messages per hour
119
+ // - Normal chat: ~10-15 messages per hour
120
+ // - Slow/thoughtful chat: ~5-8 messages per hour
121
+ // We use a middle estimate of ~12 messages per hour
122
+ if (messageCount <= 5) {
123
+ return 'just the last few exchanges';
124
+ }
125
+ else if (messageCount <= 15) {
126
+ return 'the last several minutes';
127
+ }
128
+ else if (messageCount <= 30) {
129
+ return 'roughly the past hour';
130
+ }
131
+ else if (messageCount <= 60) {
132
+ return 'the past couple of hours';
133
+ }
134
+ else if (messageCount <= 150) {
135
+ return 'the past few hours';
136
+ }
137
+ else if (messageCount <= 300) {
138
+ return 'roughly a day\'s worth';
139
+ }
140
+ else if (messageCount <= 700) {
141
+ return 'the past few days';
142
+ }
143
+ else {
144
+ return 'about a week or more';
145
+ }
146
+ }
147
+ /* Run Step Processing */
148
+ getRunStep(stepId) {
149
+ const index = this.contentIndexMap.get(stepId);
150
+ if (index !== undefined) {
151
+ return this.contentData[index];
152
+ }
153
+ return undefined;
154
+ }
155
+ getAgentContext(metadata) {
156
+ if (!metadata) {
157
+ throw new Error('No metadata provided to retrieve agent context');
158
+ }
159
+ const currentNode = metadata.langgraph_node;
160
+ if (!currentNode) {
161
+ throw new Error('No langgraph_node in metadata to retrieve agent context');
162
+ }
163
+ let agentId;
164
+ if (currentNode.startsWith(AGENT)) {
165
+ agentId = currentNode.substring(AGENT.length);
166
+ }
167
+ else if (currentNode.startsWith(TOOLS)) {
168
+ agentId = currentNode.substring(TOOLS.length);
169
+ }
170
+ const agentContext = this.agentContexts.get(agentId ?? '');
171
+ if (!agentContext) {
172
+ throw new Error(`No agent context found for agent ID ${agentId}`);
173
+ }
174
+ return agentContext;
175
+ }
176
+ getStepKey(metadata) {
177
+ if (!metadata)
178
+ return '';
179
+ const keyList = this.getKeyList(metadata);
180
+ if (this.checkKeyList(keyList)) {
181
+ throw new Error('Missing metadata');
182
+ }
183
+ return joinKeys(keyList);
184
+ }
185
+ getStepIdByKey(stepKey, index) {
186
+ const stepIds = this.stepKeyIds.get(stepKey);
187
+ if (!stepIds) {
188
+ throw new Error(`No step IDs found for stepKey ${stepKey}`);
189
+ }
190
+ if (index === undefined) {
191
+ return stepIds[stepIds.length - 1];
192
+ }
193
+ return stepIds[index];
194
+ }
195
+ generateStepId(stepKey) {
196
+ const stepIds = this.stepKeyIds.get(stepKey);
197
+ let newStepId;
198
+ let stepIndex = 0;
199
+ if (stepIds) {
200
+ stepIndex = stepIds.length;
201
+ newStepId = `step_${nanoid()}`;
202
+ stepIds.push(newStepId);
203
+ this.stepKeyIds.set(stepKey, stepIds);
204
+ }
205
+ else {
206
+ newStepId = `step_${nanoid()}`;
207
+ this.stepKeyIds.set(stepKey, [newStepId]);
208
+ }
209
+ return [newStepId, stepIndex];
210
+ }
211
+ getKeyList(metadata) {
212
+ if (!metadata)
213
+ return [];
214
+ const keyList = [
215
+ metadata.run_id,
216
+ metadata.thread_id,
217
+ metadata.langgraph_node,
218
+ metadata.langgraph_step,
219
+ metadata.checkpoint_ns,
220
+ ];
221
+ const agentContext = this.getAgentContext(metadata);
222
+ if (agentContext.currentTokenType === ContentTypes.THINK ||
223
+ agentContext.currentTokenType === 'think_and_text') {
224
+ keyList.push('reasoning');
225
+ }
226
+ else if (agentContext.tokenTypeSwitch === 'content') {
227
+ keyList.push('post-reasoning');
228
+ }
229
+ if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
230
+ keyList.push(this.invokedToolIds.size + '');
231
+ }
232
+ return keyList;
233
+ }
234
+ checkKeyList(keyList) {
235
+ return keyList.some((key) => key === undefined);
236
+ }
237
    /* Misc.*/
    /** Returns only the messages generated during this run (everything after `startIndex`). */
    getRunMessages() {
        return this.messages.slice(this.startIndex);
    }
241
    /** Converts this run's generated messages into content parts for output. */
    getContentParts() {
        return convertMessagesToContent(this.messages.slice(this.startIndex));
    }
244
+ /**
245
+ * Get all run steps, optionally filtered by agent ID
246
+ */
247
+ getRunSteps(agentId) {
248
+ if (agentId == null || agentId === '') {
249
+ return [...this.contentData];
250
+ }
251
+ return this.contentData.filter((step) => step.agentId === agentId);
252
+ }
253
+ /**
254
+ * Get run steps grouped by agent ID
255
+ */
256
+ getRunStepsByAgent() {
257
+ const stepsByAgent = new Map();
258
+ for (const step of this.contentData) {
259
+ if (step.agentId == null || step.agentId === '')
260
+ continue;
261
+ const steps = stepsByAgent.get(step.agentId) ?? [];
262
+ steps.push(step);
263
+ stepsByAgent.set(step.agentId, steps);
264
+ }
265
+ return stepsByAgent;
266
+ }
267
+ /**
268
+ * Get agent IDs that participated in this run
269
+ */
270
+ getActiveAgentIds() {
271
+ const agentIds = new Set();
272
+ for (const step of this.contentData) {
273
+ if (step.agentId != null && step.agentId !== '') {
274
+ agentIds.add(step.agentId);
275
+ }
276
+ }
277
+ return Array.from(agentIds);
278
+ }
279
+ /**
280
+ * Maps contentPart indices to agent IDs for post-run analysis
281
+ * Returns a map where key is the contentPart index and value is the agentId
282
+ */
283
+ getContentPartAgentMap() {
284
+ const contentPartAgentMap = new Map();
285
+ for (const step of this.contentData) {
286
+ if (step.agentId != null &&
287
+ step.agentId !== '' &&
288
+ Number.isFinite(step.index)) {
289
+ contentPartAgentMap.set(step.index, step.agentId);
290
+ }
291
+ }
292
+ return contentPartAgentMap;
293
+ }
294
+ /**
295
+ * Get the context breakdown from the primary agent for admin token tracking.
296
+ * Returns detailed token counts for instructions, tools, etc.
297
+ */
298
+ getContextBreakdown() {
299
+ const primaryContext = this.agentContexts.get(this.defaultAgentId);
300
+ if (!primaryContext) {
301
+ return null;
302
+ }
303
+ return primaryContext.getContextBreakdown();
304
+ }
305
    /**
     * Get the latest context analytics from the graph.
     * Returns metrics like utilization %, TOON stats, message breakdown.
     * @returns The last stored analytics object, or null when none was recorded.
     */
    getContextAnalytics() {
        return this.lastContextAnalytics ?? null;
    }
    /** Store the latest context analytics for retrieval after run */
    lastContextAnalytics = null;
314
+ /* Graph */
315
+ createSystemRunnable({ provider, clientOptions, instructions, additional_instructions, }) {
316
+ let finalInstructions = instructions;
317
+ if (additional_instructions != null && additional_instructions !== '') {
318
+ finalInstructions =
319
+ finalInstructions != null && finalInstructions
320
+ ? `${finalInstructions}\n\n${additional_instructions}`
321
+ : additional_instructions;
322
+ }
323
+ if (finalInstructions != null &&
324
+ finalInstructions &&
325
+ provider === Providers.ANTHROPIC &&
326
+ clientOptions.promptCache === true) {
327
+ finalInstructions = {
328
+ content: [
329
+ {
330
+ type: 'text',
331
+ text: instructions,
332
+ cache_control: { type: 'ephemeral' },
333
+ },
334
+ ],
335
+ };
336
+ }
337
+ if (finalInstructions != null && finalInstructions !== '') {
338
+ const systemMessage = new SystemMessage(finalInstructions);
339
+ return RunnableLambda.from((messages) => {
340
+ return [systemMessage, ...messages];
341
+ }).withConfig({ runName: 'prompt' });
342
+ }
343
+ }
344
+ initializeTools({ currentTools, currentToolMap, agentContext, }) {
345
+ const toolDefinitions = agentContext?.toolDefinitions;
346
+ const eventDrivenMode = toolDefinitions != null && toolDefinitions.length > 0;
347
+ if (eventDrivenMode) {
348
+ const schemaTools = createSchemaOnlyTools(toolDefinitions);
349
+ const toolDefMap = new Map(toolDefinitions.map((def) => [def.name, def]));
350
+ return new ToolNode({
351
+ tools: schemaTools,
352
+ toolMap: new Map(schemaTools.map((tool) => [tool.name, tool])),
353
+ toolCallStepIds: this.toolCallStepIds,
354
+ errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
355
+ toolRegistry: agentContext?.toolRegistry,
356
+ sessions: this.sessions,
357
+ eventDrivenMode: true,
358
+ toolDefinitions: toolDefMap,
359
+ agentId: agentContext?.agentId,
360
+ });
361
+ }
362
+ return new ToolNode({
363
+ tools: currentTools ?? [],
364
+ toolMap: currentToolMap,
365
+ toolCallStepIds: this.toolCallStepIds,
366
+ errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
367
+ toolRegistry: agentContext?.toolRegistry,
368
+ sessions: this.sessions,
369
+ });
370
+ }
371
+ initializeModel({ provider, tools, clientOptions, }) {
372
+ const ChatModelClass = getChatModelClass(provider);
373
+ const model = new ChatModelClass(clientOptions ?? {});
374
+ if (isOpenAILike(provider) &&
375
+ (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)) {
376
+ model.temperature = clientOptions
377
+ .temperature;
378
+ model.topP = clientOptions.topP;
379
+ model.frequencyPenalty = clientOptions
380
+ .frequencyPenalty;
381
+ model.presencePenalty = clientOptions
382
+ .presencePenalty;
383
+ model.n = clientOptions.n;
384
+ }
385
+ else if (provider === Providers.VERTEXAI &&
386
+ model instanceof ChatVertexAI) {
387
+ model.temperature = clientOptions
388
+ .temperature;
389
+ model.topP = clientOptions.topP;
390
+ model.topK = clientOptions.topK;
391
+ model.topLogprobs = clientOptions
392
+ .topLogprobs;
393
+ model.frequencyPenalty = clientOptions
394
+ .frequencyPenalty;
395
+ model.presencePenalty = clientOptions
396
+ .presencePenalty;
397
+ model.maxOutputTokens = clientOptions
398
+ .maxOutputTokens;
399
+ }
400
+ if (!tools || tools.length === 0) {
401
+ return model;
402
+ }
403
+ return model.bindTools(tools);
404
+ }
405
    /**
     * Replaces the real model with a fake streaming LLM (test hook).
     * `attemptInvoke`/`attemptStructuredInvoke` prefer `overrideModel` when set.
     * @param responses - Canned responses the fake model will stream.
     * @param sleep - Delay between streamed chunks (ms) — presumably; confirm in createFakeStreamingLLM.
     * @param toolCalls - Tool calls the fake model should emit.
     */
    overrideTestModel(responses, sleep, toolCalls) {
        this.overrideModel = createFakeStreamingLLM({
            responses,
            sleep,
            toolCalls,
        });
    }
412
+ getNewModel({ provider, clientOptions, }) {
413
+ const ChatModelClass = getChatModelClass(provider);
414
+ return new ChatModelClass(clientOptions ?? {});
415
+ }
416
+ getUsageMetadata(finalMessage) {
417
+ if (finalMessage &&
418
+ 'usage_metadata' in finalMessage &&
419
+ finalMessage.usage_metadata != null) {
420
+ return finalMessage.usage_metadata;
421
+ }
422
+ }
423
    /**
     * Execute model invocation with streaming support.
     * For providers requiring manual tool streaming, consumes the stream
     * chunk-by-chunk (dispatching CHAT_MODEL_STREAM events) and aggregates
     * the chunks into one final message; otherwise performs a plain invoke.
     * Filters out tool calls with empty names from the invoke path.
     * @returns {{messages: Array}} A single-element message list.
     */
    async attemptInvoke({ currentModel, finalMessages, provider, tools, }, config) {
        // Test override takes precedence over the configured model.
        const model = this.overrideModel ?? currentModel;
        if (!model) {
            throw new Error('No model found');
        }
        if ((tools?.length ?? 0) > 0 && manualToolStreamProviders.has(provider)) {
            if (!model.stream) {
                throw new Error('Model does not support stream');
            }
            const stream = await model.stream(finalMessages, config);
            let finalChunk;
            for await (const chunk of stream) {
                // `emitted: true` marks chunks already dispatched here, so downstream
                // handlers don't re-emit them.
                await safeDispatchCustomEvent(GraphEvents.CHAT_MODEL_STREAM, { chunk, emitted: true }, config);
                finalChunk = finalChunk ? concat(finalChunk, chunk) : chunk;
            }
            // NOTE(review): if the stream yields no chunks, `finalChunk` is
            // undefined here — presumably modifyDeltaProperties tolerates that;
            // verify against its implementation.
            finalChunk = modifyDeltaProperties(provider, finalChunk);
            return { messages: [finalChunk] };
        }
        else {
            const finalMessage = await model.invoke(finalMessages, config);
            if ((finalMessage.tool_calls?.length ?? 0) > 0) {
                // Drop malformed tool calls that arrived without a name.
                finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => !!tool_call.name);
            }
            return { messages: [finalMessage] };
        }
    }
450
    /**
     * Execute model invocation with structured output.
     * Uses native constrained decoding (jsonSchema method) for supported providers,
     * or falls back to withStructuredOutput with functionCalling/jsonMode.
     *
     * Native mode uses provider APIs directly:
     * - Anthropic: output_config.format via LangChain's method: 'json_schema'
     * - OpenAI/Azure: response_format.json_schema via LangChain's method: 'jsonSchema'
     * - Bedrock: falls back to functionCalling (LangChain doesn't support native yet)
     *
     * Retries up to `maxRetries` extra attempts on validation failures, feeding
     * the error back into the conversation. Refusal/truncation errors are never
     * retried — they require user action.
     *
     * @returns {{structuredResponse: *, rawMessage?: *}} The parsed structured
     *   response, plus the raw message when the model supports includeRaw.
     * @throws {StructuredOutputTruncatedError} When the response hit max_tokens.
     * @throws {StructuredOutputRefusalError} When the model refused to answer.
     */
    async attemptStructuredInvoke({ currentModel, finalMessages, schema, structuredOutputConfig, provider, agentContext, }, config) {
        const model = this.overrideModel ?? currentModel;
        if (!model) {
            throw new Error('No model found');
        }
        // Check if model supports withStructuredOutput
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        if (typeof model.withStructuredOutput !== 'function') {
            throw new Error(`The selected model does not support structured output. ` +
                `Please use a model that supports JSON schema output (e.g., OpenAI GPT-4, Anthropic Claude, Google Gemini) ` +
                `or disable structured output for this agent.`);
        }
        // `_includeRaw` is intentionally unused: includeRaw is forced to true below.
        const { name = 'StructuredResponse', includeRaw: _includeRaw = false, handleErrors = true, maxRetries = 2, } = structuredOutputConfig;
        // Resolve the structured output method using AgentContext's provider-aware logic
        let method;
        if (agentContext) {
            const resolved = agentContext.resolveStructuredOutputMode();
            method = resolved.method;
            if (resolved.warnings.length > 0) {
                console.warn('[Graph] Structured output mode warnings:', resolved.warnings);
            }
        }
        else {
            // Legacy fallback: use the old mode-based resolution
            const mode = structuredOutputConfig.mode ?? 'auto';
            if (mode === 'tool') {
                method = 'functionCalling';
            }
            else if (mode === 'provider') {
                method = provider === Providers.BEDROCK ? 'functionCalling' : 'jsonMode';
            }
            else {
                // 'auto' (or anything else): let LangChain pick the method.
                method = undefined;
            }
        }
        // Prepare schema for provider-specific constraints when using native/jsonSchema mode
        let preparedSchema = schema;
        if (method === 'jsonSchema' && provider) {
            const { schema: prepared, warnings } = prepareSchemaForProvider(schema, provider, structuredOutputConfig.strict !== false);
            preparedSchema = prepared;
            if (warnings.length > 0) {
                console.log('[Graph] Schema preparation warnings:', warnings);
            }
        }
        // Use withStructuredOutput to bind the schema
        // Always use includeRaw: true internally so we can debug what's returned
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const structuredModel = model.withStructuredOutput(preparedSchema, {
            name,
            // 'native' is not a LangChain method name; undefined lets the
            // provider integration choose its native path.
            method: method === 'native' ? undefined : method,
            includeRaw: true, // Always true internally for debugging
            strict: structuredOutputConfig.strict !== false,
        });
        console.log('[Graph] Structured output config:', {
            name,
            method,
            provider,
            schemaKeys: Object.keys(preparedSchema),
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            modelName: model.model || model.modelId || 'unknown',
        });
        let lastError;
        let attempts = 0;
        // Retry loop: first attempt plus up to `maxRetries` retries.
        while (attempts <= maxRetries) {
            try {
                // Note: We pass the original config here. The stream aggregator will filter out
                // the synthetic "response" tool call events from withStructuredOutput()
                const result = await structuredModel.invoke(finalMessages, config);
                // Debug: log what we got back
                console.log('[Graph] Structured output raw result type:', typeof result);
                // Check for refusal or truncation in the raw message
                if (result?.raw) {
                    const rawMsg = result.raw;
                    console.log('[Graph] Raw message content type:', typeof rawMsg?.content);
                    console.log('[Graph] Raw message tool_calls:', rawMsg?.tool_calls?.length ?? 0);
                    if (rawMsg?.content && typeof rawMsg.content === 'string' && rawMsg.content.length > 0) {
                        console.log('[Graph] Raw message text content (first 200):', rawMsg.content.substring(0, 200));
                    }
                    // Check stop reason for refusal or truncation
                    const responseMetadata = rawMsg.response_metadata ?? {};
                    const stopReason = responseMetadata.stop_reason ?? // Anthropic
                        responseMetadata.finish_reason ?? // OpenAI
                        responseMetadata.stopReason; // Bedrock
                    if (stopReason === 'max_tokens' || stopReason === 'length') {
                        throw new StructuredOutputTruncatedError(stopReason);
                    }
                    // Check for Anthropic refusal (stop_reason won't be 'refusal' but content may indicate it)
                    // OpenAI uses message.refusal field
                    const refusal = rawMsg.refusal;
                    if (refusal) {
                        throw new StructuredOutputRefusalError(refusal);
                    }
                }
                // Handle response - we always use includeRaw internally
                if (result?.raw && result?.parsed !== undefined) {
                    return {
                        structuredResponse: result.parsed,
                        rawMessage: result.raw,
                    };
                }
                // Fallback for models that don't support includeRaw
                return {
                    structuredResponse: result,
                };
            }
            catch (error) {
                // Don't retry on refusal or truncation errors — they need user action
                if (error instanceof StructuredOutputRefusalError ||
                    error instanceof StructuredOutputTruncatedError) {
                    throw error;
                }
                lastError = error;
                attempts++;
                // If error handling is disabled, throw immediately
                if (handleErrors === false) {
                    throw error;
                }
                // If we've exhausted retries, throw
                if (attempts > maxRetries) {
                    throw new Error(`Structured output failed after ${maxRetries + 1} attempts: ${lastError.message}`);
                }
                // Add error message to conversation for retry
                const errorMessage = typeof handleErrors === 'string'
                    ? handleErrors
                    : `The response did not match the expected schema. Error: ${lastError.message}. Please try again with a valid response.`;
                console.warn(`[Graph] Structured output attempt ${attempts} failed: ${lastError.message}. Retrying...`);
                // Add the error as a human message for context
                finalMessages = [
                    ...finalMessages,
                    new HumanMessage({
                        content: `[VALIDATION ERROR]\n${errorMessage}`,
                    }),
                ];
            }
        }
        throw lastError ?? new Error('Structured output failed');
    }
597
+ cleanupSignalListener(currentModel) {
598
+ if (!this.signal) {
599
+ return;
600
+ }
601
+ const model = this.overrideModel ?? currentModel;
602
+ if (!model) {
603
+ return;
604
+ }
605
+ const client = model?.exposedClient;
606
+ if (!client?.abortHandler) {
607
+ return;
608
+ }
609
+ this.signal.removeEventListener('abort', client.abortHandler);
610
+ client.abortHandler = undefined;
611
+ }
612
    /**
     * Perform structured output invocation: creates a fresh model without tools bound,
     * removes thinking configuration, invokes with the schema, emits the event,
     * and returns a clean AIMessageChunk without tool_calls.
     *
     * Used by both the immediate path (no tools) and the deferred path (after tool use).
     *
     * @returns {{messages: Array, structuredResponse: *}} The clean message list
     *   (empty when no raw message was produced) plus the parsed response.
     * @throws {Error} When no structured output schema is configured.
     */
    async performStructuredOutput({ agentContext, finalMessages, config, }) {
        const schema = agentContext.getStructuredOutputSchema();
        if (!schema) {
            throw new Error('Structured output schema is not configured');
        }
        // Get a fresh model WITHOUT tools bound
        // bindTools() returns RunnableBinding which lacks withStructuredOutput
        // Also disable thinking mode - Anthropic/Bedrock doesn't allow tool_choice with thinking enabled
        const structuredClientOptions = { ...agentContext.clientOptions };
        // Determine if streaming is possible for this structured output mode
        // Native/jsonSchema modes can stream; tool/functionCalling modes cannot (synthetic tool calls break UX)
        const resolved = agentContext.resolveStructuredOutputMode();
        const canStream = resolved.method === 'jsonSchema' || resolved.method === 'jsonMode';
        if (!canStream) {
            // Disable streaming for function calling mode (synthetic tool calls break streaming UX)
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            structuredClientOptions.streaming = false;
        }
        // For native/jsonSchema mode, Anthropic's constrained decoding works with thinking enabled
        // (grammar only applies to final output, not thinking blocks). For function calling mode,
        // thinking must be disabled because forced tool_choice is incompatible with thinking.
        const needsThinkingDisabled = resolved.method !== 'jsonSchema';
        if (needsThinkingDisabled) {
            // Remove thinking configuration for Bedrock
            if (agentContext.provider === Providers.BEDROCK) {
                const bedrockOpts = structuredClientOptions;
                if (bedrockOpts.additionalModelRequestFields) {
                    // Copy before delete so the agent context's own options stay intact.
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    const additionalFields = Object.assign({}, bedrockOpts.additionalModelRequestFields);
                    delete additionalFields.thinking;
                    delete additionalFields.budgetTokens;
                    bedrockOpts.additionalModelRequestFields = additionalFields;
                }
            }
            // Remove thinking configuration for Anthropic direct API
            if (agentContext.provider === Providers.ANTHROPIC) {
                const anthropicOpts = structuredClientOptions;
                if (anthropicOpts.thinking) {
                    delete anthropicOpts.thinking;
                }
            }
        }
        const structuredModel = this.getNewModel({
            provider: agentContext.provider,
            clientOptions: structuredClientOptions,
        });
        const { structuredResponse, rawMessage } = await this.attemptStructuredInvoke({
            currentModel: structuredModel,
            finalMessages,
            schema,
            structuredOutputConfig: agentContext.structuredOutput,
            provider: agentContext.provider,
            agentContext,
        }, config);
        // Emit structured output event
        await safeDispatchCustomEvent(GraphEvents.ON_STRUCTURED_OUTPUT, {
            structuredResponse,
            schema,
            raw: rawMessage,
        }, config);
        // Create a clean message WITHOUT tool_calls for structured output.
        // The rawMessage contains a tool_call for the structured output schema (e.g., "response"),
        // which would cause the graph router to send it to the tool node.
        // We return a clean AI message that ends the graph.
        let cleanMessage;
        if (rawMessage) {
            cleanMessage = new AIMessageChunk({
                content: JSON.stringify(structuredResponse, null, 2),
                id: rawMessage.id,
                response_metadata: rawMessage.response_metadata,
                usage_metadata: rawMessage.usage_metadata,
            });
        }
        return {
            messages: cleanMessage ? [cleanMessage] : [],
            structuredResponse,
        };
    }
697
+ createCallModel(agentId = 'default') {
698
+ return async (state, config) => {
699
+ /**
700
+ * Get agent context - it must exist by this point
701
+ */
702
+ const agentContext = this.agentContexts.get(agentId);
703
+ if (!agentContext) {
704
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
705
+ }
706
+ if (!config) {
707
+ throw new Error('No config provided');
708
+ }
709
+ let { messages } = state;
710
+ // CACHE OPTIMIZATION: Inject dynamicContext as a HumanMessage at the start of conversation
711
+ // This keeps the system message static (cacheable) while providing dynamic context
712
+ // (timestamps, user info, tool context) as conversation content instead.
713
+ // Only inject on the first turn when messages don't already have the context marker.
714
+ if (agentContext.dynamicContext &&
715
+ messages.length > 0 &&
716
+ !messages.some((m) => m instanceof HumanMessage &&
717
+ typeof m.content === 'string' &&
718
+ m.content.startsWith('[SESSION_CONTEXT]'))) {
719
+ const dynamicContextMessage = new HumanMessage({
720
+ content: `[SESSION_CONTEXT]\n${agentContext.dynamicContext}`,
721
+ });
722
+ const ackMessage = new AIMessageChunk({
723
+ content: 'Understood. I have noted the session context including the current date/time (CST) and will apply it appropriately.',
724
+ });
725
+ messages = [dynamicContextMessage, ackMessage, ...messages];
726
+ }
727
+ // Extract tool discoveries from current turn only (similar to formatArtifactPayload pattern)
728
+ const discoveredNames = extractToolDiscoveries(messages);
729
+ if (discoveredNames.length > 0) {
730
+ agentContext.markToolsAsDiscovered(discoveredNames);
731
+ }
732
+ const toolsForBinding = agentContext.getToolsForBinding();
733
+ let model = this.overrideModel ??
734
+ this.initializeModel({
735
+ tools: toolsForBinding,
736
+ provider: agentContext.provider,
737
+ clientOptions: agentContext.clientOptions,
738
+ });
739
+ if (agentContext.systemRunnable) {
740
+ model = agentContext.systemRunnable.pipe(model);
741
+ }
742
+ if (agentContext.tokenCalculationPromise) {
743
+ await agentContext.tokenCalculationPromise;
744
+ }
745
+ if (!config.signal) {
746
+ config.signal = this.signal;
747
+ }
748
+ this.config = config;
749
+ let messagesToUse = messages;
750
+ // ====================================================================
751
+ // PRE-PRUNING DELEGATION CHECK
752
+ // Before pruning strips messages (losing context), check if we should
753
+ // delegate instead. If context would be pruned AND the agent has the
754
+ // task tool, inject a delegation hint and SKIP pruning — preserving
755
+ // the content for the LLM to understand what to delegate.
756
+ // ====================================================================
757
+ let delegationInjectedPrePrune = false;
758
+ const hasTaskToolPrePrune = agentContext.tools?.some((tool) => {
759
+ const toolName = typeof tool === 'object' && 'name' in tool
760
+ ? tool.name
761
+ : '';
762
+ return toolName === 'task';
763
+ });
764
+ if (hasTaskToolPrePrune &&
765
+ agentContext.tokenCounter &&
766
+ agentContext.maxContextTokens != null) {
767
+ // Estimate total tokens in messages BEFORE pruning
768
+ let prePruneTokens = 0;
769
+ for (const msg of messages) {
770
+ prePruneTokens += agentContext.tokenCounter(msg);
771
+ }
772
+ // Add instruction tokens (system prompt)
773
+ prePruneTokens += agentContext.instructionTokens ?? 0;
774
+ const prePruneUtilization = (prePruneTokens / agentContext.maxContextTokens) * 100;
775
+ if (prePruneUtilization > 70) {
776
+ console.warn(`[Graph] PRE-PRUNE delegation check: ${prePruneUtilization.toFixed(1)}% utilization ` +
777
+ `(${prePruneTokens}/${agentContext.maxContextTokens} tokens). ` +
778
+ `Injecting delegation hint INSTEAD of pruning.`);
779
+ delegationInjectedPrePrune = true;
780
+ }
781
+ }
782
+ if (!agentContext.pruneMessages &&
783
+ agentContext.tokenCounter &&
784
+ agentContext.maxContextTokens != null &&
785
+ agentContext.indexTokenCountMap[0] != null) {
786
+ const isAnthropicWithThinking = (agentContext.provider === Providers.ANTHROPIC &&
787
+ agentContext.clientOptions.thinking !=
788
+ null) ||
789
+ (agentContext.provider === Providers.BEDROCK &&
790
+ agentContext.clientOptions
791
+ .additionalModelRequestFields?.['thinking'] != null) ||
792
+ (agentContext.provider === Providers.OPENAI &&
793
+ agentContext.clientOptions.modelKwargs
794
+ ?.thinking?.type === 'enabled');
795
+ agentContext.pruneMessages = createPruneMessages({
796
+ startIndex: this.startIndex,
797
+ provider: agentContext.provider,
798
+ tokenCounter: agentContext.tokenCounter,
799
+ maxTokens: agentContext.maxContextTokens,
800
+ thinkingEnabled: isAnthropicWithThinking,
801
+ indexTokenCountMap: agentContext.indexTokenCountMap,
802
+ });
803
+ }
804
+ if (agentContext.pruneMessages && !delegationInjectedPrePrune) {
805
+ const { context, indexTokenCountMap, messagesToRefine } = agentContext.pruneMessages({
806
+ messages,
807
+ usageMetadata: agentContext.currentUsage,
808
+ // startOnMessageType: 'human',
809
+ });
810
+ agentContext.indexTokenCountMap = indexTokenCountMap;
811
+ messagesToUse = context;
812
+ // Summarize discarded messages if callback provided
813
+ if (messagesToRefine && messagesToRefine.length > 0 && agentContext.summarizeCallback) {
814
+ try {
815
+ const summary = await agentContext.summarizeCallback(messagesToRefine);
816
+ if (summary) {
817
+ const summaryMsg = new SystemMessage(`[Conversation Summary]\n${summary}`);
818
+ // Insert after system message (if present), before conversation messages
819
+ const systemIdx = messagesToUse[0]?.getType() === 'system' ? 1 : 0;
820
+ messagesToUse = [
821
+ ...messagesToUse.slice(0, systemIdx),
822
+ summaryMsg,
823
+ ...messagesToUse.slice(systemIdx),
824
+ ];
825
+ }
826
+ }
827
+ catch (err) {
828
+ console.error('[Graph] Summarization callback failed:', err);
829
+ }
830
+ }
831
+ }
832
+ else if (delegationInjectedPrePrune) {
833
+ console.info('[Graph] Skipping pruning — delegation will handle context pressure');
834
+ }
835
+ let finalMessages = messagesToUse;
836
+ if (agentContext.useLegacyContent) {
837
+ finalMessages = formatContentStrings(finalMessages);
838
+ }
839
+ const lastMessageX = finalMessages.length >= 2
840
+ ? finalMessages[finalMessages.length - 2]
841
+ : null;
842
+ const lastMessageY = finalMessages.length >= 1
843
+ ? finalMessages[finalMessages.length - 1]
844
+ : null;
845
+ if (agentContext.provider === Providers.BEDROCK &&
846
+ lastMessageX instanceof AIMessageChunk &&
847
+ lastMessageY?.getType() === MessageTypes.TOOL &&
848
+ typeof lastMessageX.content === 'string') {
849
+ finalMessages[finalMessages.length - 2].content = '';
850
+ }
851
+ // Use getType() instead of instanceof to avoid module mismatch issues
852
+ const isLatestToolMessage = lastMessageY?.getType() === MessageTypes.TOOL;
853
+ if (isLatestToolMessage &&
854
+ agentContext.provider === Providers.ANTHROPIC) {
855
+ formatAnthropicArtifactContent(finalMessages);
856
+ }
857
+ else if (isLatestToolMessage &&
858
+ ((isOpenAILike(agentContext.provider) &&
859
+ agentContext.provider !== Providers.DEEPSEEK) ||
860
+ isGoogleLike(agentContext.provider))) {
861
+ formatArtifactPayload(finalMessages);
862
+ }
863
+ /**
864
+ * Handle edge case: when switching from a non-thinking agent to a thinking-enabled agent,
865
+ * convert AI messages with tool calls to HumanMessages to avoid thinking block requirements.
866
+ * This is required by Anthropic/Bedrock when thinking is enabled.
867
+ *
868
+ * IMPORTANT: This MUST happen BEFORE cache control is applied.
869
+ * If we add cachePoint to an AI message first, then convert that AI message to a HumanMessage,
870
+ * the cachePoint is lost. By converting first, we ensure cache control is applied to the
871
+ * final message structure that will be sent to the API.
872
+ */
873
+ const isAnthropicWithThinking = (agentContext.provider === Providers.ANTHROPIC &&
874
+ agentContext.clientOptions.thinking !=
875
+ null) ||
876
+ (agentContext.provider === Providers.BEDROCK &&
877
+ agentContext.clientOptions
878
+ .additionalModelRequestFields?.['thinking'] != null);
879
+ if (isAnthropicWithThinking) {
880
+ finalMessages = ensureThinkingBlockInMessages(finalMessages, agentContext.provider);
881
+ }
882
+ // Apply cache control AFTER thinking block handling to ensure cachePoints aren't lost
883
+ // when AI messages are converted to HumanMessages
884
+ if (agentContext.provider === Providers.ANTHROPIC) {
885
+ const anthropicOptions = agentContext.clientOptions;
886
+ if (anthropicOptions?.promptCache === true) {
887
+ finalMessages = addCacheControl(finalMessages);
888
+ }
889
+ }
890
+ else if (agentContext.provider === Providers.BEDROCK) {
891
+ const bedrockOptions = agentContext.clientOptions;
892
+ // Both Claude and Nova models support cachePoint in system and messages
893
+ // (Llama, Titan, and other models do NOT support cachePoint)
894
+ const modelId = bedrockOptions?.model?.toLowerCase() ?? '';
895
+ const supportsCaching = modelId.includes('claude') ||
896
+ modelId.includes('anthropic') ||
897
+ modelId.includes('nova');
898
+ if (bedrockOptions?.promptCache === true && supportsCaching) {
899
+ finalMessages = addBedrockCacheControl(finalMessages);
900
+ }
901
+ }
902
+ if (agentContext.lastStreamCall != null &&
903
+ agentContext.streamBuffer != null) {
904
+ const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
905
+ if (timeSinceLastCall < agentContext.streamBuffer) {
906
+ const timeToWait = Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
907
+ 1000;
908
+ await sleep(timeToWait);
909
+ }
910
+ }
911
+ agentContext.lastStreamCall = Date.now();
912
+ let result;
913
+ const fallbacks = agentContext.clientOptions?.fallbacks ??
914
+ [];
915
+ if (finalMessages.length === 0) {
916
+ throw new Error(JSON.stringify({
917
+ type: 'empty_messages',
918
+ info: 'Message pruning removed all messages as none fit in the context window. Please increase the context window size or make your message shorter.',
919
+ }));
920
+ }
921
+ // Get model info for analytics
922
+ const bedrockOpts = agentContext.clientOptions;
923
+ const modelId = bedrockOpts?.model ||
924
+ agentContext.clientOptions
925
+ ?.modelName;
926
+ const thinkingConfig = bedrockOpts?.additionalModelRequestFields?.['thinking'] ||
927
+ agentContext.clientOptions
928
+ ?.thinking;
929
+ // Build and emit context analytics for traces
930
+ const contextAnalytics = buildContextAnalytics(finalMessages, {
931
+ tokenCounter: agentContext.tokenCounter,
932
+ maxContextTokens: agentContext.maxContextTokens,
933
+ instructionTokens: agentContext.instructionTokens,
934
+ indexTokenCountMap: agentContext.indexTokenCountMap,
935
+ });
936
+ // Store for retrieval via getContextAnalytics() after run completes
937
+ this.lastContextAnalytics = contextAnalytics;
938
+ await safeDispatchCustomEvent(GraphEvents.ON_CONTEXT_ANALYTICS, {
939
+ provider: agentContext.provider,
940
+ model: modelId,
941
+ thinkingEnabled: thinkingConfig != null,
942
+ cacheEnabled: bedrockOpts?.promptCache === true,
943
+ analytics: contextAnalytics,
944
+ }, config);
945
+ // ====================================================================
946
+ // CONTEXT PRESSURE AWARENESS — Intelligent Sub-Agent Delegation
947
+ //
948
+ // Two triggers for delegation hints:
949
+ // 1. DOCUMENT COUNT: When 3+ documents are detected in the conversation,
950
+ // inject a delegation hint on the FIRST iteration (before the LLM
951
+ // has called any tools). This ensures the agent delegates upfront
952
+ // rather than trying to process all documents itself.
953
+ // 2. TOKEN UTILIZATION: At EVERY iteration, if context is filling up
954
+ // (70%/85%), inject escalating hints to delegate remaining work.
955
+ //
956
+ // This runs mid-chain — so even if tool responses push context up
957
+ // after the first LLM call, subsequent iterations get the hint.
958
+ // ====================================================================
959
+ const hasTaskToolInContext = agentContext.tools?.some((tool) => {
960
+ const toolName = typeof tool === 'object' && 'name' in tool
961
+ ? tool.name
962
+ : '';
963
+ return toolName === 'task';
964
+ });
965
+ if (hasTaskToolInContext &&
966
+ contextAnalytics.utilizationPercent != null &&
967
+ contextAnalytics.maxContextTokens != null) {
968
+ const utilization = contextAnalytics.utilizationPercent;
969
+ const totalTokens = contextAnalytics.totalTokens;
970
+ const maxTokens = contextAnalytics.maxContextTokens;
971
+ const remainingTokens = maxTokens - totalTokens;
972
+ // Count attached documents by scanning for document patterns in HumanMessages:
973
+ // 1. # "filename" headers in "Attached document(s):" blocks (text content)
974
+ // 2. **filename1, filename2** in "The user has attached:" blocks (embedded files)
975
+ // 3. Filenames in file_search tool results
976
+ let documentCount = 0;
977
+ const documentNames = [];
978
+ for (const msg of finalMessages) {
979
+ const content = typeof msg.content === 'string'
980
+ ? msg.content
981
+ : Array.isArray(msg.content)
982
+ ? msg.content.map((p) => {
983
+ const part = p;
984
+ return String(part.text || part.content || '');
985
+ }).join(' ')
986
+ : '';
987
+ // Pattern 1: # "filename" headers in attached document blocks
988
+ const docMatches = content.match(/# "([^"]+)"/g);
989
+ if (docMatches) {
990
+ for (const match of docMatches) {
991
+ const name = match.replace(/# "/, '').replace(/"$/, '');
992
+ if (!documentNames.includes(name)) {
993
+ documentNames.push(name);
994
+ documentCount++;
995
+ }
996
+ }
997
+ }
998
+ // Pattern 2: "The user has attached: **file1, file2**" (embedded files)
999
+ const attachedMatch = content.match(/user has attached:\s*\*\*([^*]+)\*\*/i);
1000
+ if (attachedMatch) {
1001
+ const names = attachedMatch[1].split(',').map((n) => n.trim()).filter(Boolean);
1002
+ for (const name of names) {
1003
+ if (!documentNames.includes(name)) {
1004
+ documentNames.push(name);
1005
+ documentCount++;
1006
+ }
1007
+ }
1008
+ }
1009
+ }
1010
+ // BASELINE LOG: Always fires so we can verify this code path runs
1011
+ console.info(`[Graph] Context utilization: ${utilization.toFixed(1)}% ` +
1012
+ `(${totalTokens}/${maxTokens} tokens, ${remainingTokens} remaining) | ` +
1013
+ `hasTaskTool: true | messages: ${finalMessages.length} | docs: ${documentCount}`);
1014
+ // TRIGGER 1: Multi-document delegation (3+ documents detected)
1015
+ // Only inject on first iteration (no AI messages yet = agent hasn't responded)
1016
+ const hasAiResponse = finalMessages.some((m) => m._getType?.() === 'ai' || m._getType?.() === 'tool');
1017
+ if (documentCount >= 3 && !hasAiResponse) {
1018
+ const pressureMsg = new HumanMessage({
1019
+ content: `[MULTI-DOCUMENT PROCESSING — ${documentCount} documents detected]\n` +
1020
+ `Documents: ${documentNames.join(', ')}\n\n` +
1021
+ `You have ${documentCount} documents attached. For thorough analysis, use the "task" tool ` +
1022
+ `to delegate each document (or group of related documents) to a sub-agent.\n` +
1023
+ `Each sub-agent has its own fresh context window and can use file_search to retrieve the full document content.\n` +
1024
+ `After all sub-agents complete, synthesize their results into a comprehensive response.\n\n` +
1025
+ `This approach ensures each document gets full attention without context limitations.`,
1026
+ });
1027
+ finalMessages = [...finalMessages, pressureMsg];
1028
+ console.info(`[Graph] Multi-document delegation hint injected for ${documentCount} documents: ` +
1029
+ `${documentNames.join(', ')}`);
1030
+ }
1031
+ // TRIGGER 2: Token utilization thresholds (mid-chain safety net)
1032
+ // Also fires when we skipped pruning due to delegationInjectedPrePrune
1033
+ if (utilization > 85 || (delegationInjectedPrePrune && utilization > 50)) {
1034
+ // CRITICAL: Context is high — MANDATE delegation
1035
+ const pressureMsg = new HumanMessage({
1036
+ content: `[CONTEXT BUDGET CRITICAL — ${utilization.toFixed(0)}% used]\n` +
1037
+ `You have used ${totalTokens} of ${maxTokens} tokens (${remainingTokens} remaining).\n` +
1038
+ `Your context is very large. You MUST use the "task" tool to delegate work to sub-agents.\n` +
1039
+ `Each sub-agent runs in its own fresh context window and can use file_search to access documents.\n` +
1040
+ `Do NOT attempt to process documents directly — delegate each document to a sub-agent, then synthesize results.`,
1041
+ });
1042
+ finalMessages = [...finalMessages, pressureMsg];
1043
+ console.warn(`[Graph] Context pressure CRITICAL (${utilization.toFixed(0)}%): ` +
1044
+ `Injected mandatory delegation hint. ${remainingTokens} tokens remaining. ` +
1045
+ `prePruneSkipped: ${delegationInjectedPrePrune}`);
1046
+ }
1047
+ else if (utilization > 70) {
1048
+ // WARNING: Context filling up — suggest delegation
1049
+ const pressureMsg = new HumanMessage({
1050
+ content: `[CONTEXT BUDGET WARNING — ${utilization.toFixed(0)}% used]\n` +
1051
+ `You have used ${totalTokens} of ${maxTokens} tokens (${remainingTokens} remaining).\n` +
1052
+ `Your context is filling up. Consider using the "task" tool to delegate complex operations to sub-agents.\n` +
1053
+ `Sub-agents run in fresh context windows and won't consume your remaining budget.`,
1054
+ });
1055
+ finalMessages = [...finalMessages, pressureMsg];
1056
+ console.info(`[Graph] Context pressure WARNING (${utilization.toFixed(0)}%): ` +
1057
+ `Injected delegation suggestion. ${remainingTokens} tokens remaining.`);
1058
+ }
1059
+ }
1060
+ // Structured output mode: when the agent has NO tools, produce structured JSON immediately.
1061
+ // When the agent HAS tools, we defer structured output until after tool use completes
1062
+ // (see the deferred structured output block after attemptInvoke below).
1063
+ const hasTools = (toolsForBinding?.length ?? 0) > 0;
1064
+ if (agentContext.isStructuredOutputMode &&
1065
+ agentContext.structuredOutput &&
1066
+ !hasTools) {
1067
+ try {
1068
+ const structuredResult = await this.performStructuredOutput({
1069
+ agentContext,
1070
+ finalMessages,
1071
+ config,
1072
+ });
1073
+ agentContext.currentUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
1074
+ this.cleanupSignalListener();
1075
+ return structuredResult;
1076
+ }
1077
+ catch (structuredError) {
1078
+ console.error('[Graph] Structured output failed:', structuredError);
1079
+ throw structuredError;
1080
+ }
1081
+ }
1082
+ try {
1083
+ result = await this.attemptInvoke({
1084
+ currentModel: model,
1085
+ finalMessages,
1086
+ provider: agentContext.provider,
1087
+ tools: agentContext.tools,
1088
+ }, config);
1089
+ }
1090
+ catch (primaryError) {
1091
+ // Check if this is a "input too long" error from Bedrock/Anthropic
1092
+ const errorMessage = primaryError.message.toLowerCase() ?? '';
1093
+ const isInputTooLongError = errorMessage.includes('too long') ||
1094
+ errorMessage.includes('input is too long') ||
1095
+ errorMessage.includes('context length') ||
1096
+ errorMessage.includes('maximum context') ||
1097
+ errorMessage.includes('validationexception') ||
1098
+ errorMessage.includes('prompt is too long');
1099
+ // Log when we detect the error
1100
+ if (isInputTooLongError) {
1101
+ console.warn('[Graph] Detected input too long error:', errorMessage.substring(0, 200));
1102
+ console.warn('[Graph] Checking emergency pruning conditions:', {
1103
+ hasPruneMessages: !!agentContext.pruneMessages,
1104
+ hasTokenCounter: !!agentContext.tokenCounter,
1105
+ maxContextTokens: agentContext.maxContextTokens,
1106
+ indexTokenMapKeys: Object.keys(agentContext.indexTokenCountMap)
1107
+ .length,
1108
+ });
1109
+ }
1110
+ // If input too long and we have pruning capability OR tokenCounter, retry with progressively more aggressive pruning
1111
+ // Note: We can create emergency pruneMessages dynamically if we have tokenCounter and maxContextTokens
1112
+ const canPrune = agentContext.tokenCounter && agentContext.maxContextTokens;
1113
+ if (isInputTooLongError && canPrune) {
1114
+ // Progressive reduction: 50% -> 25% -> 10% of original context
1115
+ const reductionLevels = [0.5, 0.25, 0.1];
1116
+ for (const reductionFactor of reductionLevels) {
1117
+ if (result)
1118
+ break; // Exit if we got a result
1119
+ const reducedMaxTokens = Math.floor(agentContext.maxContextTokens * reductionFactor);
1120
+ console.warn(`[Graph] Input too long. Retrying with ${reductionFactor * 100}% context (${reducedMaxTokens} tokens)...`);
1121
+ // Build fresh indexTokenCountMap if missing/incomplete
1122
+ // This is needed when messages were dynamically added without updating the token map
1123
+ let tokenMapForPruning = agentContext.indexTokenCountMap;
1124
+ if (Object.keys(tokenMapForPruning).length < messages.length) {
1125
+ console.warn('[Graph] Building fresh token count map for emergency pruning...');
1126
+ tokenMapForPruning = {};
1127
+ for (let i = 0; i < messages.length; i++) {
1128
+ tokenMapForPruning[i] = agentContext.tokenCounter(messages[i]);
1129
+ }
1130
+ }
1131
+ const emergencyPrune = createPruneMessages({
1132
+ startIndex: this.startIndex,
1133
+ provider: agentContext.provider,
1134
+ tokenCounter: agentContext.tokenCounter,
1135
+ maxTokens: reducedMaxTokens,
1136
+ thinkingEnabled: false, // Disable thinking for emergency prune
1137
+ indexTokenCountMap: tokenMapForPruning,
1138
+ });
1139
+ const { context: reducedMessages } = emergencyPrune({
1140
+ messages,
1141
+ usageMetadata: agentContext.currentUsage,
1142
+ });
1143
+ // Skip if we can't fit any messages
1144
+ if (reducedMessages.length === 0) {
1145
+ console.warn(`[Graph] Cannot fit any messages at ${reductionFactor * 100}% reduction, trying next level...`);
1146
+ continue;
1147
+ }
1148
+ // Calculate how many messages were pruned and estimate context timeframe
1149
+ const prunedCount = finalMessages.length - reducedMessages.length;
1150
+ const remainingCount = reducedMessages.length;
1151
+ const estimatedContextDescription = this.getContextTimeframeDescription(remainingCount);
1152
+ // Inject a personalized context message to inform the agent about pruning
1153
+ const pruneNoticeMessage = new HumanMessage({
1154
+ content: `[CONTEXT NOTICE]
1155
+ Our conversation has grown quite long, so I've focused on ${estimatedContextDescription} of our chat (${remainingCount} recent messages). ${prunedCount} earlier messages are no longer in my immediate memory.
1156
+
1157
+ If I seem to be missing something we discussed earlier, just give me a quick reminder and I'll pick right back up! I'm still fully engaged and ready to help with whatever you need.`,
1158
+ });
1159
+ // Insert the notice after the system message (if any) but before conversation
1160
+ const hasSystemMessage = reducedMessages[0]?.getType() === 'system';
1161
+ const insertIndex = hasSystemMessage ? 1 : 0;
1162
+ // Create new array with the pruning notice
1163
+ const messagesWithNotice = [
1164
+ ...reducedMessages.slice(0, insertIndex),
1165
+ pruneNoticeMessage,
1166
+ ...reducedMessages.slice(insertIndex),
1167
+ ];
1168
+ let retryMessages = agentContext.useLegacyContent
1169
+ ? formatContentStrings(messagesWithNotice)
1170
+ : messagesWithNotice;
1171
+ // Apply thinking block handling first (before cache control)
1172
+ // This ensures AI+Tool sequences are converted to HumanMessages
1173
+ // before we add cache points that could be lost in the conversion
1174
+ if (isAnthropicWithThinking) {
1175
+ retryMessages = ensureThinkingBlockInMessages(retryMessages, agentContext.provider);
1176
+ }
1177
+ // Apply Bedrock cache control if needed (after thinking block handling)
1178
+ if (agentContext.provider === Providers.BEDROCK) {
1179
+ const bedrockOptions = agentContext.clientOptions;
1180
+ const modelId = bedrockOptions?.model?.toLowerCase() ?? '';
1181
+ const supportsCaching = modelId.includes('claude') ||
1182
+ modelId.includes('anthropic') ||
1183
+ modelId.includes('nova');
1184
+ if (bedrockOptions?.promptCache === true && supportsCaching) {
1185
+ retryMessages =
1186
+ addBedrockCacheControl(retryMessages);
1187
+ }
1188
+ }
1189
+ try {
1190
+ result = await this.attemptInvoke({
1191
+ currentModel: model,
1192
+ finalMessages: retryMessages,
1193
+ provider: agentContext.provider,
1194
+ tools: agentContext.tools,
1195
+ }, config);
1196
+ // Success with reduced context
1197
+ console.info(`[Graph] ✅ Retry successful at ${reductionFactor * 100}% with ${reducedMessages.length} messages (reduced from ${finalMessages.length})`);
1198
+ }
1199
+ catch (retryError) {
1200
+ const retryErrorMsg = retryError.message.toLowerCase() ?? '';
1201
+ const stillTooLong = retryErrorMsg.includes('too long') ||
1202
+ retryErrorMsg.includes('context length') ||
1203
+ retryErrorMsg.includes('validationexception');
1204
+ if (stillTooLong && reductionFactor > 0.1) {
1205
+ console.warn(`[Graph] Still too long at ${reductionFactor * 100}%, trying more aggressive pruning...`);
1206
+ }
1207
+ else {
1208
+ console.error(`[Graph] Retry at ${reductionFactor * 100}% failed:`, retryError.message);
1209
+ }
1210
+ }
1211
+ }
1212
+ }
1213
+ // If we got a result from retry, skip fallbacks
1214
+ if (result) ;
1215
+ else {
1216
+ let lastError = primaryError;
1217
+ for (const fb of fallbacks) {
1218
+ try {
1219
+ let model = this.getNewModel({
1220
+ provider: fb.provider,
1221
+ clientOptions: fb.clientOptions,
1222
+ });
1223
+ const bindableTools = agentContext.tools;
1224
+ model = (!bindableTools || bindableTools.length === 0
1225
+ ? model
1226
+ : model.bindTools(bindableTools));
1227
+ result = await this.attemptInvoke({
1228
+ currentModel: model,
1229
+ finalMessages,
1230
+ provider: fb.provider,
1231
+ tools: agentContext.tools,
1232
+ }, config);
1233
+ lastError = undefined;
1234
+ break;
1235
+ }
1236
+ catch (e) {
1237
+ lastError = e;
1238
+ continue;
1239
+ }
1240
+ }
1241
+ if (lastError !== undefined) {
1242
+ throw lastError;
1243
+ }
1244
+ }
1245
+ }
1246
+ if (!result) {
1247
+ throw new Error('No result after model invocation');
1248
+ }
1249
+ agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
1250
+ // Extract and normalize the LLM's finish/stop reason for auto-continuation support
1251
+ const finalMsg = result.messages?.[0];
1252
+ if (finalMsg && 'response_metadata' in finalMsg) {
1253
+ const meta = finalMsg.response_metadata;
1254
+ // Bedrock streaming nests stopReason inside messageStop: { stopReason: '...' }
1255
+ const messageStop = meta.messageStop;
1256
+ this.lastFinishReason =
1257
+ meta.finish_reason ?? // OpenAI/Azure
1258
+ meta.stop_reason ?? // Anthropic direct API
1259
+ meta.stopReason ?? // Bedrock invoke (non-streaming)
1260
+ messageStop?.stopReason ?? // Bedrock streaming
1261
+ meta.finishReason ?? // VertexAI/Google
1262
+ undefined;
1263
+ }
1264
+ this.cleanupSignalListener();
1265
+ // DEFERRED STRUCTURED OUTPUT: When the agent has tools AND structured output configured,
1266
+ // we let the agent use tools normally via attemptInvoke(). Once the agent's response
1267
+ // has NO tool_calls (it's done with tools), we produce the final structured JSON response.
1268
+ if (agentContext.isStructuredOutputMode &&
1269
+ agentContext.structuredOutput &&
1270
+ result) {
1271
+ const lastMessage = result.messages?.[0];
1272
+ const resultHasToolCalls = lastMessage &&
1273
+ 'tool_calls' in lastMessage &&
1274
+ (lastMessage.tool_calls?.length ?? 0) > 0;
1275
+ if (!resultHasToolCalls) {
1276
+ try {
1277
+ // Build messages for structured output: include the full conversation
1278
+ // plus the agent's text response from attemptInvoke, so the structured
1279
+ // output model has full context (tool results + agent reasoning).
1280
+ const messagesForStructured = [...finalMessages];
1281
+ if (lastMessage) {
1282
+ messagesForStructured.push(lastMessage);
1283
+ }
1284
+ const structuredResult = await this.performStructuredOutput({
1285
+ agentContext,
1286
+ finalMessages: messagesForStructured,
1287
+ config,
1288
+ });
1289
+ // Accumulate token usage from both API calls
1290
+ const structuredUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
1291
+ if (structuredUsage && agentContext.currentUsage) {
1292
+ agentContext.currentUsage = {
1293
+ input_tokens: (agentContext.currentUsage.input_tokens ?? 0) +
1294
+ (structuredUsage.input_tokens ?? 0),
1295
+ output_tokens: (agentContext.currentUsage.output_tokens ?? 0) +
1296
+ (structuredUsage.output_tokens ?? 0),
1297
+ total_tokens: (agentContext.currentUsage.total_tokens ?? 0) +
1298
+ (structuredUsage.total_tokens ?? 0),
1299
+ };
1300
+ }
1301
+ else if (structuredUsage) {
1302
+ agentContext.currentUsage = structuredUsage;
1303
+ }
1304
+ return structuredResult;
1305
+ }
1306
+ catch (structuredError) {
1307
+ // Graceful fallback: the agent completed its work with tools,
1308
+ // but we couldn't format the output as structured JSON.
1309
+ // Return the unstructured text response from attemptInvoke.
1310
+ console.error('[Graph] Deferred structured output failed after successful tool use:', structuredError);
1311
+ console.warn('[Graph] Falling back to unstructured response from tool-use phase');
1312
+ return result;
1313
+ }
1314
+ }
1315
+ }
1316
+ return result;
1317
+ };
1318
+ }
1319
+ createAgentNode(agentId) {
1320
+ const agentContext = this.agentContexts.get(agentId);
1321
+ if (!agentContext) {
1322
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
1323
+ }
1324
+ const agentNode = `${AGENT}${agentId}`;
1325
+ const toolNode = `${TOOLS}${agentId}`;
1326
+ const routeMessage = (state, config) => {
1327
+ this.config = config;
1328
+ return toolsCondition(state, toolNode, this.invokedToolIds);
1329
+ };
1330
+ const StateAnnotation = Annotation.Root({
1331
+ messages: Annotation({
1332
+ reducer: messagesStateReducer,
1333
+ default: () => [],
1334
+ }),
1335
+ });
1336
+ const workflow = new StateGraph(StateAnnotation)
1337
+ .addNode(agentNode, this.createCallModel(agentId))
1338
+ .addNode(toolNode, this.initializeTools({
1339
+ currentTools: agentContext.tools,
1340
+ currentToolMap: agentContext.toolMap,
1341
+ agentContext,
1342
+ }))
1343
+ .addEdge(START, agentNode)
1344
+ .addConditionalEdges(agentNode, routeMessage)
1345
+ .addEdge(toolNode, agentContext.toolEnd ? END : agentNode);
1346
+ // Cast to unknown to avoid tight coupling to external types; options are opt-in
1347
+ return workflow.compile(this.compileOptions);
1348
+ }
1349
+ createWorkflow() {
1350
+ /** Use the default (first) agent for now */
1351
+ const agentNode = this.createAgentNode(this.defaultAgentId);
1352
+ const StateAnnotation = Annotation.Root({
1353
+ messages: Annotation({
1354
+ reducer: (a, b) => {
1355
+ if (!a.length) {
1356
+ this.startIndex = a.length + b.length;
1357
+ }
1358
+ const result = messagesStateReducer(a, b);
1359
+ this.messages = result;
1360
+ return result;
1361
+ },
1362
+ default: () => [],
1363
+ }),
1364
+ });
1365
+ const workflow = new StateGraph(StateAnnotation)
1366
+ .addNode(this.defaultAgentId, agentNode, { ends: [END] })
1367
+ .addEdge(START, this.defaultAgentId)
1368
+ .compile();
1369
+ return workflow;
1370
+ }
1371
+ /**
1372
+ * Indicates if this is a multi-agent graph.
1373
+ * Override in MultiAgentGraph to return true.
1374
+ * Used to conditionally include agentId in RunStep for frontend rendering.
1375
+ */
1376
+ isMultiAgentGraph() {
1377
+ return false;
1378
+ }
1379
+ /**
1380
+ * Get the parallel group ID for an agent, if any.
1381
+ * Override in MultiAgentGraph to provide actual group IDs.
1382
+ * Group IDs are incrementing numbers (1, 2, 3...) reflecting execution order.
1383
+ * @param _agentId - The agent ID to look up
1384
+ * @returns undefined for StandardGraph (no parallel groups), or group number for MultiAgentGraph
1385
+ */
1386
+ getParallelGroupIdForAgent(_agentId) {
1387
+ return undefined;
1388
+ }
1389
+ /* Dispatchers */
1390
+ /**
1391
+ * Dispatches a run step to the client, returns the step ID
1392
+ */
1393
+ async dispatchRunStep(stepKey, stepDetails, metadata) {
1394
+ if (!this.config) {
1395
+ throw new Error('No config provided');
1396
+ }
1397
+ const [stepId, stepIndex] = this.generateStepId(stepKey);
1398
+ if (stepDetails.type === StepTypes.TOOL_CALLS && stepDetails.tool_calls) {
1399
+ for (const tool_call of stepDetails.tool_calls) {
1400
+ const toolCallId = tool_call.id ?? '';
1401
+ if (!toolCallId || this.toolCallStepIds.has(toolCallId)) {
1402
+ continue;
1403
+ }
1404
+ this.toolCallStepIds.set(toolCallId, stepId);
1405
+ }
1406
+ }
1407
+ const runStep = {
1408
+ stepIndex,
1409
+ id: stepId,
1410
+ type: stepDetails.type,
1411
+ index: this.contentData.length,
1412
+ stepDetails,
1413
+ usage: null,
1414
+ };
1415
+ const runId = this.runId ?? '';
1416
+ if (runId) {
1417
+ runStep.runId = runId;
1418
+ }
1419
+ /**
1420
+ * Extract agentId and parallelGroupId from metadata
1421
+ * Only set agentId for MultiAgentGraph (so frontend knows when to show agent labels)
1422
+ */
1423
+ if (metadata) {
1424
+ try {
1425
+ const agentContext = this.getAgentContext(metadata);
1426
+ if (this.isMultiAgentGraph() && agentContext.agentId) {
1427
+ // Only include agentId for MultiAgentGraph - enables frontend to show agent labels
1428
+ runStep.agentId = agentContext.agentId;
1429
+ // Set group ID if this agent is part of a parallel group
1430
+ // Group IDs are incrementing numbers (1, 2, 3...) reflecting execution order
1431
+ const groupId = this.getParallelGroupIdForAgent(agentContext.agentId);
1432
+ if (groupId != null) {
1433
+ runStep.groupId = groupId;
1434
+ }
1435
+ }
1436
+ }
1437
+ catch (_e) {
1438
+ /** If we can't get agent context, that's okay - agentId remains undefined */
1439
+ }
1440
+ }
1441
+ this.contentData.push(runStep);
1442
+ this.contentIndexMap.set(stepId, runStep.index);
1443
+ await safeDispatchCustomEvent(GraphEvents.ON_RUN_STEP, runStep, this.config);
1444
+ return stepId;
1445
+ }
1446
/**
 * Finalizes a tool call run step once the tool has produced output.
 *
 * Resolves the run step previously registered for the tool call id, captures
 * code-execution session artifacts (files) into `this.sessions`, then notifies
 * the ON_RUN_STEP_COMPLETED handler with the serialized tool call result.
 *
 * @param data - Tool completion payload carrying `input` and `output`.
 * @param metadata - Event metadata forwarded to the completion handler.
 * @param omitOutput - When strictly `true`, the dispatched tool_call carries an empty output string.
 * @throws {Error} When no config is set, or no step/run-step was registered for the call.
 */
async handleToolCallCompleted(data, metadata, omitOutput) {
    if (!this.config) {
        throw new Error('No config provided');
    }
    // Nothing to finalize if the tool produced no output.
    if (!data.output) {
        return;
    }
    const { input, output: _output } = data;
    // LangGraph `Command` outputs are control-flow directives, not tool results; skip them.
    if (_output?.lg_name === 'Command') {
        return;
    }
    const output = _output;
    const { tool_call_id } = output;
    const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
    if (!stepId) {
        throw new Error(`No stepId found for tool_call_id ${tool_call_id}`);
    }
    const runStep = this.getRunStep(stepId);
    if (!runStep) {
        throw new Error(`No run step found for stepId ${stepId}`);
    }
    /**
     * Extract and store code execution session context from artifacts.
     * Each file is stamped with its source session_id to support multi-session file tracking.
     * When the same filename appears in a later execution, the newer version replaces the old.
     */
    const toolName = output.name;
    if (toolName === Constants.EXECUTE_CODE ||
        toolName === Constants.PROGRAMMATIC_TOOL_CALLING) {
        const artifact = output.artifact;
        const newFiles = artifact?.files ?? [];
        const hasNewFiles = newFiles.length > 0;
        // Only track sessions that produced files and carry a non-empty session_id.
        if (hasNewFiles &&
            artifact?.session_id != null &&
            artifact.session_id !== '') {
            /**
             * Stamp each new file with its source session_id.
             * This enables files from different executions (parallel or sequential)
             * to be tracked and passed to subsequent calls.
             */
            const filesWithSession = newFiles.map((file) => ({
                ...file,
                session_id: artifact.session_id,
            }));
            const existingSession = this.sessions.get(Constants.EXECUTE_CODE);
            const existingFiles = existingSession?.files ?? [];
            /**
             * Merge files, preferring latest versions by name.
             * If a file with the same name exists, replace it with the new version.
             * This handles cases where files are edited/recreated in subsequent executions.
             */
            const newFileNames = new Set(filesWithSession.map((f) => f.name));
            const filteredExisting = existingFiles.filter((f) => !newFileNames.has(f.name));
            this.sessions.set(Constants.EXECUTE_CODE, {
                /** Keep latest session_id for reference/fallback */
                session_id: artifact.session_id,
                /** Accumulated files with latest versions preferred */
                files: [...filteredExisting, ...filesWithSession],
                lastUpdated: Date.now(),
            });
        }
    }
    // Serialize tool output for dispatch; non-string content becomes JSON text.
    const dispatchedOutput = typeof output.content === 'string'
        ? output.content
        : JSON.stringify(output.content);
    // NOTE(review): assumes a non-string `input` wraps the raw args under `.input` — confirm against caller.
    const args = typeof input === 'string' ? input : input.input;
    const tool_call = {
        args: typeof args === 'string' ? args : JSON.stringify(args),
        name: output.name ?? '',
        id: output.tool_call_id,
        output: omitOutput === true ? '' : dispatchedOutput,
        progress: 1,
    };
    await this.handlerRegistry
        ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
        ?.handle(GraphEvents.ON_RUN_STEP_COMPLETED, {
        result: {
            id: stepId,
            index: runStep.index,
            type: 'tool_call',
            tool_call,
        },
    }, metadata, this);
}
1530
+ /**
1531
+ * Static version of handleToolCallError to avoid creating strong references
1532
+ * that prevent garbage collection
1533
+ */
1534
+ static async handleToolCallErrorStatic(graph, data, metadata) {
1535
+ if (!graph.config) {
1536
+ throw new Error('No config provided');
1537
+ }
1538
+ if (!data.id) {
1539
+ console.warn('No Tool ID provided for Tool Error');
1540
+ return;
1541
+ }
1542
+ const stepId = graph.toolCallStepIds.get(data.id) ?? '';
1543
+ if (!stepId) {
1544
+ throw new Error(`No stepId found for tool_call_id ${data.id}`);
1545
+ }
1546
+ const { name, input: args, error } = data;
1547
+ const runStep = graph.getRunStep(stepId);
1548
+ if (!runStep) {
1549
+ throw new Error(`No run step found for stepId ${stepId}`);
1550
+ }
1551
+ const tool_call = {
1552
+ id: data.id,
1553
+ name: name || '',
1554
+ args: typeof args === 'string' ? args : JSON.stringify(args),
1555
+ output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
1556
+ progress: 1,
1557
+ };
1558
+ await graph.handlerRegistry
1559
+ ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
1560
+ ?.handle(GraphEvents.ON_RUN_STEP_COMPLETED, {
1561
+ result: {
1562
+ id: stepId,
1563
+ index: runStep.index,
1564
+ type: 'tool_call',
1565
+ tool_call,
1566
+ },
1567
+ }, metadata, graph);
1568
+ }
1569
+ /**
1570
+ * Instance method that delegates to the static method
1571
+ * Kept for backward compatibility
1572
+ */
1573
+ async handleToolCallError(data, metadata) {
1574
+ await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
1575
+ }
1576
+ async dispatchRunStepDelta(id, delta) {
1577
+ if (!this.config) {
1578
+ throw new Error('No config provided');
1579
+ }
1580
+ else if (!id) {
1581
+ throw new Error('No step ID found');
1582
+ }
1583
+ const runStepDelta = {
1584
+ id,
1585
+ delta,
1586
+ };
1587
+ await safeDispatchCustomEvent(GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
1588
+ }
1589
+ async dispatchMessageDelta(id, delta) {
1590
+ if (!this.config) {
1591
+ throw new Error('No config provided');
1592
+ }
1593
+ const messageDelta = {
1594
+ id,
1595
+ delta,
1596
+ };
1597
+ await safeDispatchCustomEvent(GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
1598
+ }
1599
/**
 * Emits an ON_REASONING_DELTA custom event for a reasoning step.
 * Defined as an arrow-function class property so `this` stays bound
 * when the method is passed around as a callback.
 *
 * @param stepId - Identifier of the reasoning run step.
 * @param delta - Partial reasoning content for consumers to merge.
 * @throws {Error} When no config is set.
 */
dispatchReasoningDelta = async (stepId, delta) => {
    if (!this.config) {
        throw new Error('No config provided');
    }
    await safeDispatchCustomEvent(GraphEvents.ON_REASONING_DELTA, { id: stepId, delta }, this.config);
};
1609
+ }
1610
+
1611
+ export { Graph, StandardGraph };
1612
+ //# sourceMappingURL=Graph.mjs.map