@illuma-ai/agents 1.0.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (558)
  1. package/README.md +485 -0
  2. package/dist/cjs/agents/AgentContext.cjs +734 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +190 -0
  5. package/dist/cjs/common/enum.cjs.map +1 -0
  6. package/dist/cjs/events.cjs +172 -0
  7. package/dist/cjs/events.cjs.map +1 -0
  8. package/dist/cjs/graphs/Graph.cjs +1615 -0
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +890 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/instrumentation.cjs +21 -0
  13. package/dist/cjs/instrumentation.cjs.map +1 -0
  14. package/dist/cjs/llm/anthropic/index.cjs +292 -0
  15. package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
  16. package/dist/cjs/llm/anthropic/types.cjs +50 -0
  17. package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
  18. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +630 -0
  19. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  20. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
  21. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  22. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  23. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  24. package/dist/cjs/llm/bedrock/index.cjs +282 -0
  25. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  26. package/dist/cjs/llm/fake.cjs +97 -0
  27. package/dist/cjs/llm/fake.cjs.map +1 -0
  28. package/dist/cjs/llm/google/index.cjs +216 -0
  29. package/dist/cjs/llm/google/index.cjs.map +1 -0
  30. package/dist/cjs/llm/google/utils/common.cjs +647 -0
  31. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  32. package/dist/cjs/llm/openai/index.cjs +1028 -0
  33. package/dist/cjs/llm/openai/index.cjs.map +1 -0
  34. package/dist/cjs/llm/openai/utils/index.cjs +765 -0
  35. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  36. package/dist/cjs/llm/openrouter/index.cjs +212 -0
  37. package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
  38. package/dist/cjs/llm/providers.cjs +43 -0
  39. package/dist/cjs/llm/providers.cjs.map +1 -0
  40. package/dist/cjs/llm/text.cjs +69 -0
  41. package/dist/cjs/llm/text.cjs.map +1 -0
  42. package/dist/cjs/llm/vertexai/index.cjs +329 -0
  43. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  44. package/dist/cjs/main.cjs +240 -0
  45. package/dist/cjs/main.cjs.map +1 -0
  46. package/dist/cjs/messages/cache.cjs +387 -0
  47. package/dist/cjs/messages/cache.cjs.map +1 -0
  48. package/dist/cjs/messages/content.cjs +53 -0
  49. package/dist/cjs/messages/content.cjs.map +1 -0
  50. package/dist/cjs/messages/core.cjs +367 -0
  51. package/dist/cjs/messages/core.cjs.map +1 -0
  52. package/dist/cjs/messages/format.cjs +761 -0
  53. package/dist/cjs/messages/format.cjs.map +1 -0
  54. package/dist/cjs/messages/ids.cjs +23 -0
  55. package/dist/cjs/messages/ids.cjs.map +1 -0
  56. package/dist/cjs/messages/prune.cjs +398 -0
  57. package/dist/cjs/messages/prune.cjs.map +1 -0
  58. package/dist/cjs/messages/tools.cjs +96 -0
  59. package/dist/cjs/messages/tools.cjs.map +1 -0
  60. package/dist/cjs/run.cjs +328 -0
  61. package/dist/cjs/run.cjs.map +1 -0
  62. package/dist/cjs/schemas/validate.cjs +324 -0
  63. package/dist/cjs/schemas/validate.cjs.map +1 -0
  64. package/dist/cjs/splitStream.cjs +210 -0
  65. package/dist/cjs/splitStream.cjs.map +1 -0
  66. package/dist/cjs/stream.cjs +620 -0
  67. package/dist/cjs/stream.cjs.map +1 -0
  68. package/dist/cjs/tools/BrowserTools.cjs +248 -0
  69. package/dist/cjs/tools/BrowserTools.cjs.map +1 -0
  70. package/dist/cjs/tools/Calculator.cjs +66 -0
  71. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  72. package/dist/cjs/tools/CodeExecutor.cjs +234 -0
  73. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
  74. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +636 -0
  75. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -0
  76. package/dist/cjs/tools/ToolNode.cjs +548 -0
  77. package/dist/cjs/tools/ToolNode.cjs.map +1 -0
  78. package/dist/cjs/tools/ToolSearch.cjs +909 -0
  79. package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
  80. package/dist/cjs/tools/handlers.cjs +255 -0
  81. package/dist/cjs/tools/handlers.cjs.map +1 -0
  82. package/dist/cjs/tools/schema.cjs +31 -0
  83. package/dist/cjs/tools/schema.cjs.map +1 -0
  84. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  85. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  86. package/dist/cjs/tools/search/content.cjs +140 -0
  87. package/dist/cjs/tools/search/content.cjs.map +1 -0
  88. package/dist/cjs/tools/search/firecrawl.cjs +179 -0
  89. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  90. package/dist/cjs/tools/search/format.cjs +203 -0
  91. package/dist/cjs/tools/search/format.cjs.map +1 -0
  92. package/dist/cjs/tools/search/highlights.cjs +245 -0
  93. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  94. package/dist/cjs/tools/search/rerankers.cjs +174 -0
  95. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  96. package/dist/cjs/tools/search/schema.cjs +117 -0
  97. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  98. package/dist/cjs/tools/search/search.cjs +566 -0
  99. package/dist/cjs/tools/search/search.cjs.map +1 -0
  100. package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
  101. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
  102. package/dist/cjs/tools/search/tool.cjs +456 -0
  103. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  104. package/dist/cjs/tools/search/utils.cjs +66 -0
  105. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  106. package/dist/cjs/types/graph.cjs +29 -0
  107. package/dist/cjs/types/graph.cjs.map +1 -0
  108. package/dist/cjs/utils/contextAnalytics.cjs +66 -0
  109. package/dist/cjs/utils/contextAnalytics.cjs.map +1 -0
  110. package/dist/cjs/utils/events.cjs +31 -0
  111. package/dist/cjs/utils/events.cjs.map +1 -0
  112. package/dist/cjs/utils/graph.cjs +16 -0
  113. package/dist/cjs/utils/graph.cjs.map +1 -0
  114. package/dist/cjs/utils/handlers.cjs +70 -0
  115. package/dist/cjs/utils/handlers.cjs.map +1 -0
  116. package/dist/cjs/utils/llm.cjs +27 -0
  117. package/dist/cjs/utils/llm.cjs.map +1 -0
  118. package/dist/cjs/utils/misc.cjs +56 -0
  119. package/dist/cjs/utils/misc.cjs.map +1 -0
  120. package/dist/cjs/utils/run.cjs +73 -0
  121. package/dist/cjs/utils/run.cjs.map +1 -0
  122. package/dist/cjs/utils/schema.cjs +27 -0
  123. package/dist/cjs/utils/schema.cjs.map +1 -0
  124. package/dist/cjs/utils/title.cjs +125 -0
  125. package/dist/cjs/utils/title.cjs.map +1 -0
  126. package/dist/cjs/utils/tokens.cjs +125 -0
  127. package/dist/cjs/utils/tokens.cjs.map +1 -0
  128. package/dist/cjs/utils/toonFormat.cjs +388 -0
  129. package/dist/cjs/utils/toonFormat.cjs.map +1 -0
  130. package/dist/esm/agents/AgentContext.mjs +732 -0
  131. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  132. package/dist/esm/common/enum.mjs +190 -0
  133. package/dist/esm/common/enum.mjs.map +1 -0
  134. package/dist/esm/events.mjs +164 -0
  135. package/dist/esm/events.mjs.map +1 -0
  136. package/dist/esm/graphs/Graph.mjs +1612 -0
  137. package/dist/esm/graphs/Graph.mjs.map +1 -0
  138. package/dist/esm/graphs/MultiAgentGraph.mjs +888 -0
  139. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  140. package/dist/esm/instrumentation.mjs +19 -0
  141. package/dist/esm/instrumentation.mjs.map +1 -0
  142. package/dist/esm/llm/anthropic/index.mjs +290 -0
  143. package/dist/esm/llm/anthropic/index.mjs.map +1 -0
  144. package/dist/esm/llm/anthropic/types.mjs +48 -0
  145. package/dist/esm/llm/anthropic/types.mjs.map +1 -0
  146. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +627 -0
  147. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  148. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
  149. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  150. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  151. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  152. package/dist/esm/llm/bedrock/index.mjs +280 -0
  153. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  154. package/dist/esm/llm/fake.mjs +94 -0
  155. package/dist/esm/llm/fake.mjs.map +1 -0
  156. package/dist/esm/llm/google/index.mjs +214 -0
  157. package/dist/esm/llm/google/index.mjs.map +1 -0
  158. package/dist/esm/llm/google/utils/common.mjs +638 -0
  159. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  160. package/dist/esm/llm/openai/index.mjs +1018 -0
  161. package/dist/esm/llm/openai/index.mjs.map +1 -0
  162. package/dist/esm/llm/openai/utils/index.mjs +759 -0
  163. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  164. package/dist/esm/llm/openrouter/index.mjs +210 -0
  165. package/dist/esm/llm/openrouter/index.mjs.map +1 -0
  166. package/dist/esm/llm/providers.mjs +39 -0
  167. package/dist/esm/llm/providers.mjs.map +1 -0
  168. package/dist/esm/llm/text.mjs +67 -0
  169. package/dist/esm/llm/text.mjs.map +1 -0
  170. package/dist/esm/llm/vertexai/index.mjs +327 -0
  171. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  172. package/dist/esm/main.mjs +37 -0
  173. package/dist/esm/main.mjs.map +1 -0
  174. package/dist/esm/messages/cache.mjs +382 -0
  175. package/dist/esm/messages/cache.mjs.map +1 -0
  176. package/dist/esm/messages/content.mjs +51 -0
  177. package/dist/esm/messages/content.mjs.map +1 -0
  178. package/dist/esm/messages/core.mjs +359 -0
  179. package/dist/esm/messages/core.mjs.map +1 -0
  180. package/dist/esm/messages/format.mjs +752 -0
  181. package/dist/esm/messages/format.mjs.map +1 -0
  182. package/dist/esm/messages/ids.mjs +21 -0
  183. package/dist/esm/messages/ids.mjs.map +1 -0
  184. package/dist/esm/messages/prune.mjs +393 -0
  185. package/dist/esm/messages/prune.mjs.map +1 -0
  186. package/dist/esm/messages/tools.mjs +93 -0
  187. package/dist/esm/messages/tools.mjs.map +1 -0
  188. package/dist/esm/run.mjs +325 -0
  189. package/dist/esm/run.mjs.map +1 -0
  190. package/dist/esm/schemas/validate.mjs +317 -0
  191. package/dist/esm/schemas/validate.mjs.map +1 -0
  192. package/dist/esm/splitStream.mjs +207 -0
  193. package/dist/esm/splitStream.mjs.map +1 -0
  194. package/dist/esm/stream.mjs +616 -0
  195. package/dist/esm/stream.mjs.map +1 -0
  196. package/dist/esm/tools/BrowserTools.mjs +244 -0
  197. package/dist/esm/tools/BrowserTools.mjs.map +1 -0
  198. package/dist/esm/tools/Calculator.mjs +41 -0
  199. package/dist/esm/tools/Calculator.mjs.map +1 -0
  200. package/dist/esm/tools/CodeExecutor.mjs +226 -0
  201. package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
  202. package/dist/esm/tools/ProgrammaticToolCalling.mjs +622 -0
  203. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -0
  204. package/dist/esm/tools/ToolNode.mjs +545 -0
  205. package/dist/esm/tools/ToolNode.mjs.map +1 -0
  206. package/dist/esm/tools/ToolSearch.mjs +870 -0
  207. package/dist/esm/tools/ToolSearch.mjs.map +1 -0
  208. package/dist/esm/tools/handlers.mjs +250 -0
  209. package/dist/esm/tools/handlers.mjs.map +1 -0
  210. package/dist/esm/tools/schema.mjs +28 -0
  211. package/dist/esm/tools/schema.mjs.map +1 -0
  212. package/dist/esm/tools/search/anthropic.mjs +37 -0
  213. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  214. package/dist/esm/tools/search/content.mjs +119 -0
  215. package/dist/esm/tools/search/content.mjs.map +1 -0
  216. package/dist/esm/tools/search/firecrawl.mjs +176 -0
  217. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  218. package/dist/esm/tools/search/format.mjs +201 -0
  219. package/dist/esm/tools/search/format.mjs.map +1 -0
  220. package/dist/esm/tools/search/highlights.mjs +243 -0
  221. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  222. package/dist/esm/tools/search/rerankers.mjs +168 -0
  223. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  224. package/dist/esm/tools/search/schema.mjs +104 -0
  225. package/dist/esm/tools/search/schema.mjs.map +1 -0
  226. package/dist/esm/tools/search/search.mjs +563 -0
  227. package/dist/esm/tools/search/search.mjs.map +1 -0
  228. package/dist/esm/tools/search/serper-scraper.mjs +129 -0
  229. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
  230. package/dist/esm/tools/search/tool.mjs +454 -0
  231. package/dist/esm/tools/search/tool.mjs.map +1 -0
  232. package/dist/esm/tools/search/utils.mjs +61 -0
  233. package/dist/esm/tools/search/utils.mjs.map +1 -0
  234. package/dist/esm/types/graph.mjs +26 -0
  235. package/dist/esm/types/graph.mjs.map +1 -0
  236. package/dist/esm/utils/contextAnalytics.mjs +64 -0
  237. package/dist/esm/utils/contextAnalytics.mjs.map +1 -0
  238. package/dist/esm/utils/events.mjs +29 -0
  239. package/dist/esm/utils/events.mjs.map +1 -0
  240. package/dist/esm/utils/graph.mjs +13 -0
  241. package/dist/esm/utils/graph.mjs.map +1 -0
  242. package/dist/esm/utils/handlers.mjs +68 -0
  243. package/dist/esm/utils/handlers.mjs.map +1 -0
  244. package/dist/esm/utils/llm.mjs +24 -0
  245. package/dist/esm/utils/llm.mjs.map +1 -0
  246. package/dist/esm/utils/misc.mjs +53 -0
  247. package/dist/esm/utils/misc.mjs.map +1 -0
  248. package/dist/esm/utils/run.mjs +70 -0
  249. package/dist/esm/utils/run.mjs.map +1 -0
  250. package/dist/esm/utils/schema.mjs +24 -0
  251. package/dist/esm/utils/schema.mjs.map +1 -0
  252. package/dist/esm/utils/title.mjs +122 -0
  253. package/dist/esm/utils/title.mjs.map +1 -0
  254. package/dist/esm/utils/tokens.mjs +121 -0
  255. package/dist/esm/utils/tokens.mjs.map +1 -0
  256. package/dist/esm/utils/toonFormat.mjs +381 -0
  257. package/dist/esm/utils/toonFormat.mjs.map +1 -0
  258. package/dist/types/agents/AgentContext.d.ts +293 -0
  259. package/dist/types/common/enum.d.ts +155 -0
  260. package/dist/types/common/index.d.ts +1 -0
  261. package/dist/types/events.d.ts +31 -0
  262. package/dist/types/graphs/Graph.d.ts +216 -0
  263. package/dist/types/graphs/MultiAgentGraph.d.ts +104 -0
  264. package/dist/types/graphs/index.d.ts +2 -0
  265. package/dist/types/index.d.ts +21 -0
  266. package/dist/types/instrumentation.d.ts +1 -0
  267. package/dist/types/llm/anthropic/index.d.ts +39 -0
  268. package/dist/types/llm/anthropic/types.d.ts +37 -0
  269. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  270. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
  271. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
  272. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  273. package/dist/types/llm/bedrock/index.d.ts +141 -0
  274. package/dist/types/llm/bedrock/types.d.ts +27 -0
  275. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  276. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  277. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  278. package/dist/types/llm/fake.d.ts +31 -0
  279. package/dist/types/llm/google/index.d.ts +24 -0
  280. package/dist/types/llm/google/types.d.ts +42 -0
  281. package/dist/types/llm/google/utils/common.d.ts +34 -0
  282. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  283. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  284. package/dist/types/llm/openai/index.d.ts +127 -0
  285. package/dist/types/llm/openai/types.d.ts +10 -0
  286. package/dist/types/llm/openai/utils/index.d.ts +29 -0
  287. package/dist/types/llm/openrouter/index.d.ts +15 -0
  288. package/dist/types/llm/providers.d.ts +5 -0
  289. package/dist/types/llm/text.d.ts +21 -0
  290. package/dist/types/llm/vertexai/index.d.ts +293 -0
  291. package/dist/types/messages/cache.d.ts +54 -0
  292. package/dist/types/messages/content.d.ts +7 -0
  293. package/dist/types/messages/core.d.ts +14 -0
  294. package/dist/types/messages/format.d.ts +137 -0
  295. package/dist/types/messages/ids.d.ts +3 -0
  296. package/dist/types/messages/index.d.ts +7 -0
  297. package/dist/types/messages/prune.d.ts +52 -0
  298. package/dist/types/messages/reducer.d.ts +9 -0
  299. package/dist/types/messages/tools.d.ts +17 -0
  300. package/dist/types/mockStream.d.ts +32 -0
  301. package/dist/types/prompts/collab.d.ts +1 -0
  302. package/dist/types/prompts/index.d.ts +2 -0
  303. package/dist/types/prompts/taskmanager.d.ts +41 -0
  304. package/dist/types/run.d.ts +41 -0
  305. package/dist/types/schemas/index.d.ts +1 -0
  306. package/dist/types/schemas/validate.d.ts +59 -0
  307. package/dist/types/splitStream.d.ts +37 -0
  308. package/dist/types/stream.d.ts +15 -0
  309. package/dist/types/test/mockTools.d.ts +28 -0
  310. package/dist/types/tools/BrowserTools.d.ts +87 -0
  311. package/dist/types/tools/Calculator.d.ts +34 -0
  312. package/dist/types/tools/CodeExecutor.d.ts +57 -0
  313. package/dist/types/tools/ProgrammaticToolCalling.d.ts +138 -0
  314. package/dist/types/tools/ToolNode.d.ts +51 -0
  315. package/dist/types/tools/ToolSearch.d.ts +219 -0
  316. package/dist/types/tools/handlers.d.ts +22 -0
  317. package/dist/types/tools/schema.d.ts +12 -0
  318. package/dist/types/tools/search/anthropic.d.ts +16 -0
  319. package/dist/types/tools/search/content.d.ts +4 -0
  320. package/dist/types/tools/search/firecrawl.d.ts +54 -0
  321. package/dist/types/tools/search/format.d.ts +5 -0
  322. package/dist/types/tools/search/highlights.d.ts +13 -0
  323. package/dist/types/tools/search/index.d.ts +3 -0
  324. package/dist/types/tools/search/rerankers.d.ts +38 -0
  325. package/dist/types/tools/search/schema.d.ts +103 -0
  326. package/dist/types/tools/search/search.d.ts +8 -0
  327. package/dist/types/tools/search/serper-scraper.d.ts +59 -0
  328. package/dist/types/tools/search/test.d.ts +1 -0
  329. package/dist/types/tools/search/tool.d.ts +3 -0
  330. package/dist/types/tools/search/types.d.ts +575 -0
  331. package/dist/types/tools/search/utils.d.ts +10 -0
  332. package/dist/types/types/graph.d.ts +399 -0
  333. package/dist/types/types/index.d.ts +5 -0
  334. package/dist/types/types/llm.d.ts +105 -0
  335. package/dist/types/types/messages.d.ts +4 -0
  336. package/dist/types/types/run.d.ts +112 -0
  337. package/dist/types/types/stream.d.ts +308 -0
  338. package/dist/types/types/tools.d.ts +296 -0
  339. package/dist/types/utils/contextAnalytics.d.ts +37 -0
  340. package/dist/types/utils/events.d.ts +6 -0
  341. package/dist/types/utils/graph.d.ts +2 -0
  342. package/dist/types/utils/handlers.d.ts +34 -0
  343. package/dist/types/utils/index.d.ts +9 -0
  344. package/dist/types/utils/llm.d.ts +3 -0
  345. package/dist/types/utils/llmConfig.d.ts +3 -0
  346. package/dist/types/utils/logging.d.ts +1 -0
  347. package/dist/types/utils/misc.d.ts +7 -0
  348. package/dist/types/utils/run.d.ts +27 -0
  349. package/dist/types/utils/schema.d.ts +8 -0
  350. package/dist/types/utils/title.d.ts +4 -0
  351. package/dist/types/utils/tokens.d.ts +28 -0
  352. package/dist/types/utils/toonFormat.d.ts +111 -0
  353. package/package.json +190 -0
  354. package/src/agents/AgentContext.test.ts +458 -0
  355. package/src/agents/AgentContext.ts +972 -0
  356. package/src/agents/__tests__/AgentContext.test.ts +805 -0
  357. package/src/agents/__tests__/resolveStructuredOutputMode.test.ts +137 -0
  358. package/src/common/enum.ts +203 -0
  359. package/src/common/index.ts +2 -0
  360. package/src/events.ts +223 -0
  361. package/src/graphs/Graph.ts +2228 -0
  362. package/src/graphs/MultiAgentGraph.ts +1063 -0
  363. package/src/graphs/__tests__/structured-output.integration.test.ts +809 -0
  364. package/src/graphs/__tests__/structured-output.test.ts +183 -0
  365. package/src/graphs/index.ts +2 -0
  366. package/src/index.ts +34 -0
  367. package/src/instrumentation.ts +22 -0
  368. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  369. package/src/llm/anthropic/index.ts +413 -0
  370. package/src/llm/anthropic/llm.spec.ts +1442 -0
  371. package/src/llm/anthropic/types.ts +140 -0
  372. package/src/llm/anthropic/utils/message_inputs.ts +757 -0
  373. package/src/llm/anthropic/utils/message_outputs.ts +289 -0
  374. package/src/llm/anthropic/utils/output_parsers.ts +133 -0
  375. package/src/llm/anthropic/utils/tools.ts +29 -0
  376. package/src/llm/bedrock/__tests__/bedrock-caching.test.ts +495 -0
  377. package/src/llm/bedrock/index.ts +411 -0
  378. package/src/llm/bedrock/llm.spec.ts +616 -0
  379. package/src/llm/bedrock/types.ts +51 -0
  380. package/src/llm/bedrock/utils/index.ts +18 -0
  381. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  382. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  383. package/src/llm/fake.ts +133 -0
  384. package/src/llm/google/data/gettysburg10.wav +0 -0
  385. package/src/llm/google/data/hotdog.jpg +0 -0
  386. package/src/llm/google/index.ts +337 -0
  387. package/src/llm/google/llm.spec.ts +934 -0
  388. package/src/llm/google/types.ts +56 -0
  389. package/src/llm/google/utils/common.ts +873 -0
  390. package/src/llm/google/utils/tools.ts +160 -0
  391. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  392. package/src/llm/openai/index.ts +1366 -0
  393. package/src/llm/openai/types.ts +24 -0
  394. package/src/llm/openai/utils/index.ts +1035 -0
  395. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  396. package/src/llm/openrouter/index.ts +291 -0
  397. package/src/llm/providers.ts +52 -0
  398. package/src/llm/text.ts +94 -0
  399. package/src/llm/vertexai/index.ts +359 -0
  400. package/src/messages/__tests__/tools.test.ts +473 -0
  401. package/src/messages/cache.test.ts +1261 -0
  402. package/src/messages/cache.ts +518 -0
  403. package/src/messages/content.test.ts +362 -0
  404. package/src/messages/content.ts +63 -0
  405. package/src/messages/core.ts +473 -0
  406. package/src/messages/ensureThinkingBlock.test.ts +468 -0
  407. package/src/messages/format.ts +1029 -0
  408. package/src/messages/formatAgentMessages.test.ts +1513 -0
  409. package/src/messages/formatAgentMessages.tools.test.ts +419 -0
  410. package/src/messages/formatMessage.test.ts +693 -0
  411. package/src/messages/ids.ts +26 -0
  412. package/src/messages/index.ts +7 -0
  413. package/src/messages/labelContentByAgent.test.ts +887 -0
  414. package/src/messages/prune.ts +568 -0
  415. package/src/messages/reducer.ts +80 -0
  416. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
  417. package/src/messages/tools.ts +108 -0
  418. package/src/mockStream.ts +99 -0
  419. package/src/prompts/collab.ts +6 -0
  420. package/src/prompts/index.ts +2 -0
  421. package/src/prompts/taskmanager.ts +61 -0
  422. package/src/run.ts +467 -0
  423. package/src/schemas/index.ts +2 -0
  424. package/src/schemas/schema-preparation.test.ts +500 -0
  425. package/src/schemas/validate.test.ts +358 -0
  426. package/src/schemas/validate.ts +454 -0
  427. package/src/scripts/abort.ts +157 -0
  428. package/src/scripts/ant_web_search.ts +158 -0
  429. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  430. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  431. package/src/scripts/args.ts +48 -0
  432. package/src/scripts/caching.ts +132 -0
  433. package/src/scripts/cli.ts +172 -0
  434. package/src/scripts/cli2.ts +133 -0
  435. package/src/scripts/cli3.ts +184 -0
  436. package/src/scripts/cli4.ts +191 -0
  437. package/src/scripts/cli5.ts +191 -0
  438. package/src/scripts/code_exec.ts +213 -0
  439. package/src/scripts/code_exec_files.ts +236 -0
  440. package/src/scripts/code_exec_multi_session.ts +241 -0
  441. package/src/scripts/code_exec_ptc.ts +334 -0
  442. package/src/scripts/code_exec_session.ts +282 -0
  443. package/src/scripts/code_exec_simple.ts +147 -0
  444. package/src/scripts/content.ts +138 -0
  445. package/src/scripts/empty_input.ts +137 -0
  446. package/src/scripts/handoff-test.ts +135 -0
  447. package/src/scripts/image.ts +178 -0
  448. package/src/scripts/memory.ts +97 -0
  449. package/src/scripts/multi-agent-chain.ts +331 -0
  450. package/src/scripts/multi-agent-conditional.ts +221 -0
  451. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  452. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  453. package/src/scripts/multi-agent-parallel-start.ts +265 -0
  454. package/src/scripts/multi-agent-parallel.ts +394 -0
  455. package/src/scripts/multi-agent-sequence.ts +217 -0
  456. package/src/scripts/multi-agent-supervisor.ts +365 -0
  457. package/src/scripts/multi-agent-test.ts +186 -0
  458. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  459. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  460. package/src/scripts/parallel-tools-test.ts +340 -0
  461. package/src/scripts/programmatic_exec.ts +396 -0
  462. package/src/scripts/programmatic_exec_agent.ts +231 -0
  463. package/src/scripts/search.ts +146 -0
  464. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  465. package/src/scripts/simple.ts +225 -0
  466. package/src/scripts/single-agent-metadata-test.ts +198 -0
  467. package/src/scripts/stream.ts +140 -0
  468. package/src/scripts/test-custom-prompt-key.ts +145 -0
  469. package/src/scripts/test-handoff-input.ts +170 -0
  470. package/src/scripts/test-handoff-preamble.ts +277 -0
  471. package/src/scripts/test-multi-agent-list-handoff.ts +417 -0
  472. package/src/scripts/test-parallel-agent-labeling.ts +325 -0
  473. package/src/scripts/test-parallel-handoffs.ts +291 -0
  474. package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
  475. package/src/scripts/test-thinking-handoff.ts +155 -0
  476. package/src/scripts/test-tools-before-handoff.ts +226 -0
  477. package/src/scripts/test_code_api.ts +361 -0
  478. package/src/scripts/thinking-bedrock.ts +159 -0
  479. package/src/scripts/thinking.ts +171 -0
  480. package/src/scripts/tool_search.ts +162 -0
  481. package/src/scripts/tools.ts +177 -0
  482. package/src/specs/agent-handoffs.test.ts +888 -0
  483. package/src/specs/anthropic.simple.test.ts +387 -0
  484. package/src/specs/azure.simple.test.ts +364 -0
  485. package/src/specs/cache.simple.test.ts +396 -0
  486. package/src/specs/deepseek.simple.test.ts +283 -0
  487. package/src/specs/emergency-prune.test.ts +407 -0
  488. package/src/specs/moonshot.simple.test.ts +358 -0
  489. package/src/specs/openai.simple.test.ts +311 -0
  490. package/src/specs/openrouter.simple.test.ts +107 -0
  491. package/src/specs/prune.test.ts +901 -0
  492. package/src/specs/reasoning.test.ts +201 -0
  493. package/src/specs/spec.utils.ts +3 -0
  494. package/src/specs/thinking-handoff.test.ts +620 -0
  495. package/src/specs/thinking-prune.test.ts +703 -0
  496. package/src/specs/token-distribution-edge-case.test.ts +316 -0
  497. package/src/specs/token-memoization.test.ts +32 -0
  498. package/src/specs/tool-error.test.ts +198 -0
  499. package/src/splitStream.test.ts +691 -0
  500. package/src/splitStream.ts +234 -0
  501. package/src/stream.test.ts +94 -0
  502. package/src/stream.ts +801 -0
  503. package/src/test/mockTools.ts +386 -0
  504. package/src/tools/BrowserTools.ts +393 -0
  505. package/src/tools/Calculator.test.ts +278 -0
  506. package/src/tools/Calculator.ts +46 -0
  507. package/src/tools/CodeExecutor.ts +270 -0
  508. package/src/tools/ProgrammaticToolCalling.ts +785 -0
  509. package/src/tools/ToolNode.ts +674 -0
  510. package/src/tools/ToolSearch.ts +1095 -0
  511. package/src/tools/__tests__/BrowserTools.test.ts +265 -0
  512. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.ts +319 -0
  513. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +1006 -0
  514. package/src/tools/__tests__/ToolSearch.integration.test.ts +162 -0
  515. package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
  516. package/src/tools/handlers.ts +363 -0
  517. package/src/tools/schema.ts +37 -0
  518. package/src/tools/search/anthropic.ts +51 -0
  519. package/src/tools/search/content.test.ts +173 -0
  520. package/src/tools/search/content.ts +147 -0
  521. package/src/tools/search/firecrawl.ts +210 -0
  522. package/src/tools/search/format.ts +250 -0
  523. package/src/tools/search/highlights.ts +320 -0
  524. package/src/tools/search/index.ts +3 -0
  525. package/src/tools/search/jina-reranker.test.ts +130 -0
  526. package/src/tools/search/output.md +2775 -0
  527. package/src/tools/search/rerankers.ts +242 -0
  528. package/src/tools/search/schema.ts +113 -0
  529. package/src/tools/search/search.ts +768 -0
  530. package/src/tools/search/serper-scraper.ts +155 -0
  531. package/src/tools/search/test.html +884 -0
  532. package/src/tools/search/test.md +643 -0
  533. package/src/tools/search/test.ts +159 -0
  534. package/src/tools/search/tool.ts +657 -0
  535. package/src/tools/search/types.ts +665 -0
  536. package/src/tools/search/utils.ts +79 -0
  537. package/src/types/graph.test.ts +218 -0
  538. package/src/types/graph.ts +533 -0
  539. package/src/types/index.ts +6 -0
  540. package/src/types/llm.ts +140 -0
  541. package/src/types/messages.ts +4 -0
  542. package/src/types/run.ts +128 -0
  543. package/src/types/stream.ts +417 -0
  544. package/src/types/tools.ts +355 -0
  545. package/src/utils/contextAnalytics.ts +103 -0
  546. package/src/utils/events.ts +32 -0
  547. package/src/utils/graph.ts +11 -0
  548. package/src/utils/handlers.ts +107 -0
  549. package/src/utils/index.ts +9 -0
  550. package/src/utils/llm.ts +26 -0
  551. package/src/utils/llmConfig.ts +208 -0
  552. package/src/utils/logging.ts +48 -0
  553. package/src/utils/misc.ts +57 -0
  554. package/src/utils/run.ts +106 -0
  555. package/src/utils/schema.ts +35 -0
  556. package/src/utils/title.ts +177 -0
  557. package/src/utils/tokens.ts +142 -0
  558. package/src/utils/toonFormat.ts +475 -0
@@ -0,0 +1,1615 @@
1
+ 'use strict';
2
+
3
+ var nanoid = require('nanoid');
4
+ var stream = require('@langchain/core/utils/stream');
5
+ var googleVertexai = require('@langchain/google-vertexai');
6
+ var langgraph = require('@langchain/langgraph');
7
+ var runnables = require('@langchain/core/runnables');
8
+ var messages = require('@langchain/core/messages');
9
+ var _enum = require('../common/enum.cjs');
10
+ var core = require('../messages/core.cjs');
11
+ var prune = require('../messages/prune.cjs');
12
+ var format = require('../messages/format.cjs');
13
+ var cache = require('../messages/cache.cjs');
14
+ var content = require('../messages/content.cjs');
15
+ var tools = require('../messages/tools.cjs');
16
+ var graph = require('../utils/graph.cjs');
17
+ var llm = require('../utils/llm.cjs');
18
+ var run = require('../utils/run.cjs');
19
+ require('js-tiktoken');
20
+ require('../utils/toonFormat.cjs');
21
+ var contextAnalytics = require('../utils/contextAnalytics.cjs');
22
+ require('zod-to-json-schema');
23
+ var providers = require('../llm/providers.cjs');
24
+ var ToolNode = require('../tools/ToolNode.cjs');
25
+ var index = require('../llm/openai/index.cjs');
26
+ var events = require('../utils/events.cjs');
27
+ var schema = require('../tools/schema.cjs');
28
+ var validate = require('../schemas/validate.cjs');
29
+ var AgentContext = require('../agents/AgentContext.cjs');
30
+ var graph$1 = require('../types/graph.cjs');
31
+ var fake = require('../llm/fake.cjs');
32
+
33
+ /* eslint-disable no-console */
34
+ // src/graphs/Graph.ts
35
+ const { AGENT, TOOLS } = _enum.GraphNodeKeys;
36
/**
 * Base graph state container shared by graph implementations.
 * Holds run-step bookkeeping maps, streamed content parts, and per-tool
 * session state; subclasses (e.g. StandardGraph) add execution logic.
 */
class Graph {
    // Tracks, per step key, whether that message step produced tool calls
    messageStepHasToolCalls = new Map();
    // Message IDs recorded for each completed step key
    messageIdsByStepKey = new Map();
    // Preliminary (in-flight) message IDs recorded for each step key
    prelimMessageIdsByStepKey = new Map();
    // Last LangGraph runnable config seen by the graph
    config;
    // Ordered run-step content parts accumulated during the run
    contentData = [];
    // Step IDs generated for each step key (a key may map to several steps)
    stepKeyIds = new Map();
    // Maps a step ID to its index within `contentData`
    contentIndexMap = new Map();
    // Maps a tool call ID to the step ID that invoked it
    toolCallStepIds = new Map();
    // Abort signal used to cancel in-flight model/tool work
    signal;
    /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
    invokedToolIds;
    // Registry of handlers for graph events
    handlerRegistry;
    /**
     * Tool session contexts for automatic state persistence across tool invocations.
     * Keyed by tool name (e.g., Constants.EXECUTE_CODE).
     * Currently supports code execution session tracking (session_id, files).
     */
    sessions = new Map();
}
56
class StandardGraph extends Graph {
    // Fake/test model that, when set, takes precedence over the configured model
    overrideModel;
    /** Optional compile options passed into workflow.compile() */
    compileOptions;
    // Messages accumulated across the run (graph conversation state)
    messages = [];
    // Identifier of the current run
    runId;
    // Index into `messages` where the current run's output begins
    startIndex = 0;
    // Abort signal for the run (re-declared from the base class)
    signal;
    /** Map of agent contexts by agent ID */
    agentContexts = new Map();
    /** Default agent ID to use */
    defaultAgentId;
    /** Normalized finish/stop reason from the last LLM invocation */
    lastFinishReason;
70
+ constructor({
71
+ // parent-level graph inputs
72
+ runId, signal, agents, tokenCounter, indexTokenCountMap, }) {
73
+ super();
74
+ this.runId = runId;
75
+ this.signal = signal;
76
+ if (agents.length === 0) {
77
+ throw new Error('At least one agent configuration is required');
78
+ }
79
+ for (const agentConfig of agents) {
80
+ const agentContext = AgentContext.AgentContext.fromConfig(agentConfig, tokenCounter, indexTokenCountMap);
81
+ this.agentContexts.set(agentConfig.agentId, agentContext);
82
+ }
83
+ this.defaultAgentId = agents[0].agentId;
84
+ }
85
/* Init */
/**
 * Clears per-run state so the graph can be reused for a new run.
 * @param {boolean} [keepContent] - when true, previously accumulated content
 *   parts (`contentData` / `contentIndexMap`) are preserved across the reset
 */
resetValues(keepContent) {
    this.messages = [];
    this.lastFinishReason = undefined;
    this.config = graph.resetIfNotEmpty(this.config, undefined);
    if (keepContent !== true) {
        this.contentData = graph.resetIfNotEmpty(this.contentData, []);
        this.contentIndexMap = graph.resetIfNotEmpty(this.contentIndexMap, new Map());
    }
    this.stepKeyIds = graph.resetIfNotEmpty(this.stepKeyIds, new Map());
    this.toolCallStepIds = graph.resetIfNotEmpty(this.toolCallStepIds, new Map());
    this.messageIdsByStepKey = graph.resetIfNotEmpty(this.messageIdsByStepKey, new Map());
    this.messageStepHasToolCalls = graph.resetIfNotEmpty(this.messageStepHasToolCalls, new Map());
    this.prelimMessageIdsByStepKey = graph.resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
    this.invokedToolIds = graph.resetIfNotEmpty(this.invokedToolIds, undefined);
    // Each agent context also clears its own per-run state
    for (const context of this.agentContexts.values()) {
        context.reset();
    }
}
104
/**
 * Returns the normalized finish/stop reason from the last LLM invocation.
 * Used by callers to detect when the response was truncated due to max_tokens.
 * @returns {string|undefined} the stored reason, or `undefined` before any invocation
 */
getLastFinishReason() {
    return this.lastFinishReason;
}
111
+ /**
112
+ * Estimates a human-friendly description of the conversation timeframe based on message count.
113
+ * Uses rough heuristics to provide context about how much history is available.
114
+ *
115
+ * @param messageCount - Number of messages in the remaining context
116
+ * @returns A friendly description like "the last few minutes", "the past hour", etc.
117
+ */
118
+ getContextTimeframeDescription(messageCount) {
119
+ // Rough heuristics based on typical conversation patterns:
120
+ // - Very active chat: ~20-30 messages per hour
121
+ // - Normal chat: ~10-15 messages per hour
122
+ // - Slow/thoughtful chat: ~5-8 messages per hour
123
+ // We use a middle estimate of ~12 messages per hour
124
+ if (messageCount <= 5) {
125
+ return 'just the last few exchanges';
126
+ }
127
+ else if (messageCount <= 15) {
128
+ return 'the last several minutes';
129
+ }
130
+ else if (messageCount <= 30) {
131
+ return 'roughly the past hour';
132
+ }
133
+ else if (messageCount <= 60) {
134
+ return 'the past couple of hours';
135
+ }
136
+ else if (messageCount <= 150) {
137
+ return 'the past few hours';
138
+ }
139
+ else if (messageCount <= 300) {
140
+ return 'roughly a day\'s worth';
141
+ }
142
+ else if (messageCount <= 700) {
143
+ return 'the past few days';
144
+ }
145
+ else {
146
+ return 'about a week or more';
147
+ }
148
+ }
149
+ /* Run Step Processing */
150
+ getRunStep(stepId) {
151
+ const index = this.contentIndexMap.get(stepId);
152
+ if (index !== undefined) {
153
+ return this.contentData[index];
154
+ }
155
+ return undefined;
156
+ }
157
+ getAgentContext(metadata) {
158
+ if (!metadata) {
159
+ throw new Error('No metadata provided to retrieve agent context');
160
+ }
161
+ const currentNode = metadata.langgraph_node;
162
+ if (!currentNode) {
163
+ throw new Error('No langgraph_node in metadata to retrieve agent context');
164
+ }
165
+ let agentId;
166
+ if (currentNode.startsWith(AGENT)) {
167
+ agentId = currentNode.substring(AGENT.length);
168
+ }
169
+ else if (currentNode.startsWith(TOOLS)) {
170
+ agentId = currentNode.substring(TOOLS.length);
171
+ }
172
+ const agentContext = this.agentContexts.get(agentId ?? '');
173
+ if (!agentContext) {
174
+ throw new Error(`No agent context found for agent ID ${agentId}`);
175
+ }
176
+ return agentContext;
177
+ }
178
+ getStepKey(metadata) {
179
+ if (!metadata)
180
+ return '';
181
+ const keyList = this.getKeyList(metadata);
182
+ if (this.checkKeyList(keyList)) {
183
+ throw new Error('Missing metadata');
184
+ }
185
+ return graph.joinKeys(keyList);
186
+ }
187
+ getStepIdByKey(stepKey, index) {
188
+ const stepIds = this.stepKeyIds.get(stepKey);
189
+ if (!stepIds) {
190
+ throw new Error(`No step IDs found for stepKey ${stepKey}`);
191
+ }
192
+ if (index === undefined) {
193
+ return stepIds[stepIds.length - 1];
194
+ }
195
+ return stepIds[index];
196
+ }
197
+ generateStepId(stepKey) {
198
+ const stepIds = this.stepKeyIds.get(stepKey);
199
+ let newStepId;
200
+ let stepIndex = 0;
201
+ if (stepIds) {
202
+ stepIndex = stepIds.length;
203
+ newStepId = `step_${nanoid.nanoid()}`;
204
+ stepIds.push(newStepId);
205
+ this.stepKeyIds.set(stepKey, stepIds);
206
+ }
207
+ else {
208
+ newStepId = `step_${nanoid.nanoid()}`;
209
+ this.stepKeyIds.set(stepKey, [newStepId]);
210
+ }
211
+ return [newStepId, stepIndex];
212
+ }
213
+ getKeyList(metadata) {
214
+ if (!metadata)
215
+ return [];
216
+ const keyList = [
217
+ metadata.run_id,
218
+ metadata.thread_id,
219
+ metadata.langgraph_node,
220
+ metadata.langgraph_step,
221
+ metadata.checkpoint_ns,
222
+ ];
223
+ const agentContext = this.getAgentContext(metadata);
224
+ if (agentContext.currentTokenType === _enum.ContentTypes.THINK ||
225
+ agentContext.currentTokenType === 'think_and_text') {
226
+ keyList.push('reasoning');
227
+ }
228
+ else if (agentContext.tokenTypeSwitch === 'content') {
229
+ keyList.push('post-reasoning');
230
+ }
231
+ if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
232
+ keyList.push(this.invokedToolIds.size + '');
233
+ }
234
+ return keyList;
235
+ }
236
+ checkKeyList(keyList) {
237
+ return keyList.some((key) => key === undefined);
238
+ }
239
/* Misc.*/
/**
 * Returns only the messages produced during the current run
 * (everything from `startIndex` onward).
 */
getRunMessages() {
    return this.messages.slice(this.startIndex);
}
243
/**
 * Converts the current run's messages into output content parts.
 */
getContentParts() {
    return core.convertMessagesToContent(this.messages.slice(this.startIndex));
}
246
+ /**
247
+ * Get all run steps, optionally filtered by agent ID
248
+ */
249
+ getRunSteps(agentId) {
250
+ if (agentId == null || agentId === '') {
251
+ return [...this.contentData];
252
+ }
253
+ return this.contentData.filter((step) => step.agentId === agentId);
254
+ }
255
+ /**
256
+ * Get run steps grouped by agent ID
257
+ */
258
+ getRunStepsByAgent() {
259
+ const stepsByAgent = new Map();
260
+ for (const step of this.contentData) {
261
+ if (step.agentId == null || step.agentId === '')
262
+ continue;
263
+ const steps = stepsByAgent.get(step.agentId) ?? [];
264
+ steps.push(step);
265
+ stepsByAgent.set(step.agentId, steps);
266
+ }
267
+ return stepsByAgent;
268
+ }
269
+ /**
270
+ * Get agent IDs that participated in this run
271
+ */
272
+ getActiveAgentIds() {
273
+ const agentIds = new Set();
274
+ for (const step of this.contentData) {
275
+ if (step.agentId != null && step.agentId !== '') {
276
+ agentIds.add(step.agentId);
277
+ }
278
+ }
279
+ return Array.from(agentIds);
280
+ }
281
+ /**
282
+ * Maps contentPart indices to agent IDs for post-run analysis
283
+ * Returns a map where key is the contentPart index and value is the agentId
284
+ */
285
+ getContentPartAgentMap() {
286
+ const contentPartAgentMap = new Map();
287
+ for (const step of this.contentData) {
288
+ if (step.agentId != null &&
289
+ step.agentId !== '' &&
290
+ Number.isFinite(step.index)) {
291
+ contentPartAgentMap.set(step.index, step.agentId);
292
+ }
293
+ }
294
+ return contentPartAgentMap;
295
+ }
296
+ /**
297
+ * Get the context breakdown from the primary agent for admin token tracking.
298
+ * Returns detailed token counts for instructions, tools, etc.
299
+ */
300
+ getContextBreakdown() {
301
+ const primaryContext = this.agentContexts.get(this.defaultAgentId);
302
+ if (!primaryContext) {
303
+ return null;
304
+ }
305
+ return primaryContext.getContextBreakdown();
306
+ }
307
/**
 * Get the latest context analytics from the graph.
 * Returns metrics like utilization %, TOON stats, message breakdown.
 * @returns the last stored analytics, or `null` when none were recorded
 */
getContextAnalytics() {
    return this.lastContextAnalytics ?? null;
}
314
+ /** Store the latest context analytics for retrieval after run */
315
+ lastContextAnalytics = null;
316
+ /* Graph */
317
+ createSystemRunnable({ provider, clientOptions, instructions, additional_instructions, }) {
318
+ let finalInstructions = instructions;
319
+ if (additional_instructions != null && additional_instructions !== '') {
320
+ finalInstructions =
321
+ finalInstructions != null && finalInstructions
322
+ ? `${finalInstructions}\n\n${additional_instructions}`
323
+ : additional_instructions;
324
+ }
325
+ if (finalInstructions != null &&
326
+ finalInstructions &&
327
+ provider === _enum.Providers.ANTHROPIC &&
328
+ clientOptions.promptCache === true) {
329
+ finalInstructions = {
330
+ content: [
331
+ {
332
+ type: 'text',
333
+ text: instructions,
334
+ cache_control: { type: 'ephemeral' },
335
+ },
336
+ ],
337
+ };
338
+ }
339
+ if (finalInstructions != null && finalInstructions !== '') {
340
+ const systemMessage = new messages.SystemMessage(finalInstructions);
341
+ return runnables.RunnableLambda.from((messages) => {
342
+ return [systemMessage, ...messages];
343
+ }).withConfig({ runName: 'prompt' });
344
+ }
345
+ }
346
+ initializeTools({ currentTools, currentToolMap, agentContext, }) {
347
+ const toolDefinitions = agentContext?.toolDefinitions;
348
+ const eventDrivenMode = toolDefinitions != null && toolDefinitions.length > 0;
349
+ if (eventDrivenMode) {
350
+ const schemaTools = schema.createSchemaOnlyTools(toolDefinitions);
351
+ const toolDefMap = new Map(toolDefinitions.map((def) => [def.name, def]));
352
+ return new ToolNode.ToolNode({
353
+ tools: schemaTools,
354
+ toolMap: new Map(schemaTools.map((tool) => [tool.name, tool])),
355
+ toolCallStepIds: this.toolCallStepIds,
356
+ errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
357
+ toolRegistry: agentContext?.toolRegistry,
358
+ sessions: this.sessions,
359
+ eventDrivenMode: true,
360
+ toolDefinitions: toolDefMap,
361
+ agentId: agentContext?.agentId,
362
+ });
363
+ }
364
+ return new ToolNode.ToolNode({
365
+ tools: currentTools ?? [],
366
+ toolMap: currentToolMap,
367
+ toolCallStepIds: this.toolCallStepIds,
368
+ errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
369
+ toolRegistry: agentContext?.toolRegistry,
370
+ sessions: this.sessions,
371
+ });
372
+ }
373
/**
 * Instantiates the chat model for the given provider and mirrors sampling
 * options onto the instance (the constructors do not pick all of them up
 * from `clientOptions` directly). When `tools` is non-empty, returns the
 * model with tools bound.
 */
initializeModel({ provider, tools, clientOptions, }) {
    const ChatModelClass = providers.getChatModelClass(provider);
    const model = new ChatModelClass(clientOptions ?? {});
    // OpenAI-compatible models: copy sampling parameters onto the instance
    if (llm.isOpenAILike(provider) &&
        (model instanceof index.ChatOpenAI || model instanceof index.AzureChatOpenAI)) {
        model.temperature = clientOptions.temperature;
        model.topP = clientOptions.topP;
        model.frequencyPenalty = clientOptions.frequencyPenalty;
        model.presencePenalty = clientOptions.presencePenalty;
        model.n = clientOptions.n;
    }
    // Vertex AI exposes a slightly different set of sampling knobs
    else if (provider === _enum.Providers.VERTEXAI &&
        model instanceof googleVertexai.ChatVertexAI) {
        model.temperature = clientOptions.temperature;
        model.topP = clientOptions.topP;
        model.topK = clientOptions.topK;
        model.topLogprobs = clientOptions.topLogprobs;
        model.frequencyPenalty = clientOptions.frequencyPenalty;
        model.presencePenalty = clientOptions.presencePenalty;
        model.maxOutputTokens = clientOptions.maxOutputTokens;
    }
    // Nothing to bind: return the bare model
    if (!tools || tools.length === 0) {
        return model;
    }
    return model.bindTools(tools);
}
407
/**
 * Replaces the configured model with a fake streaming LLM for tests.
 * @param responses - canned responses the fake model streams back
 * @param [sleep] - optional delay between streamed chunks
 * @param [toolCalls] - optional tool calls the fake model emits
 */
overrideTestModel(responses, sleep, toolCalls) {
    this.overrideModel = fake.createFakeStreamingLLM({
        responses,
        sleep,
        toolCalls,
    });
}
414
/**
 * Creates a fresh, unbound chat model instance for the given provider.
 */
getNewModel({ provider, clientOptions, }) {
    const ChatModelClass = providers.getChatModelClass(provider);
    return new ChatModelClass(clientOptions ?? {});
}
418
+ getUsageMetadata(finalMessage) {
419
+ if (finalMessage &&
420
+ 'usage_metadata' in finalMessage &&
421
+ finalMessage.usage_metadata != null) {
422
+ return finalMessage.usage_metadata;
423
+ }
424
+ }
425
/** Execute model invocation with streaming support */
async attemptInvoke({ currentModel, finalMessages, provider, tools, }, config) {
    // The test override model, when set, takes precedence
    const model = this.overrideModel ?? currentModel;
    if (!model) {
        throw new Error('No model found');
    }
    // Providers that need manual tool streaming: consume the stream ourselves,
    // re-emit each chunk as a custom event, and aggregate into a single message
    if ((tools?.length ?? 0) > 0 && providers.manualToolStreamProviders.has(provider)) {
        if (!model.stream) {
            throw new Error('Model does not support stream');
        }
        const stream$1 = await model.stream(finalMessages, config);
        let finalChunk;
        for await (const chunk of stream$1) {
            await events.safeDispatchCustomEvent(_enum.GraphEvents.CHAT_MODEL_STREAM, { chunk, emitted: true }, config);
            finalChunk = finalChunk ? stream.concat(finalChunk, chunk) : chunk;
        }
        // Normalize provider-specific delta properties on the aggregated chunk
        finalChunk = core.modifyDeltaProperties(provider, finalChunk);
        return { messages: [finalChunk] };
    }
    else {
        const finalMessage = await model.invoke(finalMessages, config);
        // Drop malformed tool calls that lack a name
        if ((finalMessage.tool_calls?.length ?? 0) > 0) {
            finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => !!tool_call.name);
        }
        return { messages: [finalMessage] };
    }
}
452
/**
 * Execute model invocation with structured output.
 * Uses native constrained decoding (jsonSchema method) for supported providers,
 * or falls back to withStructuredOutput with functionCalling/jsonMode.
 *
 * Native mode uses provider APIs directly:
 * - Anthropic: output_config.format via LangChain's method: 'json_schema'
 * - OpenAI/Azure: response_format.json_schema via LangChain's method: 'jsonSchema'
 * - Bedrock: falls back to functionCalling (LangChain doesn't support native yet)
 *
 * @returns an object with `structuredResponse` (the parsed result) and, when
 *   the model supports includeRaw, `rawMessage` (the raw provider message)
 * @throws StructuredOutputTruncatedError when the response hit the token limit
 * @throws StructuredOutputRefusalError when the model refused to answer
 * @throws {Error} after `maxRetries + 1` failed validation attempts
 */
async attemptStructuredInvoke({ currentModel, finalMessages, schema, structuredOutputConfig, provider, agentContext, }, config) {
    const model = this.overrideModel ?? currentModel;
    if (!model) {
        throw new Error('No model found');
    }
    // Check if model supports withStructuredOutput
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    if (typeof model.withStructuredOutput !== 'function') {
        throw new Error(`The selected model does not support structured output. ` +
            `Please use a model that supports JSON schema output (e.g., OpenAI GPT-4, Anthropic Claude, Google Gemini) ` +
            `or disable structured output for this agent.`);
    }
    const { name = 'StructuredResponse', includeRaw: _includeRaw = false, handleErrors = true, maxRetries = 2, } = structuredOutputConfig;
    // Resolve the structured output method using AgentContext's provider-aware logic
    let method;
    if (agentContext) {
        const resolved = agentContext.resolveStructuredOutputMode();
        method = resolved.method;
        if (resolved.warnings.length > 0) {
            console.warn('[Graph] Structured output mode warnings:', resolved.warnings);
        }
    }
    else {
        // Legacy fallback: use the old mode-based resolution
        const mode = structuredOutputConfig.mode ?? 'auto';
        if (mode === 'tool') {
            method = 'functionCalling';
        }
        else if (mode === 'provider') {
            method = provider === _enum.Providers.BEDROCK ? 'functionCalling' : 'jsonMode';
        }
        else {
            // 'auto': let LangChain pick the method for the model
            method = undefined;
        }
    }
    // Prepare schema for provider-specific constraints when using native/jsonSchema mode
    let preparedSchema = schema;
    if (method === 'jsonSchema' && provider) {
        const { schema: prepared, warnings } = validate.prepareSchemaForProvider(schema, provider, structuredOutputConfig.strict !== false);
        preparedSchema = prepared;
        if (warnings.length > 0) {
            console.log('[Graph] Schema preparation warnings:', warnings);
        }
    }
    // Use withStructuredOutput to bind the schema
    // Always use includeRaw: true internally so we can debug what's returned
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const structuredModel = model.withStructuredOutput(preparedSchema, {
        name,
        method: method === 'native' ? undefined : method,
        includeRaw: true, // Always true internally for debugging
        strict: structuredOutputConfig.strict !== false,
    });
    console.log('[Graph] Structured output config:', {
        name,
        method,
        provider,
        schemaKeys: Object.keys(preparedSchema),
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        modelName: model.model || model.modelId || 'unknown',
    });
    let lastError;
    let attempts = 0;
    // Retry loop: validation failures are retried with the error appended to
    // the conversation; refusal/truncation errors abort immediately
    while (attempts <= maxRetries) {
        try {
            // Note: We pass the original config here. The stream aggregator will filter out
            // the synthetic "response" tool call events from withStructuredOutput()
            const result = await structuredModel.invoke(finalMessages, config);
            // Debug: log what we got back
            console.log('[Graph] Structured output raw result type:', typeof result);
            // Check for refusal or truncation in the raw message
            if (result?.raw) {
                const rawMsg = result.raw;
                console.log('[Graph] Raw message content type:', typeof rawMsg?.content);
                console.log('[Graph] Raw message tool_calls:', rawMsg?.tool_calls?.length ?? 0);
                if (rawMsg?.content && typeof rawMsg.content === 'string' && rawMsg.content.length > 0) {
                    console.log('[Graph] Raw message text content (first 200):', rawMsg.content.substring(0, 200));
                }
                // Check stop reason for refusal or truncation
                const responseMetadata = rawMsg.response_metadata ?? {};
                const stopReason = responseMetadata.stop_reason ?? // Anthropic
                    responseMetadata.finish_reason ?? // OpenAI
                    responseMetadata.stopReason; // Bedrock
                if (stopReason === 'max_tokens' || stopReason === 'length') {
                    throw new graph$1.StructuredOutputTruncatedError(stopReason);
                }
                // Check for Anthropic refusal (stop_reason won't be 'refusal' but content may indicate it)
                // OpenAI uses message.refusal field
                const refusal = rawMsg.refusal;
                if (refusal) {
                    throw new graph$1.StructuredOutputRefusalError(refusal);
                }
            }
            // Handle response - we always use includeRaw internally
            if (result?.raw && result?.parsed !== undefined) {
                return {
                    structuredResponse: result.parsed,
                    rawMessage: result.raw,
                };
            }
            // Fallback for models that don't support includeRaw
            return {
                structuredResponse: result,
            };
        }
        catch (error) {
            // Don't retry on refusal or truncation errors — they need user action
            if (error instanceof graph$1.StructuredOutputRefusalError ||
                error instanceof graph$1.StructuredOutputTruncatedError) {
                throw error;
            }
            lastError = error;
            attempts++;
            // If error handling is disabled, throw immediately
            if (handleErrors === false) {
                throw error;
            }
            // If we've exhausted retries, throw
            if (attempts > maxRetries) {
                throw new Error(`Structured output failed after ${maxRetries + 1} attempts: ${lastError.message}`);
            }
            // Add error message to conversation for retry
            const errorMessage = typeof handleErrors === 'string'
                ? handleErrors
                : `The response did not match the expected schema. Error: ${lastError.message}. Please try again with a valid response.`;
            console.warn(`[Graph] Structured output attempt ${attempts} failed: ${lastError.message}. Retrying...`);
            // Add the error as a human message for context
            finalMessages = [
                ...finalMessages,
                new messages.HumanMessage({
                    content: `[VALIDATION ERROR]\n${errorMessage}`,
                }),
            ];
        }
    }
    throw lastError ?? new Error('Structured output failed');
}
599
+ cleanupSignalListener(currentModel) {
600
+ if (!this.signal) {
601
+ return;
602
+ }
603
+ const model = this.overrideModel ?? currentModel;
604
+ if (!model) {
605
+ return;
606
+ }
607
+ const client = model?.exposedClient;
608
+ if (!client?.abortHandler) {
609
+ return;
610
+ }
611
+ this.signal.removeEventListener('abort', client.abortHandler);
612
+ client.abortHandler = undefined;
613
+ }
614
/**
 * Perform structured output invocation: creates a fresh model without tools bound,
 * removes thinking configuration, invokes with the schema, emits the event,
 * and returns a clean AIMessageChunk without tool_calls.
 *
 * Used by both the immediate path (no tools) and the deferred path (after tool use).
 *
 * @returns an object with the run's `messages` (a single clean AI message when a
 *   raw message was available, otherwise empty) and the `structuredResponse`
 * @throws {Error} when no structured output schema is configured on the context
 */
async performStructuredOutput({ agentContext, finalMessages, config, }) {
    const schema = agentContext.getStructuredOutputSchema();
    if (!schema) {
        throw new Error('Structured output schema is not configured');
    }
    // Get a fresh model WITHOUT tools bound
    // bindTools() returns RunnableBinding which lacks withStructuredOutput
    // Also disable thinking mode - Anthropic/Bedrock doesn't allow tool_choice with thinking enabled
    const structuredClientOptions = { ...agentContext.clientOptions };
    // Determine if streaming is possible for this structured output mode
    // Native/jsonSchema modes can stream; tool/functionCalling modes cannot (synthetic tool calls break UX)
    const resolved = agentContext.resolveStructuredOutputMode();
    const canStream = resolved.method === 'jsonSchema' || resolved.method === 'jsonMode';
    if (!canStream) {
        // Disable streaming for function calling mode (synthetic tool calls break streaming UX)
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        structuredClientOptions.streaming = false;
    }
    // For native/jsonSchema mode, Anthropic's constrained decoding works with thinking enabled
    // (grammar only applies to final output, not thinking blocks). For function calling mode,
    // thinking must be disabled because forced tool_choice is incompatible with thinking.
    const needsThinkingDisabled = resolved.method !== 'jsonSchema';
    if (needsThinkingDisabled) {
        // Remove thinking configuration for Bedrock
        if (agentContext.provider === _enum.Providers.BEDROCK) {
            const bedrockOpts = structuredClientOptions;
            if (bedrockOpts.additionalModelRequestFields) {
                // Copy before deleting so the agent context's options stay untouched
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                const additionalFields = Object.assign({}, bedrockOpts.additionalModelRequestFields);
                delete additionalFields.thinking;
                delete additionalFields.budgetTokens;
                bedrockOpts.additionalModelRequestFields = additionalFields;
            }
        }
        // Remove thinking configuration for Anthropic direct API
        if (agentContext.provider === _enum.Providers.ANTHROPIC) {
            const anthropicOpts = structuredClientOptions;
            if (anthropicOpts.thinking) {
                delete anthropicOpts.thinking;
            }
        }
    }
    const structuredModel = this.getNewModel({
        provider: agentContext.provider,
        clientOptions: structuredClientOptions,
    });
    const { structuredResponse, rawMessage } = await this.attemptStructuredInvoke({
        currentModel: structuredModel,
        finalMessages,
        schema,
        structuredOutputConfig: agentContext.structuredOutput,
        provider: agentContext.provider,
        agentContext,
    }, config);
    // Emit structured output event
    await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_STRUCTURED_OUTPUT, {
        structuredResponse,
        schema,
        raw: rawMessage,
    }, config);
    // Create a clean message WITHOUT tool_calls for structured output.
    // The rawMessage contains a tool_call for the structured output schema (e.g., "response"),
    // which would cause the graph router to send it to the tool node.
    // We return a clean AI message that ends the graph.
    let cleanMessage;
    if (rawMessage) {
        cleanMessage = new messages.AIMessageChunk({
            content: JSON.stringify(structuredResponse, null, 2),
            id: rawMessage.id,
            response_metadata: rawMessage.response_metadata,
            usage_metadata: rawMessage.usage_metadata,
        });
    }
    return {
        messages: cleanMessage ? [cleanMessage] : [],
        structuredResponse,
    };
}
699
+ createCallModel(agentId = 'default') {
700
+ return async (state, config) => {
701
+ /**
702
+ * Get agent context - it must exist by this point
703
+ */
704
+ const agentContext = this.agentContexts.get(agentId);
705
+ if (!agentContext) {
706
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
707
+ }
708
+ if (!config) {
709
+ throw new Error('No config provided');
710
+ }
711
+ let { messages: messages$1 } = state;
712
+ // CACHE OPTIMIZATION: Inject dynamicContext as a HumanMessage at the start of conversation
713
+ // This keeps the system message static (cacheable) while providing dynamic context
714
+ // (timestamps, user info, tool context) as conversation content instead.
715
+ // Only inject on the first turn when messages don't already have the context marker.
716
+ if (agentContext.dynamicContext &&
717
+ messages$1.length > 0 &&
718
+ !messages$1.some((m) => m instanceof messages.HumanMessage &&
719
+ typeof m.content === 'string' &&
720
+ m.content.startsWith('[SESSION_CONTEXT]'))) {
721
+ const dynamicContextMessage = new messages.HumanMessage({
722
+ content: `[SESSION_CONTEXT]\n${agentContext.dynamicContext}`,
723
+ });
724
+ const ackMessage = new messages.AIMessageChunk({
725
+ content: 'Understood. I have noted the session context including the current date/time (CST) and will apply it appropriately.',
726
+ });
727
+ messages$1 = [dynamicContextMessage, ackMessage, ...messages$1];
728
+ }
729
+ // Extract tool discoveries from current turn only (similar to formatArtifactPayload pattern)
730
+ const discoveredNames = tools.extractToolDiscoveries(messages$1);
731
+ if (discoveredNames.length > 0) {
732
+ agentContext.markToolsAsDiscovered(discoveredNames);
733
+ }
734
+ const toolsForBinding = agentContext.getToolsForBinding();
735
+ let model = this.overrideModel ??
736
+ this.initializeModel({
737
+ tools: toolsForBinding,
738
+ provider: agentContext.provider,
739
+ clientOptions: agentContext.clientOptions,
740
+ });
741
+ if (agentContext.systemRunnable) {
742
+ model = agentContext.systemRunnable.pipe(model);
743
+ }
744
+ if (agentContext.tokenCalculationPromise) {
745
+ await agentContext.tokenCalculationPromise;
746
+ }
747
+ if (!config.signal) {
748
+ config.signal = this.signal;
749
+ }
750
+ this.config = config;
751
+ let messagesToUse = messages$1;
752
+ // ====================================================================
753
+ // PRE-PRUNING DELEGATION CHECK
754
+ // Before pruning strips messages (losing context), check if we should
755
+ // delegate instead. If context would be pruned AND the agent has the
756
+ // task tool, inject a delegation hint and SKIP pruning — preserving
757
+ // the content for the LLM to understand what to delegate.
758
+ // ====================================================================
759
+ let delegationInjectedPrePrune = false;
760
+ const hasTaskToolPrePrune = agentContext.tools?.some((tool) => {
761
+ const toolName = typeof tool === 'object' && 'name' in tool
762
+ ? tool.name
763
+ : '';
764
+ return toolName === 'task';
765
+ });
766
+ if (hasTaskToolPrePrune &&
767
+ agentContext.tokenCounter &&
768
+ agentContext.maxContextTokens != null) {
769
+ // Estimate total tokens in messages BEFORE pruning
770
+ let prePruneTokens = 0;
771
+ for (const msg of messages$1) {
772
+ prePruneTokens += agentContext.tokenCounter(msg);
773
+ }
774
+ // Add instruction tokens (system prompt)
775
+ prePruneTokens += agentContext.instructionTokens ?? 0;
776
+ const prePruneUtilization = (prePruneTokens / agentContext.maxContextTokens) * 100;
777
+ if (prePruneUtilization > 70) {
778
+ console.warn(`[Graph] PRE-PRUNE delegation check: ${prePruneUtilization.toFixed(1)}% utilization ` +
779
+ `(${prePruneTokens}/${agentContext.maxContextTokens} tokens). ` +
780
+ `Injecting delegation hint INSTEAD of pruning.`);
781
+ delegationInjectedPrePrune = true;
782
+ }
783
+ }
784
+ if (!agentContext.pruneMessages &&
785
+ agentContext.tokenCounter &&
786
+ agentContext.maxContextTokens != null &&
787
+ agentContext.indexTokenCountMap[0] != null) {
788
+ const isAnthropicWithThinking = (agentContext.provider === _enum.Providers.ANTHROPIC &&
789
+ agentContext.clientOptions.thinking !=
790
+ null) ||
791
+ (agentContext.provider === _enum.Providers.BEDROCK &&
792
+ agentContext.clientOptions
793
+ .additionalModelRequestFields?.['thinking'] != null) ||
794
+ (agentContext.provider === _enum.Providers.OPENAI &&
795
+ agentContext.clientOptions.modelKwargs
796
+ ?.thinking?.type === 'enabled');
797
+ agentContext.pruneMessages = prune.createPruneMessages({
798
+ startIndex: this.startIndex,
799
+ provider: agentContext.provider,
800
+ tokenCounter: agentContext.tokenCounter,
801
+ maxTokens: agentContext.maxContextTokens,
802
+ thinkingEnabled: isAnthropicWithThinking,
803
+ indexTokenCountMap: agentContext.indexTokenCountMap,
804
+ });
805
+ }
806
+ if (agentContext.pruneMessages && !delegationInjectedPrePrune) {
807
+ const { context, indexTokenCountMap, messagesToRefine } = agentContext.pruneMessages({
808
+ messages: messages$1,
809
+ usageMetadata: agentContext.currentUsage,
810
+ // startOnMessageType: 'human',
811
+ });
812
+ agentContext.indexTokenCountMap = indexTokenCountMap;
813
+ messagesToUse = context;
814
+ // Summarize discarded messages if callback provided
815
+ if (messagesToRefine && messagesToRefine.length > 0 && agentContext.summarizeCallback) {
816
+ try {
817
+ const summary = await agentContext.summarizeCallback(messagesToRefine);
818
+ if (summary) {
819
+ const summaryMsg = new messages.SystemMessage(`[Conversation Summary]\n${summary}`);
820
+ // Insert after system message (if present), before conversation messages
821
+ const systemIdx = messagesToUse[0]?.getType() === 'system' ? 1 : 0;
822
+ messagesToUse = [
823
+ ...messagesToUse.slice(0, systemIdx),
824
+ summaryMsg,
825
+ ...messagesToUse.slice(systemIdx),
826
+ ];
827
+ }
828
+ }
829
+ catch (err) {
830
+ console.error('[Graph] Summarization callback failed:', err);
831
+ }
832
+ }
833
+ }
834
+ else if (delegationInjectedPrePrune) {
835
+ console.info('[Graph] Skipping pruning — delegation will handle context pressure');
836
+ }
837
+ let finalMessages = messagesToUse;
838
+ if (agentContext.useLegacyContent) {
839
+ finalMessages = content.formatContentStrings(finalMessages);
840
+ }
841
+ const lastMessageX = finalMessages.length >= 2
842
+ ? finalMessages[finalMessages.length - 2]
843
+ : null;
844
+ const lastMessageY = finalMessages.length >= 1
845
+ ? finalMessages[finalMessages.length - 1]
846
+ : null;
847
+ if (agentContext.provider === _enum.Providers.BEDROCK &&
848
+ lastMessageX instanceof messages.AIMessageChunk &&
849
+ lastMessageY?.getType() === _enum.MessageTypes.TOOL &&
850
+ typeof lastMessageX.content === 'string') {
851
+ finalMessages[finalMessages.length - 2].content = '';
852
+ }
853
+ // Use getType() instead of instanceof to avoid module mismatch issues
854
+ const isLatestToolMessage = lastMessageY?.getType() === _enum.MessageTypes.TOOL;
855
+ if (isLatestToolMessage &&
856
+ agentContext.provider === _enum.Providers.ANTHROPIC) {
857
+ core.formatAnthropicArtifactContent(finalMessages);
858
+ }
859
+ else if (isLatestToolMessage &&
860
+ ((llm.isOpenAILike(agentContext.provider) &&
861
+ agentContext.provider !== _enum.Providers.DEEPSEEK) ||
862
+ llm.isGoogleLike(agentContext.provider))) {
863
+ core.formatArtifactPayload(finalMessages);
864
+ }
865
+ /**
866
+ * Handle edge case: when switching from a non-thinking agent to a thinking-enabled agent,
867
+ * convert AI messages with tool calls to HumanMessages to avoid thinking block requirements.
868
+ * This is required by Anthropic/Bedrock when thinking is enabled.
869
+ *
870
+ * IMPORTANT: This MUST happen BEFORE cache control is applied.
871
+ * If we add cachePoint to an AI message first, then convert that AI message to a HumanMessage,
872
+ * the cachePoint is lost. By converting first, we ensure cache control is applied to the
873
+ * final message structure that will be sent to the API.
874
+ */
875
+ const isAnthropicWithThinking = (agentContext.provider === _enum.Providers.ANTHROPIC &&
876
+ agentContext.clientOptions.thinking !=
877
+ null) ||
878
+ (agentContext.provider === _enum.Providers.BEDROCK &&
879
+ agentContext.clientOptions
880
+ .additionalModelRequestFields?.['thinking'] != null);
881
+ if (isAnthropicWithThinking) {
882
+ finalMessages = format.ensureThinkingBlockInMessages(finalMessages, agentContext.provider);
883
+ }
884
+ // Apply cache control AFTER thinking block handling to ensure cachePoints aren't lost
885
+ // when AI messages are converted to HumanMessages
886
+ if (agentContext.provider === _enum.Providers.ANTHROPIC) {
887
+ const anthropicOptions = agentContext.clientOptions;
888
+ if (anthropicOptions?.promptCache === true) {
889
+ finalMessages = cache.addCacheControl(finalMessages);
890
+ }
891
+ }
892
+ else if (agentContext.provider === _enum.Providers.BEDROCK) {
893
+ const bedrockOptions = agentContext.clientOptions;
894
+ // Both Claude and Nova models support cachePoint in system and messages
895
+ // (Llama, Titan, and other models do NOT support cachePoint)
896
+ const modelId = bedrockOptions?.model?.toLowerCase() ?? '';
897
+ const supportsCaching = modelId.includes('claude') ||
898
+ modelId.includes('anthropic') ||
899
+ modelId.includes('nova');
900
+ if (bedrockOptions?.promptCache === true && supportsCaching) {
901
+ finalMessages = cache.addBedrockCacheControl(finalMessages);
902
+ }
903
+ }
904
+ if (agentContext.lastStreamCall != null &&
905
+ agentContext.streamBuffer != null) {
906
+ const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
907
+ if (timeSinceLastCall < agentContext.streamBuffer) {
908
+ const timeToWait = Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
909
+ 1000;
910
+ await run.sleep(timeToWait);
911
+ }
912
+ }
913
+ agentContext.lastStreamCall = Date.now();
914
+ let result;
915
+ const fallbacks = agentContext.clientOptions?.fallbacks ??
916
+ [];
917
+ if (finalMessages.length === 0) {
918
+ throw new Error(JSON.stringify({
919
+ type: 'empty_messages',
920
+ info: 'Message pruning removed all messages as none fit in the context window. Please increase the context window size or make your message shorter.',
921
+ }));
922
+ }
923
+ // Get model info for analytics
924
+ const bedrockOpts = agentContext.clientOptions;
925
+ const modelId = bedrockOpts?.model ||
926
+ agentContext.clientOptions
927
+ ?.modelName;
928
+ const thinkingConfig = bedrockOpts?.additionalModelRequestFields?.['thinking'] ||
929
+ agentContext.clientOptions
930
+ ?.thinking;
931
+ // Build and emit context analytics for traces
932
+ const contextAnalytics$1 = contextAnalytics.buildContextAnalytics(finalMessages, {
933
+ tokenCounter: agentContext.tokenCounter,
934
+ maxContextTokens: agentContext.maxContextTokens,
935
+ instructionTokens: agentContext.instructionTokens,
936
+ indexTokenCountMap: agentContext.indexTokenCountMap,
937
+ });
938
+ // Store for retrieval via getContextAnalytics() after run completes
939
+ this.lastContextAnalytics = contextAnalytics$1;
940
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_CONTEXT_ANALYTICS, {
941
+ provider: agentContext.provider,
942
+ model: modelId,
943
+ thinkingEnabled: thinkingConfig != null,
944
+ cacheEnabled: bedrockOpts?.promptCache === true,
945
+ analytics: contextAnalytics$1,
946
+ }, config);
947
+ // ====================================================================
948
+ // CONTEXT PRESSURE AWARENESS — Intelligent Sub-Agent Delegation
949
+ //
950
+ // Two triggers for delegation hints:
951
+ // 1. DOCUMENT COUNT: When 3+ documents are detected in the conversation,
952
+ // inject a delegation hint on the FIRST iteration (before the LLM
953
+ // has called any tools). This ensures the agent delegates upfront
954
+ // rather than trying to process all documents itself.
955
+ // 2. TOKEN UTILIZATION: At EVERY iteration, if context is filling up
956
+ // (70%/85%), inject escalating hints to delegate remaining work.
957
+ //
958
+ // This runs mid-chain — so even if tool responses push context up
959
+ // after the first LLM call, subsequent iterations get the hint.
960
+ // ====================================================================
961
+ const hasTaskToolInContext = agentContext.tools?.some((tool) => {
962
+ const toolName = typeof tool === 'object' && 'name' in tool
963
+ ? tool.name
964
+ : '';
965
+ return toolName === 'task';
966
+ });
967
+ if (hasTaskToolInContext &&
968
+ contextAnalytics$1.utilizationPercent != null &&
969
+ contextAnalytics$1.maxContextTokens != null) {
970
+ const utilization = contextAnalytics$1.utilizationPercent;
971
+ const totalTokens = contextAnalytics$1.totalTokens;
972
+ const maxTokens = contextAnalytics$1.maxContextTokens;
973
+ const remainingTokens = maxTokens - totalTokens;
974
+ // Count attached documents by scanning for document patterns in HumanMessages:
975
+ // 1. # "filename" headers in "Attached document(s):" blocks (text content)
976
+ // 2. **filename1, filename2** in "The user has attached:" blocks (embedded files)
977
+ // 3. Filenames in file_search tool results
978
+ let documentCount = 0;
979
+ const documentNames = [];
980
+ for (const msg of finalMessages) {
981
+ const content = typeof msg.content === 'string'
982
+ ? msg.content
983
+ : Array.isArray(msg.content)
984
+ ? msg.content.map((p) => {
985
+ const part = p;
986
+ return String(part.text || part.content || '');
987
+ }).join(' ')
988
+ : '';
989
+ // Pattern 1: # "filename" headers in attached document blocks
990
+ const docMatches = content.match(/# "([^"]+)"/g);
991
+ if (docMatches) {
992
+ for (const match of docMatches) {
993
+ const name = match.replace(/# "/, '').replace(/"$/, '');
994
+ if (!documentNames.includes(name)) {
995
+ documentNames.push(name);
996
+ documentCount++;
997
+ }
998
+ }
999
+ }
1000
+ // Pattern 2: "The user has attached: **file1, file2**" (embedded files)
1001
+ const attachedMatch = content.match(/user has attached:\s*\*\*([^*]+)\*\*/i);
1002
+ if (attachedMatch) {
1003
+ const names = attachedMatch[1].split(',').map((n) => n.trim()).filter(Boolean);
1004
+ for (const name of names) {
1005
+ if (!documentNames.includes(name)) {
1006
+ documentNames.push(name);
1007
+ documentCount++;
1008
+ }
1009
+ }
1010
+ }
1011
+ }
1012
+ // BASELINE LOG: Always fires so we can verify this code path runs
1013
+ console.info(`[Graph] Context utilization: ${utilization.toFixed(1)}% ` +
1014
+ `(${totalTokens}/${maxTokens} tokens, ${remainingTokens} remaining) | ` +
1015
+ `hasTaskTool: true | messages: ${finalMessages.length} | docs: ${documentCount}`);
1016
+ // TRIGGER 1: Multi-document delegation (3+ documents detected)
1017
+ // Only inject on first iteration (no AI messages yet = agent hasn't responded)
1018
+ const hasAiResponse = finalMessages.some((m) => m._getType?.() === 'ai' || m._getType?.() === 'tool');
1019
+ if (documentCount >= 3 && !hasAiResponse) {
1020
+ const pressureMsg = new messages.HumanMessage({
1021
+ content: `[MULTI-DOCUMENT PROCESSING — ${documentCount} documents detected]\n` +
1022
+ `Documents: ${documentNames.join(', ')}\n\n` +
1023
+ `You have ${documentCount} documents attached. For thorough analysis, use the "task" tool ` +
1024
+ `to delegate each document (or group of related documents) to a sub-agent.\n` +
1025
+ `Each sub-agent has its own fresh context window and can use file_search to retrieve the full document content.\n` +
1026
+ `After all sub-agents complete, synthesize their results into a comprehensive response.\n\n` +
1027
+ `This approach ensures each document gets full attention without context limitations.`,
1028
+ });
1029
+ finalMessages = [...finalMessages, pressureMsg];
1030
+ console.info(`[Graph] Multi-document delegation hint injected for ${documentCount} documents: ` +
1031
+ `${documentNames.join(', ')}`);
1032
+ }
1033
+ // TRIGGER 2: Token utilization thresholds (mid-chain safety net)
1034
+ // Also fires when we skipped pruning due to delegationInjectedPrePrune
1035
+ if (utilization > 85 || (delegationInjectedPrePrune && utilization > 50)) {
1036
+ // CRITICAL: Context is high — MANDATE delegation
1037
+ const pressureMsg = new messages.HumanMessage({
1038
+ content: `[CONTEXT BUDGET CRITICAL — ${utilization.toFixed(0)}% used]\n` +
1039
+ `You have used ${totalTokens} of ${maxTokens} tokens (${remainingTokens} remaining).\n` +
1040
+ `Your context is very large. You MUST use the "task" tool to delegate work to sub-agents.\n` +
1041
+ `Each sub-agent runs in its own fresh context window and can use file_search to access documents.\n` +
1042
+ `Do NOT attempt to process documents directly — delegate each document to a sub-agent, then synthesize results.`,
1043
+ });
1044
+ finalMessages = [...finalMessages, pressureMsg];
1045
+ console.warn(`[Graph] Context pressure CRITICAL (${utilization.toFixed(0)}%): ` +
1046
+ `Injected mandatory delegation hint. ${remainingTokens} tokens remaining. ` +
1047
+ `prePruneSkipped: ${delegationInjectedPrePrune}`);
1048
+ }
1049
+ else if (utilization > 70) {
1050
+ // WARNING: Context filling up — suggest delegation
1051
+ const pressureMsg = new messages.HumanMessage({
1052
+ content: `[CONTEXT BUDGET WARNING — ${utilization.toFixed(0)}% used]\n` +
1053
+ `You have used ${totalTokens} of ${maxTokens} tokens (${remainingTokens} remaining).\n` +
1054
+ `Your context is filling up. Consider using the "task" tool to delegate complex operations to sub-agents.\n` +
1055
+ `Sub-agents run in fresh context windows and won't consume your remaining budget.`,
1056
+ });
1057
+ finalMessages = [...finalMessages, pressureMsg];
1058
+ console.info(`[Graph] Context pressure WARNING (${utilization.toFixed(0)}%): ` +
1059
+ `Injected delegation suggestion. ${remainingTokens} tokens remaining.`);
1060
+ }
1061
+ }
1062
+ // Structured output mode: when the agent has NO tools, produce structured JSON immediately.
1063
+ // When the agent HAS tools, we defer structured output until after tool use completes
1064
+ // (see the deferred structured output block after attemptInvoke below).
1065
+ const hasTools = (toolsForBinding?.length ?? 0) > 0;
1066
+ if (agentContext.isStructuredOutputMode &&
1067
+ agentContext.structuredOutput &&
1068
+ !hasTools) {
1069
+ try {
1070
+ const structuredResult = await this.performStructuredOutput({
1071
+ agentContext,
1072
+ finalMessages,
1073
+ config,
1074
+ });
1075
+ agentContext.currentUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
1076
+ this.cleanupSignalListener();
1077
+ return structuredResult;
1078
+ }
1079
+ catch (structuredError) {
1080
+ console.error('[Graph] Structured output failed:', structuredError);
1081
+ throw structuredError;
1082
+ }
1083
+ }
1084
+ try {
1085
+ result = await this.attemptInvoke({
1086
+ currentModel: model,
1087
+ finalMessages,
1088
+ provider: agentContext.provider,
1089
+ tools: agentContext.tools,
1090
+ }, config);
1091
+ }
1092
+ catch (primaryError) {
1093
+ // Check if this is a "input too long" error from Bedrock/Anthropic
1094
+ const errorMessage = primaryError.message.toLowerCase() ?? '';
1095
+ const isInputTooLongError = errorMessage.includes('too long') ||
1096
+ errorMessage.includes('input is too long') ||
1097
+ errorMessage.includes('context length') ||
1098
+ errorMessage.includes('maximum context') ||
1099
+ errorMessage.includes('validationexception') ||
1100
+ errorMessage.includes('prompt is too long');
1101
+ // Log when we detect the error
1102
+ if (isInputTooLongError) {
1103
+ console.warn('[Graph] Detected input too long error:', errorMessage.substring(0, 200));
1104
+ console.warn('[Graph] Checking emergency pruning conditions:', {
1105
+ hasPruneMessages: !!agentContext.pruneMessages,
1106
+ hasTokenCounter: !!agentContext.tokenCounter,
1107
+ maxContextTokens: agentContext.maxContextTokens,
1108
+ indexTokenMapKeys: Object.keys(agentContext.indexTokenCountMap)
1109
+ .length,
1110
+ });
1111
+ }
1112
+ // If input too long and we have pruning capability OR tokenCounter, retry with progressively more aggressive pruning
1113
+ // Note: We can create emergency pruneMessages dynamically if we have tokenCounter and maxContextTokens
1114
+ const canPrune = agentContext.tokenCounter && agentContext.maxContextTokens;
1115
+ if (isInputTooLongError && canPrune) {
1116
+ // Progressive reduction: 50% -> 25% -> 10% of original context
1117
+ const reductionLevels = [0.5, 0.25, 0.1];
1118
+ for (const reductionFactor of reductionLevels) {
1119
+ if (result)
1120
+ break; // Exit if we got a result
1121
+ const reducedMaxTokens = Math.floor(agentContext.maxContextTokens * reductionFactor);
1122
+ console.warn(`[Graph] Input too long. Retrying with ${reductionFactor * 100}% context (${reducedMaxTokens} tokens)...`);
1123
+ // Build fresh indexTokenCountMap if missing/incomplete
1124
+ // This is needed when messages were dynamically added without updating the token map
1125
+ let tokenMapForPruning = agentContext.indexTokenCountMap;
1126
+ if (Object.keys(tokenMapForPruning).length < messages$1.length) {
1127
+ console.warn('[Graph] Building fresh token count map for emergency pruning...');
1128
+ tokenMapForPruning = {};
1129
+ for (let i = 0; i < messages$1.length; i++) {
1130
+ tokenMapForPruning[i] = agentContext.tokenCounter(messages$1[i]);
1131
+ }
1132
+ }
1133
+ const emergencyPrune = prune.createPruneMessages({
1134
+ startIndex: this.startIndex,
1135
+ provider: agentContext.provider,
1136
+ tokenCounter: agentContext.tokenCounter,
1137
+ maxTokens: reducedMaxTokens,
1138
+ thinkingEnabled: false, // Disable thinking for emergency prune
1139
+ indexTokenCountMap: tokenMapForPruning,
1140
+ });
1141
+ const { context: reducedMessages } = emergencyPrune({
1142
+ messages: messages$1,
1143
+ usageMetadata: agentContext.currentUsage,
1144
+ });
1145
+ // Skip if we can't fit any messages
1146
+ if (reducedMessages.length === 0) {
1147
+ console.warn(`[Graph] Cannot fit any messages at ${reductionFactor * 100}% reduction, trying next level...`);
1148
+ continue;
1149
+ }
1150
+ // Calculate how many messages were pruned and estimate context timeframe
1151
+ const prunedCount = finalMessages.length - reducedMessages.length;
1152
+ const remainingCount = reducedMessages.length;
1153
+ const estimatedContextDescription = this.getContextTimeframeDescription(remainingCount);
1154
+ // Inject a personalized context message to inform the agent about pruning
1155
+ const pruneNoticeMessage = new messages.HumanMessage({
1156
+ content: `[CONTEXT NOTICE]
1157
+ Our conversation has grown quite long, so I've focused on ${estimatedContextDescription} of our chat (${remainingCount} recent messages). ${prunedCount} earlier messages are no longer in my immediate memory.
1158
+
1159
+ If I seem to be missing something we discussed earlier, just give me a quick reminder and I'll pick right back up! I'm still fully engaged and ready to help with whatever you need.`,
1160
+ });
1161
+ // Insert the notice after the system message (if any) but before conversation
1162
+ const hasSystemMessage = reducedMessages[0]?.getType() === 'system';
1163
+ const insertIndex = hasSystemMessage ? 1 : 0;
1164
+ // Create new array with the pruning notice
1165
+ const messagesWithNotice = [
1166
+ ...reducedMessages.slice(0, insertIndex),
1167
+ pruneNoticeMessage,
1168
+ ...reducedMessages.slice(insertIndex),
1169
+ ];
1170
+ let retryMessages = agentContext.useLegacyContent
1171
+ ? content.formatContentStrings(messagesWithNotice)
1172
+ : messagesWithNotice;
1173
+ // Apply thinking block handling first (before cache control)
1174
+ // This ensures AI+Tool sequences are converted to HumanMessages
1175
+ // before we add cache points that could be lost in the conversion
1176
+ if (isAnthropicWithThinking) {
1177
+ retryMessages = format.ensureThinkingBlockInMessages(retryMessages, agentContext.provider);
1178
+ }
1179
+ // Apply Bedrock cache control if needed (after thinking block handling)
1180
+ if (agentContext.provider === _enum.Providers.BEDROCK) {
1181
+ const bedrockOptions = agentContext.clientOptions;
1182
+ const modelId = bedrockOptions?.model?.toLowerCase() ?? '';
1183
+ const supportsCaching = modelId.includes('claude') ||
1184
+ modelId.includes('anthropic') ||
1185
+ modelId.includes('nova');
1186
+ if (bedrockOptions?.promptCache === true && supportsCaching) {
1187
+ retryMessages =
1188
+ cache.addBedrockCacheControl(retryMessages);
1189
+ }
1190
+ }
1191
+ try {
1192
+ result = await this.attemptInvoke({
1193
+ currentModel: model,
1194
+ finalMessages: retryMessages,
1195
+ provider: agentContext.provider,
1196
+ tools: agentContext.tools,
1197
+ }, config);
1198
+ // Success with reduced context
1199
+ console.info(`[Graph] ✅ Retry successful at ${reductionFactor * 100}% with ${reducedMessages.length} messages (reduced from ${finalMessages.length})`);
1200
+ }
1201
+ catch (retryError) {
1202
+ const retryErrorMsg = retryError.message.toLowerCase() ?? '';
1203
+ const stillTooLong = retryErrorMsg.includes('too long') ||
1204
+ retryErrorMsg.includes('context length') ||
1205
+ retryErrorMsg.includes('validationexception');
1206
+ if (stillTooLong && reductionFactor > 0.1) {
1207
+ console.warn(`[Graph] Still too long at ${reductionFactor * 100}%, trying more aggressive pruning...`);
1208
+ }
1209
+ else {
1210
+ console.error(`[Graph] Retry at ${reductionFactor * 100}% failed:`, retryError.message);
1211
+ }
1212
+ }
1213
+ }
1214
+ }
1215
+ // If we got a result from retry, skip fallbacks
1216
+ if (result) ;
1217
+ else {
1218
+ let lastError = primaryError;
1219
+ for (const fb of fallbacks) {
1220
+ try {
1221
+ let model = this.getNewModel({
1222
+ provider: fb.provider,
1223
+ clientOptions: fb.clientOptions,
1224
+ });
1225
+ const bindableTools = agentContext.tools;
1226
+ model = (!bindableTools || bindableTools.length === 0
1227
+ ? model
1228
+ : model.bindTools(bindableTools));
1229
+ result = await this.attemptInvoke({
1230
+ currentModel: model,
1231
+ finalMessages,
1232
+ provider: fb.provider,
1233
+ tools: agentContext.tools,
1234
+ }, config);
1235
+ lastError = undefined;
1236
+ break;
1237
+ }
1238
+ catch (e) {
1239
+ lastError = e;
1240
+ continue;
1241
+ }
1242
+ }
1243
+ if (lastError !== undefined) {
1244
+ throw lastError;
1245
+ }
1246
+ }
1247
+ }
1248
+ if (!result) {
1249
+ throw new Error('No result after model invocation');
1250
+ }
1251
+ agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
1252
+ // Extract and normalize the LLM's finish/stop reason for auto-continuation support
1253
+ const finalMsg = result.messages?.[0];
1254
+ if (finalMsg && 'response_metadata' in finalMsg) {
1255
+ const meta = finalMsg.response_metadata;
1256
+ // Bedrock streaming nests stopReason inside messageStop: { stopReason: '...' }
1257
+ const messageStop = meta.messageStop;
1258
+ this.lastFinishReason =
1259
+ meta.finish_reason ?? // OpenAI/Azure
1260
+ meta.stop_reason ?? // Anthropic direct API
1261
+ meta.stopReason ?? // Bedrock invoke (non-streaming)
1262
+ messageStop?.stopReason ?? // Bedrock streaming
1263
+ meta.finishReason ?? // VertexAI/Google
1264
+ undefined;
1265
+ }
1266
+ this.cleanupSignalListener();
1267
+ // DEFERRED STRUCTURED OUTPUT: When the agent has tools AND structured output configured,
1268
+ // we let the agent use tools normally via attemptInvoke(). Once the agent's response
1269
+ // has NO tool_calls (it's done with tools), we produce the final structured JSON response.
1270
+ if (agentContext.isStructuredOutputMode &&
1271
+ agentContext.structuredOutput &&
1272
+ result) {
1273
+ const lastMessage = result.messages?.[0];
1274
+ const resultHasToolCalls = lastMessage &&
1275
+ 'tool_calls' in lastMessage &&
1276
+ (lastMessage.tool_calls?.length ?? 0) > 0;
1277
+ if (!resultHasToolCalls) {
1278
+ try {
1279
+ // Build messages for structured output: include the full conversation
1280
+ // plus the agent's text response from attemptInvoke, so the structured
1281
+ // output model has full context (tool results + agent reasoning).
1282
+ const messagesForStructured = [...finalMessages];
1283
+ if (lastMessage) {
1284
+ messagesForStructured.push(lastMessage);
1285
+ }
1286
+ const structuredResult = await this.performStructuredOutput({
1287
+ agentContext,
1288
+ finalMessages: messagesForStructured,
1289
+ config,
1290
+ });
1291
+ // Accumulate token usage from both API calls
1292
+ const structuredUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
1293
+ if (structuredUsage && agentContext.currentUsage) {
1294
+ agentContext.currentUsage = {
1295
+ input_tokens: (agentContext.currentUsage.input_tokens ?? 0) +
1296
+ (structuredUsage.input_tokens ?? 0),
1297
+ output_tokens: (agentContext.currentUsage.output_tokens ?? 0) +
1298
+ (structuredUsage.output_tokens ?? 0),
1299
+ total_tokens: (agentContext.currentUsage.total_tokens ?? 0) +
1300
+ (structuredUsage.total_tokens ?? 0),
1301
+ };
1302
+ }
1303
+ else if (structuredUsage) {
1304
+ agentContext.currentUsage = structuredUsage;
1305
+ }
1306
+ return structuredResult;
1307
+ }
1308
+ catch (structuredError) {
1309
+ // Graceful fallback: the agent completed its work with tools,
1310
+ // but we couldn't format the output as structured JSON.
1311
+ // Return the unstructured text response from attemptInvoke.
1312
+ console.error('[Graph] Deferred structured output failed after successful tool use:', structuredError);
1313
+ console.warn('[Graph] Falling back to unstructured response from tool-use phase');
1314
+ return result;
1315
+ }
1316
+ }
1317
+ }
1318
+ return result;
1319
+ };
1320
+ }
1321
+ createAgentNode(agentId) {
1322
+ const agentContext = this.agentContexts.get(agentId);
1323
+ if (!agentContext) {
1324
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
1325
+ }
1326
+ const agentNode = `${AGENT}${agentId}`;
1327
+ const toolNode = `${TOOLS}${agentId}`;
1328
+ const routeMessage = (state, config) => {
1329
+ this.config = config;
1330
+ return ToolNode.toolsCondition(state, toolNode, this.invokedToolIds);
1331
+ };
1332
+ const StateAnnotation = langgraph.Annotation.Root({
1333
+ messages: langgraph.Annotation({
1334
+ reducer: langgraph.messagesStateReducer,
1335
+ default: () => [],
1336
+ }),
1337
+ });
1338
+ const workflow = new langgraph.StateGraph(StateAnnotation)
1339
+ .addNode(agentNode, this.createCallModel(agentId))
1340
+ .addNode(toolNode, this.initializeTools({
1341
+ currentTools: agentContext.tools,
1342
+ currentToolMap: agentContext.toolMap,
1343
+ agentContext,
1344
+ }))
1345
+ .addEdge(langgraph.START, agentNode)
1346
+ .addConditionalEdges(agentNode, routeMessage)
1347
+ .addEdge(toolNode, agentContext.toolEnd ? langgraph.END : agentNode);
1348
+ // Cast to unknown to avoid tight coupling to external types; options are opt-in
1349
+ return workflow.compile(this.compileOptions);
1350
+ }
1351
+ createWorkflow() {
1352
+ /** Use the default (first) agent for now */
1353
+ const agentNode = this.createAgentNode(this.defaultAgentId);
1354
+ const StateAnnotation = langgraph.Annotation.Root({
1355
+ messages: langgraph.Annotation({
1356
+ reducer: (a, b) => {
1357
+ if (!a.length) {
1358
+ this.startIndex = a.length + b.length;
1359
+ }
1360
+ const result = langgraph.messagesStateReducer(a, b);
1361
+ this.messages = result;
1362
+ return result;
1363
+ },
1364
+ default: () => [],
1365
+ }),
1366
+ });
1367
+ const workflow = new langgraph.StateGraph(StateAnnotation)
1368
+ .addNode(this.defaultAgentId, agentNode, { ends: [langgraph.END] })
1369
+ .addEdge(langgraph.START, this.defaultAgentId)
1370
+ .compile();
1371
+ return workflow;
1372
+ }
1373
+ /**
1374
+ * Indicates if this is a multi-agent graph.
1375
+ * Override in MultiAgentGraph to return true.
1376
+ * Used to conditionally include agentId in RunStep for frontend rendering.
1377
+ */
1378
+ isMultiAgentGraph() {
1379
+ return false;
1380
+ }
1381
+ /**
1382
+ * Get the parallel group ID for an agent, if any.
1383
+ * Override in MultiAgentGraph to provide actual group IDs.
1384
+ * Group IDs are incrementing numbers (1, 2, 3...) reflecting execution order.
1385
+ * @param _agentId - The agent ID to look up
1386
+ * @returns undefined for StandardGraph (no parallel groups), or group number for MultiAgentGraph
1387
+ */
1388
+ getParallelGroupIdForAgent(_agentId) {
1389
+ return undefined;
1390
+ }
1391
+ /* Dispatchers */
1392
+ /**
1393
+ * Dispatches a run step to the client, returns the step ID
1394
+ */
1395
+ async dispatchRunStep(stepKey, stepDetails, metadata) {
1396
+ if (!this.config) {
1397
+ throw new Error('No config provided');
1398
+ }
1399
+ const [stepId, stepIndex] = this.generateStepId(stepKey);
1400
+ if (stepDetails.type === _enum.StepTypes.TOOL_CALLS && stepDetails.tool_calls) {
1401
+ for (const tool_call of stepDetails.tool_calls) {
1402
+ const toolCallId = tool_call.id ?? '';
1403
+ if (!toolCallId || this.toolCallStepIds.has(toolCallId)) {
1404
+ continue;
1405
+ }
1406
+ this.toolCallStepIds.set(toolCallId, stepId);
1407
+ }
1408
+ }
1409
+ const runStep = {
1410
+ stepIndex,
1411
+ id: stepId,
1412
+ type: stepDetails.type,
1413
+ index: this.contentData.length,
1414
+ stepDetails,
1415
+ usage: null,
1416
+ };
1417
+ const runId = this.runId ?? '';
1418
+ if (runId) {
1419
+ runStep.runId = runId;
1420
+ }
1421
+ /**
1422
+ * Extract agentId and parallelGroupId from metadata
1423
+ * Only set agentId for MultiAgentGraph (so frontend knows when to show agent labels)
1424
+ */
1425
+ if (metadata) {
1426
+ try {
1427
+ const agentContext = this.getAgentContext(metadata);
1428
+ if (this.isMultiAgentGraph() && agentContext.agentId) {
1429
+ // Only include agentId for MultiAgentGraph - enables frontend to show agent labels
1430
+ runStep.agentId = agentContext.agentId;
1431
+ // Set group ID if this agent is part of a parallel group
1432
+ // Group IDs are incrementing numbers (1, 2, 3...) reflecting execution order
1433
+ const groupId = this.getParallelGroupIdForAgent(agentContext.agentId);
1434
+ if (groupId != null) {
1435
+ runStep.groupId = groupId;
1436
+ }
1437
+ }
1438
+ }
1439
+ catch (_e) {
1440
+ /** If we can't get agent context, that's okay - agentId remains undefined */
1441
+ }
1442
+ }
1443
+ this.contentData.push(runStep);
1444
+ this.contentIndexMap.set(stepId, runStep.index);
1445
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP, runStep, this.config);
1446
+ return stepId;
1447
+ }
1448
/**
 * Finalizes a completed tool call: records code-execution session files
 * (for EXECUTE_CODE / PROGRAMMATIC_TOOL_CALLING) and forwards the
 * completed tool_call payload to the ON_RUN_STEP_COMPLETED handler.
 *
 * @param data - tool run payload; `input` is the tool's invocation args and
 *   `output` the tool message (must carry tool_call_id)
 * @param metadata - run metadata passed through to the completion handler
 * @param omitOutput - when true, the dispatched tool_call carries an empty
 *   output string (the real content is withheld from the client)
 * @throws Error when no run config is set, or when the tool_call_id was
 *   never registered via dispatchRunStep / no run step exists for it
 */
async handleToolCallCompleted(data, metadata, omitOutput) {
    if (!this.config) {
        throw new Error('No config provided');
    }
    // Nothing to dispatch without an output message.
    if (!data.output) {
        return;
    }
    const { input, output: _output } = data;
    // LangGraph Command outputs steer graph routing; they are not tool
    // results, so they are never dispatched to the client.
    if (_output?.lg_name === 'Command') {
        return;
    }
    const output = _output;
    const { tool_call_id } = output;
    // Step id was registered when the tool call step was first dispatched.
    const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
    if (!stepId) {
        throw new Error(`No stepId found for tool_call_id ${tool_call_id}`);
    }
    const runStep = this.getRunStep(stepId);
    if (!runStep) {
        throw new Error(`No run step found for stepId ${stepId}`);
    }
    /**
     * Extract and store code execution session context from artifacts.
     * Each file is stamped with its source session_id to support multi-session file tracking.
     * When the same filename appears in a later execution, the newer version replaces the old.
     */
    const toolName = output.name;
    if (toolName === _enum.Constants.EXECUTE_CODE ||
        toolName === _enum.Constants.PROGRAMMATIC_TOOL_CALLING) {
        const artifact = output.artifact;
        const newFiles = artifact?.files ?? [];
        const hasNewFiles = newFiles.length > 0;
        // Only track files when the artifact names a non-empty session.
        if (hasNewFiles &&
            artifact?.session_id != null &&
            artifact.session_id !== '') {
            /**
             * Stamp each new file with its source session_id.
             * This enables files from different executions (parallel or sequential)
             * to be tracked and passed to subsequent calls.
             */
            const filesWithSession = newFiles.map((file) => ({
                ...file,
                session_id: artifact.session_id,
            }));
            const existingSession = this.sessions.get(_enum.Constants.EXECUTE_CODE);
            const existingFiles = existingSession?.files ?? [];
            /**
             * Merge files, preferring latest versions by name.
             * If a file with the same name exists, replace it with the new version.
             * This handles cases where files are edited/recreated in subsequent executions.
             */
            const newFileNames = new Set(filesWithSession.map((f) => f.name));
            const filteredExisting = existingFiles.filter((f) => !newFileNames.has(f.name));
            this.sessions.set(_enum.Constants.EXECUTE_CODE, {
                /** Keep latest session_id for reference/fallback */
                session_id: artifact.session_id,
                /** Accumulated files with latest versions preferred */
                files: [...filteredExisting, ...filesWithSession],
                lastUpdated: Date.now(),
            });
        }
    }
    // Normalize structured content to a JSON string for dispatch.
    const dispatchedOutput = typeof output.content === 'string'
        ? output.content
        : JSON.stringify(output.content);
    // NOTE(review): assumes non-string inputs wrap the real args in
    // `input.input` — verify against the tool invocation path.
    const args = typeof input === 'string' ? input : input.input;
    const tool_call = {
        args: typeof args === 'string' ? args : JSON.stringify(args),
        name: output.name ?? '',
        id: output.tool_call_id,
        // Caller may suppress the output payload (e.g. for large/sensitive results).
        output: omitOutput === true ? '' : dispatchedOutput,
        progress: 1,
    };
    await this.handlerRegistry
        ?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
        ?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
        result: {
            id: stepId,
            index: runStep.index,
            type: 'tool_call',
            tool_call,
        },
    }, metadata, this);
}
1532
+ /**
1533
+ * Static version of handleToolCallError to avoid creating strong references
1534
+ * that prevent garbage collection
1535
+ */
1536
+ static async handleToolCallErrorStatic(graph, data, metadata) {
1537
+ if (!graph.config) {
1538
+ throw new Error('No config provided');
1539
+ }
1540
+ if (!data.id) {
1541
+ console.warn('No Tool ID provided for Tool Error');
1542
+ return;
1543
+ }
1544
+ const stepId = graph.toolCallStepIds.get(data.id) ?? '';
1545
+ if (!stepId) {
1546
+ throw new Error(`No stepId found for tool_call_id ${data.id}`);
1547
+ }
1548
+ const { name, input: args, error } = data;
1549
+ const runStep = graph.getRunStep(stepId);
1550
+ if (!runStep) {
1551
+ throw new Error(`No run step found for stepId ${stepId}`);
1552
+ }
1553
+ const tool_call = {
1554
+ id: data.id,
1555
+ name: name || '',
1556
+ args: typeof args === 'string' ? args : JSON.stringify(args),
1557
+ output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
1558
+ progress: 1,
1559
+ };
1560
+ await graph.handlerRegistry
1561
+ ?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
1562
+ ?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
1563
+ result: {
1564
+ id: stepId,
1565
+ index: runStep.index,
1566
+ type: 'tool_call',
1567
+ tool_call,
1568
+ },
1569
+ }, metadata, graph);
1570
+ }
1571
+ /**
1572
+ * Instance method that delegates to the static method
1573
+ * Kept for backward compatibility
1574
+ */
1575
+ async handleToolCallError(data, metadata) {
1576
+ await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
1577
+ }
1578
+ async dispatchRunStepDelta(id, delta) {
1579
+ if (!this.config) {
1580
+ throw new Error('No config provided');
1581
+ }
1582
+ else if (!id) {
1583
+ throw new Error('No step ID found');
1584
+ }
1585
+ const runStepDelta = {
1586
+ id,
1587
+ delta,
1588
+ };
1589
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
1590
+ }
1591
+ async dispatchMessageDelta(id, delta) {
1592
+ if (!this.config) {
1593
+ throw new Error('No config provided');
1594
+ }
1595
+ const messageDelta = {
1596
+ id,
1597
+ delta,
1598
+ };
1599
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
1600
+ }
1601
/**
 * Emits an ON_REASONING_DELTA custom event for a streamed reasoning chunk.
 * Declared as a class-field arrow function so `this` stays bound when the
 * method is passed around as a callback.
 * @param {string} stepId - Identifier of the step the reasoning belongs to.
 * @param {Object} delta - Partial reasoning payload.
 * @throws {Error} When no config is set.
 */
dispatchReasoningDelta = async (stepId, delta) => {
    if (!this.config) {
        throw new Error('No config provided');
    }
    const payload = { id: stepId, delta };
    await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_REASONING_DELTA, payload, this.config);
};
1611
+ }
1612
+
1613
/** Public CommonJS exports: the base Graph and the StandardGraph implementation. */
exports.Graph = Graph;
exports.StandardGraph = StandardGraph;
//# sourceMappingURL=Graph.cjs.map