@illuma-ai/agents 1.0.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (558)
  1. package/README.md +485 -0
  2. package/dist/cjs/agents/AgentContext.cjs +734 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +190 -0
  5. package/dist/cjs/common/enum.cjs.map +1 -0
  6. package/dist/cjs/events.cjs +172 -0
  7. package/dist/cjs/events.cjs.map +1 -0
  8. package/dist/cjs/graphs/Graph.cjs +1615 -0
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +890 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/instrumentation.cjs +21 -0
  13. package/dist/cjs/instrumentation.cjs.map +1 -0
  14. package/dist/cjs/llm/anthropic/index.cjs +292 -0
  15. package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
  16. package/dist/cjs/llm/anthropic/types.cjs +50 -0
  17. package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
  18. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +630 -0
  19. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  20. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
  21. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  22. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  23. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  24. package/dist/cjs/llm/bedrock/index.cjs +282 -0
  25. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  26. package/dist/cjs/llm/fake.cjs +97 -0
  27. package/dist/cjs/llm/fake.cjs.map +1 -0
  28. package/dist/cjs/llm/google/index.cjs +216 -0
  29. package/dist/cjs/llm/google/index.cjs.map +1 -0
  30. package/dist/cjs/llm/google/utils/common.cjs +647 -0
  31. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  32. package/dist/cjs/llm/openai/index.cjs +1028 -0
  33. package/dist/cjs/llm/openai/index.cjs.map +1 -0
  34. package/dist/cjs/llm/openai/utils/index.cjs +765 -0
  35. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  36. package/dist/cjs/llm/openrouter/index.cjs +212 -0
  37. package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
  38. package/dist/cjs/llm/providers.cjs +43 -0
  39. package/dist/cjs/llm/providers.cjs.map +1 -0
  40. package/dist/cjs/llm/text.cjs +69 -0
  41. package/dist/cjs/llm/text.cjs.map +1 -0
  42. package/dist/cjs/llm/vertexai/index.cjs +329 -0
  43. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  44. package/dist/cjs/main.cjs +240 -0
  45. package/dist/cjs/main.cjs.map +1 -0
  46. package/dist/cjs/messages/cache.cjs +387 -0
  47. package/dist/cjs/messages/cache.cjs.map +1 -0
  48. package/dist/cjs/messages/content.cjs +53 -0
  49. package/dist/cjs/messages/content.cjs.map +1 -0
  50. package/dist/cjs/messages/core.cjs +367 -0
  51. package/dist/cjs/messages/core.cjs.map +1 -0
  52. package/dist/cjs/messages/format.cjs +761 -0
  53. package/dist/cjs/messages/format.cjs.map +1 -0
  54. package/dist/cjs/messages/ids.cjs +23 -0
  55. package/dist/cjs/messages/ids.cjs.map +1 -0
  56. package/dist/cjs/messages/prune.cjs +398 -0
  57. package/dist/cjs/messages/prune.cjs.map +1 -0
  58. package/dist/cjs/messages/tools.cjs +96 -0
  59. package/dist/cjs/messages/tools.cjs.map +1 -0
  60. package/dist/cjs/run.cjs +328 -0
  61. package/dist/cjs/run.cjs.map +1 -0
  62. package/dist/cjs/schemas/validate.cjs +324 -0
  63. package/dist/cjs/schemas/validate.cjs.map +1 -0
  64. package/dist/cjs/splitStream.cjs +210 -0
  65. package/dist/cjs/splitStream.cjs.map +1 -0
  66. package/dist/cjs/stream.cjs +620 -0
  67. package/dist/cjs/stream.cjs.map +1 -0
  68. package/dist/cjs/tools/BrowserTools.cjs +248 -0
  69. package/dist/cjs/tools/BrowserTools.cjs.map +1 -0
  70. package/dist/cjs/tools/Calculator.cjs +66 -0
  71. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  72. package/dist/cjs/tools/CodeExecutor.cjs +234 -0
  73. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
  74. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +636 -0
  75. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -0
  76. package/dist/cjs/tools/ToolNode.cjs +548 -0
  77. package/dist/cjs/tools/ToolNode.cjs.map +1 -0
  78. package/dist/cjs/tools/ToolSearch.cjs +909 -0
  79. package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
  80. package/dist/cjs/tools/handlers.cjs +255 -0
  81. package/dist/cjs/tools/handlers.cjs.map +1 -0
  82. package/dist/cjs/tools/schema.cjs +31 -0
  83. package/dist/cjs/tools/schema.cjs.map +1 -0
  84. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  85. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  86. package/dist/cjs/tools/search/content.cjs +140 -0
  87. package/dist/cjs/tools/search/content.cjs.map +1 -0
  88. package/dist/cjs/tools/search/firecrawl.cjs +179 -0
  89. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  90. package/dist/cjs/tools/search/format.cjs +203 -0
  91. package/dist/cjs/tools/search/format.cjs.map +1 -0
  92. package/dist/cjs/tools/search/highlights.cjs +245 -0
  93. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  94. package/dist/cjs/tools/search/rerankers.cjs +174 -0
  95. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  96. package/dist/cjs/tools/search/schema.cjs +117 -0
  97. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  98. package/dist/cjs/tools/search/search.cjs +566 -0
  99. package/dist/cjs/tools/search/search.cjs.map +1 -0
  100. package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
  101. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
  102. package/dist/cjs/tools/search/tool.cjs +456 -0
  103. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  104. package/dist/cjs/tools/search/utils.cjs +66 -0
  105. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  106. package/dist/cjs/types/graph.cjs +29 -0
  107. package/dist/cjs/types/graph.cjs.map +1 -0
  108. package/dist/cjs/utils/contextAnalytics.cjs +66 -0
  109. package/dist/cjs/utils/contextAnalytics.cjs.map +1 -0
  110. package/dist/cjs/utils/events.cjs +31 -0
  111. package/dist/cjs/utils/events.cjs.map +1 -0
  112. package/dist/cjs/utils/graph.cjs +16 -0
  113. package/dist/cjs/utils/graph.cjs.map +1 -0
  114. package/dist/cjs/utils/handlers.cjs +70 -0
  115. package/dist/cjs/utils/handlers.cjs.map +1 -0
  116. package/dist/cjs/utils/llm.cjs +27 -0
  117. package/dist/cjs/utils/llm.cjs.map +1 -0
  118. package/dist/cjs/utils/misc.cjs +56 -0
  119. package/dist/cjs/utils/misc.cjs.map +1 -0
  120. package/dist/cjs/utils/run.cjs +73 -0
  121. package/dist/cjs/utils/run.cjs.map +1 -0
  122. package/dist/cjs/utils/schema.cjs +27 -0
  123. package/dist/cjs/utils/schema.cjs.map +1 -0
  124. package/dist/cjs/utils/title.cjs +125 -0
  125. package/dist/cjs/utils/title.cjs.map +1 -0
  126. package/dist/cjs/utils/tokens.cjs +125 -0
  127. package/dist/cjs/utils/tokens.cjs.map +1 -0
  128. package/dist/cjs/utils/toonFormat.cjs +388 -0
  129. package/dist/cjs/utils/toonFormat.cjs.map +1 -0
  130. package/dist/esm/agents/AgentContext.mjs +732 -0
  131. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  132. package/dist/esm/common/enum.mjs +190 -0
  133. package/dist/esm/common/enum.mjs.map +1 -0
  134. package/dist/esm/events.mjs +164 -0
  135. package/dist/esm/events.mjs.map +1 -0
  136. package/dist/esm/graphs/Graph.mjs +1612 -0
  137. package/dist/esm/graphs/Graph.mjs.map +1 -0
  138. package/dist/esm/graphs/MultiAgentGraph.mjs +888 -0
  139. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  140. package/dist/esm/instrumentation.mjs +19 -0
  141. package/dist/esm/instrumentation.mjs.map +1 -0
  142. package/dist/esm/llm/anthropic/index.mjs +290 -0
  143. package/dist/esm/llm/anthropic/index.mjs.map +1 -0
  144. package/dist/esm/llm/anthropic/types.mjs +48 -0
  145. package/dist/esm/llm/anthropic/types.mjs.map +1 -0
  146. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +627 -0
  147. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  148. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
  149. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  150. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  151. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  152. package/dist/esm/llm/bedrock/index.mjs +280 -0
  153. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  154. package/dist/esm/llm/fake.mjs +94 -0
  155. package/dist/esm/llm/fake.mjs.map +1 -0
  156. package/dist/esm/llm/google/index.mjs +214 -0
  157. package/dist/esm/llm/google/index.mjs.map +1 -0
  158. package/dist/esm/llm/google/utils/common.mjs +638 -0
  159. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  160. package/dist/esm/llm/openai/index.mjs +1018 -0
  161. package/dist/esm/llm/openai/index.mjs.map +1 -0
  162. package/dist/esm/llm/openai/utils/index.mjs +759 -0
  163. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  164. package/dist/esm/llm/openrouter/index.mjs +210 -0
  165. package/dist/esm/llm/openrouter/index.mjs.map +1 -0
  166. package/dist/esm/llm/providers.mjs +39 -0
  167. package/dist/esm/llm/providers.mjs.map +1 -0
  168. package/dist/esm/llm/text.mjs +67 -0
  169. package/dist/esm/llm/text.mjs.map +1 -0
  170. package/dist/esm/llm/vertexai/index.mjs +327 -0
  171. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  172. package/dist/esm/main.mjs +37 -0
  173. package/dist/esm/main.mjs.map +1 -0
  174. package/dist/esm/messages/cache.mjs +382 -0
  175. package/dist/esm/messages/cache.mjs.map +1 -0
  176. package/dist/esm/messages/content.mjs +51 -0
  177. package/dist/esm/messages/content.mjs.map +1 -0
  178. package/dist/esm/messages/core.mjs +359 -0
  179. package/dist/esm/messages/core.mjs.map +1 -0
  180. package/dist/esm/messages/format.mjs +752 -0
  181. package/dist/esm/messages/format.mjs.map +1 -0
  182. package/dist/esm/messages/ids.mjs +21 -0
  183. package/dist/esm/messages/ids.mjs.map +1 -0
  184. package/dist/esm/messages/prune.mjs +393 -0
  185. package/dist/esm/messages/prune.mjs.map +1 -0
  186. package/dist/esm/messages/tools.mjs +93 -0
  187. package/dist/esm/messages/tools.mjs.map +1 -0
  188. package/dist/esm/run.mjs +325 -0
  189. package/dist/esm/run.mjs.map +1 -0
  190. package/dist/esm/schemas/validate.mjs +317 -0
  191. package/dist/esm/schemas/validate.mjs.map +1 -0
  192. package/dist/esm/splitStream.mjs +207 -0
  193. package/dist/esm/splitStream.mjs.map +1 -0
  194. package/dist/esm/stream.mjs +616 -0
  195. package/dist/esm/stream.mjs.map +1 -0
  196. package/dist/esm/tools/BrowserTools.mjs +244 -0
  197. package/dist/esm/tools/BrowserTools.mjs.map +1 -0
  198. package/dist/esm/tools/Calculator.mjs +41 -0
  199. package/dist/esm/tools/Calculator.mjs.map +1 -0
  200. package/dist/esm/tools/CodeExecutor.mjs +226 -0
  201. package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
  202. package/dist/esm/tools/ProgrammaticToolCalling.mjs +622 -0
  203. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -0
  204. package/dist/esm/tools/ToolNode.mjs +545 -0
  205. package/dist/esm/tools/ToolNode.mjs.map +1 -0
  206. package/dist/esm/tools/ToolSearch.mjs +870 -0
  207. package/dist/esm/tools/ToolSearch.mjs.map +1 -0
  208. package/dist/esm/tools/handlers.mjs +250 -0
  209. package/dist/esm/tools/handlers.mjs.map +1 -0
  210. package/dist/esm/tools/schema.mjs +28 -0
  211. package/dist/esm/tools/schema.mjs.map +1 -0
  212. package/dist/esm/tools/search/anthropic.mjs +37 -0
  213. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  214. package/dist/esm/tools/search/content.mjs +119 -0
  215. package/dist/esm/tools/search/content.mjs.map +1 -0
  216. package/dist/esm/tools/search/firecrawl.mjs +176 -0
  217. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  218. package/dist/esm/tools/search/format.mjs +201 -0
  219. package/dist/esm/tools/search/format.mjs.map +1 -0
  220. package/dist/esm/tools/search/highlights.mjs +243 -0
  221. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  222. package/dist/esm/tools/search/rerankers.mjs +168 -0
  223. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  224. package/dist/esm/tools/search/schema.mjs +104 -0
  225. package/dist/esm/tools/search/schema.mjs.map +1 -0
  226. package/dist/esm/tools/search/search.mjs +563 -0
  227. package/dist/esm/tools/search/search.mjs.map +1 -0
  228. package/dist/esm/tools/search/serper-scraper.mjs +129 -0
  229. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
  230. package/dist/esm/tools/search/tool.mjs +454 -0
  231. package/dist/esm/tools/search/tool.mjs.map +1 -0
  232. package/dist/esm/tools/search/utils.mjs +61 -0
  233. package/dist/esm/tools/search/utils.mjs.map +1 -0
  234. package/dist/esm/types/graph.mjs +26 -0
  235. package/dist/esm/types/graph.mjs.map +1 -0
  236. package/dist/esm/utils/contextAnalytics.mjs +64 -0
  237. package/dist/esm/utils/contextAnalytics.mjs.map +1 -0
  238. package/dist/esm/utils/events.mjs +29 -0
  239. package/dist/esm/utils/events.mjs.map +1 -0
  240. package/dist/esm/utils/graph.mjs +13 -0
  241. package/dist/esm/utils/graph.mjs.map +1 -0
  242. package/dist/esm/utils/handlers.mjs +68 -0
  243. package/dist/esm/utils/handlers.mjs.map +1 -0
  244. package/dist/esm/utils/llm.mjs +24 -0
  245. package/dist/esm/utils/llm.mjs.map +1 -0
  246. package/dist/esm/utils/misc.mjs +53 -0
  247. package/dist/esm/utils/misc.mjs.map +1 -0
  248. package/dist/esm/utils/run.mjs +70 -0
  249. package/dist/esm/utils/run.mjs.map +1 -0
  250. package/dist/esm/utils/schema.mjs +24 -0
  251. package/dist/esm/utils/schema.mjs.map +1 -0
  252. package/dist/esm/utils/title.mjs +122 -0
  253. package/dist/esm/utils/title.mjs.map +1 -0
  254. package/dist/esm/utils/tokens.mjs +121 -0
  255. package/dist/esm/utils/tokens.mjs.map +1 -0
  256. package/dist/esm/utils/toonFormat.mjs +381 -0
  257. package/dist/esm/utils/toonFormat.mjs.map +1 -0
  258. package/dist/types/agents/AgentContext.d.ts +293 -0
  259. package/dist/types/common/enum.d.ts +155 -0
  260. package/dist/types/common/index.d.ts +1 -0
  261. package/dist/types/events.d.ts +31 -0
  262. package/dist/types/graphs/Graph.d.ts +216 -0
  263. package/dist/types/graphs/MultiAgentGraph.d.ts +104 -0
  264. package/dist/types/graphs/index.d.ts +2 -0
  265. package/dist/types/index.d.ts +21 -0
  266. package/dist/types/instrumentation.d.ts +1 -0
  267. package/dist/types/llm/anthropic/index.d.ts +39 -0
  268. package/dist/types/llm/anthropic/types.d.ts +37 -0
  269. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  270. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
  271. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
  272. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  273. package/dist/types/llm/bedrock/index.d.ts +141 -0
  274. package/dist/types/llm/bedrock/types.d.ts +27 -0
  275. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  276. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  277. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  278. package/dist/types/llm/fake.d.ts +31 -0
  279. package/dist/types/llm/google/index.d.ts +24 -0
  280. package/dist/types/llm/google/types.d.ts +42 -0
  281. package/dist/types/llm/google/utils/common.d.ts +34 -0
  282. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  283. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  284. package/dist/types/llm/openai/index.d.ts +127 -0
  285. package/dist/types/llm/openai/types.d.ts +10 -0
  286. package/dist/types/llm/openai/utils/index.d.ts +29 -0
  287. package/dist/types/llm/openrouter/index.d.ts +15 -0
  288. package/dist/types/llm/providers.d.ts +5 -0
  289. package/dist/types/llm/text.d.ts +21 -0
  290. package/dist/types/llm/vertexai/index.d.ts +293 -0
  291. package/dist/types/messages/cache.d.ts +54 -0
  292. package/dist/types/messages/content.d.ts +7 -0
  293. package/dist/types/messages/core.d.ts +14 -0
  294. package/dist/types/messages/format.d.ts +137 -0
  295. package/dist/types/messages/ids.d.ts +3 -0
  296. package/dist/types/messages/index.d.ts +7 -0
  297. package/dist/types/messages/prune.d.ts +52 -0
  298. package/dist/types/messages/reducer.d.ts +9 -0
  299. package/dist/types/messages/tools.d.ts +17 -0
  300. package/dist/types/mockStream.d.ts +32 -0
  301. package/dist/types/prompts/collab.d.ts +1 -0
  302. package/dist/types/prompts/index.d.ts +2 -0
  303. package/dist/types/prompts/taskmanager.d.ts +41 -0
  304. package/dist/types/run.d.ts +41 -0
  305. package/dist/types/schemas/index.d.ts +1 -0
  306. package/dist/types/schemas/validate.d.ts +59 -0
  307. package/dist/types/splitStream.d.ts +37 -0
  308. package/dist/types/stream.d.ts +15 -0
  309. package/dist/types/test/mockTools.d.ts +28 -0
  310. package/dist/types/tools/BrowserTools.d.ts +87 -0
  311. package/dist/types/tools/Calculator.d.ts +34 -0
  312. package/dist/types/tools/CodeExecutor.d.ts +57 -0
  313. package/dist/types/tools/ProgrammaticToolCalling.d.ts +138 -0
  314. package/dist/types/tools/ToolNode.d.ts +51 -0
  315. package/dist/types/tools/ToolSearch.d.ts +219 -0
  316. package/dist/types/tools/handlers.d.ts +22 -0
  317. package/dist/types/tools/schema.d.ts +12 -0
  318. package/dist/types/tools/search/anthropic.d.ts +16 -0
  319. package/dist/types/tools/search/content.d.ts +4 -0
  320. package/dist/types/tools/search/firecrawl.d.ts +54 -0
  321. package/dist/types/tools/search/format.d.ts +5 -0
  322. package/dist/types/tools/search/highlights.d.ts +13 -0
  323. package/dist/types/tools/search/index.d.ts +3 -0
  324. package/dist/types/tools/search/rerankers.d.ts +38 -0
  325. package/dist/types/tools/search/schema.d.ts +103 -0
  326. package/dist/types/tools/search/search.d.ts +8 -0
  327. package/dist/types/tools/search/serper-scraper.d.ts +59 -0
  328. package/dist/types/tools/search/test.d.ts +1 -0
  329. package/dist/types/tools/search/tool.d.ts +3 -0
  330. package/dist/types/tools/search/types.d.ts +575 -0
  331. package/dist/types/tools/search/utils.d.ts +10 -0
  332. package/dist/types/types/graph.d.ts +399 -0
  333. package/dist/types/types/index.d.ts +5 -0
  334. package/dist/types/types/llm.d.ts +105 -0
  335. package/dist/types/types/messages.d.ts +4 -0
  336. package/dist/types/types/run.d.ts +112 -0
  337. package/dist/types/types/stream.d.ts +308 -0
  338. package/dist/types/types/tools.d.ts +296 -0
  339. package/dist/types/utils/contextAnalytics.d.ts +37 -0
  340. package/dist/types/utils/events.d.ts +6 -0
  341. package/dist/types/utils/graph.d.ts +2 -0
  342. package/dist/types/utils/handlers.d.ts +34 -0
  343. package/dist/types/utils/index.d.ts +9 -0
  344. package/dist/types/utils/llm.d.ts +3 -0
  345. package/dist/types/utils/llmConfig.d.ts +3 -0
  346. package/dist/types/utils/logging.d.ts +1 -0
  347. package/dist/types/utils/misc.d.ts +7 -0
  348. package/dist/types/utils/run.d.ts +27 -0
  349. package/dist/types/utils/schema.d.ts +8 -0
  350. package/dist/types/utils/title.d.ts +4 -0
  351. package/dist/types/utils/tokens.d.ts +28 -0
  352. package/dist/types/utils/toonFormat.d.ts +111 -0
  353. package/package.json +190 -0
  354. package/src/agents/AgentContext.test.ts +458 -0
  355. package/src/agents/AgentContext.ts +972 -0
  356. package/src/agents/__tests__/AgentContext.test.ts +805 -0
  357. package/src/agents/__tests__/resolveStructuredOutputMode.test.ts +137 -0
  358. package/src/common/enum.ts +203 -0
  359. package/src/common/index.ts +2 -0
  360. package/src/events.ts +223 -0
  361. package/src/graphs/Graph.ts +2228 -0
  362. package/src/graphs/MultiAgentGraph.ts +1063 -0
  363. package/src/graphs/__tests__/structured-output.integration.test.ts +809 -0
  364. package/src/graphs/__tests__/structured-output.test.ts +183 -0
  365. package/src/graphs/index.ts +2 -0
  366. package/src/index.ts +34 -0
  367. package/src/instrumentation.ts +22 -0
  368. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  369. package/src/llm/anthropic/index.ts +413 -0
  370. package/src/llm/anthropic/llm.spec.ts +1442 -0
  371. package/src/llm/anthropic/types.ts +140 -0
  372. package/src/llm/anthropic/utils/message_inputs.ts +757 -0
  373. package/src/llm/anthropic/utils/message_outputs.ts +289 -0
  374. package/src/llm/anthropic/utils/output_parsers.ts +133 -0
  375. package/src/llm/anthropic/utils/tools.ts +29 -0
  376. package/src/llm/bedrock/__tests__/bedrock-caching.test.ts +495 -0
  377. package/src/llm/bedrock/index.ts +411 -0
  378. package/src/llm/bedrock/llm.spec.ts +616 -0
  379. package/src/llm/bedrock/types.ts +51 -0
  380. package/src/llm/bedrock/utils/index.ts +18 -0
  381. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  382. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  383. package/src/llm/fake.ts +133 -0
  384. package/src/llm/google/data/gettysburg10.wav +0 -0
  385. package/src/llm/google/data/hotdog.jpg +0 -0
  386. package/src/llm/google/index.ts +337 -0
  387. package/src/llm/google/llm.spec.ts +934 -0
  388. package/src/llm/google/types.ts +56 -0
  389. package/src/llm/google/utils/common.ts +873 -0
  390. package/src/llm/google/utils/tools.ts +160 -0
  391. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  392. package/src/llm/openai/index.ts +1366 -0
  393. package/src/llm/openai/types.ts +24 -0
  394. package/src/llm/openai/utils/index.ts +1035 -0
  395. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  396. package/src/llm/openrouter/index.ts +291 -0
  397. package/src/llm/providers.ts +52 -0
  398. package/src/llm/text.ts +94 -0
  399. package/src/llm/vertexai/index.ts +359 -0
  400. package/src/messages/__tests__/tools.test.ts +473 -0
  401. package/src/messages/cache.test.ts +1261 -0
  402. package/src/messages/cache.ts +518 -0
  403. package/src/messages/content.test.ts +362 -0
  404. package/src/messages/content.ts +63 -0
  405. package/src/messages/core.ts +473 -0
  406. package/src/messages/ensureThinkingBlock.test.ts +468 -0
  407. package/src/messages/format.ts +1029 -0
  408. package/src/messages/formatAgentMessages.test.ts +1513 -0
  409. package/src/messages/formatAgentMessages.tools.test.ts +419 -0
  410. package/src/messages/formatMessage.test.ts +693 -0
  411. package/src/messages/ids.ts +26 -0
  412. package/src/messages/index.ts +7 -0
  413. package/src/messages/labelContentByAgent.test.ts +887 -0
  414. package/src/messages/prune.ts +568 -0
  415. package/src/messages/reducer.ts +80 -0
  416. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
  417. package/src/messages/tools.ts +108 -0
  418. package/src/mockStream.ts +99 -0
  419. package/src/prompts/collab.ts +6 -0
  420. package/src/prompts/index.ts +2 -0
  421. package/src/prompts/taskmanager.ts +61 -0
  422. package/src/run.ts +467 -0
  423. package/src/schemas/index.ts +2 -0
  424. package/src/schemas/schema-preparation.test.ts +500 -0
  425. package/src/schemas/validate.test.ts +358 -0
  426. package/src/schemas/validate.ts +454 -0
  427. package/src/scripts/abort.ts +157 -0
  428. package/src/scripts/ant_web_search.ts +158 -0
  429. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  430. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  431. package/src/scripts/args.ts +48 -0
  432. package/src/scripts/caching.ts +132 -0
  433. package/src/scripts/cli.ts +172 -0
  434. package/src/scripts/cli2.ts +133 -0
  435. package/src/scripts/cli3.ts +184 -0
  436. package/src/scripts/cli4.ts +191 -0
  437. package/src/scripts/cli5.ts +191 -0
  438. package/src/scripts/code_exec.ts +213 -0
  439. package/src/scripts/code_exec_files.ts +236 -0
  440. package/src/scripts/code_exec_multi_session.ts +241 -0
  441. package/src/scripts/code_exec_ptc.ts +334 -0
  442. package/src/scripts/code_exec_session.ts +282 -0
  443. package/src/scripts/code_exec_simple.ts +147 -0
  444. package/src/scripts/content.ts +138 -0
  445. package/src/scripts/empty_input.ts +137 -0
  446. package/src/scripts/handoff-test.ts +135 -0
  447. package/src/scripts/image.ts +178 -0
  448. package/src/scripts/memory.ts +97 -0
  449. package/src/scripts/multi-agent-chain.ts +331 -0
  450. package/src/scripts/multi-agent-conditional.ts +221 -0
  451. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  452. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  453. package/src/scripts/multi-agent-parallel-start.ts +265 -0
  454. package/src/scripts/multi-agent-parallel.ts +394 -0
  455. package/src/scripts/multi-agent-sequence.ts +217 -0
  456. package/src/scripts/multi-agent-supervisor.ts +365 -0
  457. package/src/scripts/multi-agent-test.ts +186 -0
  458. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  459. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  460. package/src/scripts/parallel-tools-test.ts +340 -0
  461. package/src/scripts/programmatic_exec.ts +396 -0
  462. package/src/scripts/programmatic_exec_agent.ts +231 -0
  463. package/src/scripts/search.ts +146 -0
  464. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  465. package/src/scripts/simple.ts +225 -0
  466. package/src/scripts/single-agent-metadata-test.ts +198 -0
  467. package/src/scripts/stream.ts +140 -0
  468. package/src/scripts/test-custom-prompt-key.ts +145 -0
  469. package/src/scripts/test-handoff-input.ts +170 -0
  470. package/src/scripts/test-handoff-preamble.ts +277 -0
  471. package/src/scripts/test-multi-agent-list-handoff.ts +417 -0
  472. package/src/scripts/test-parallel-agent-labeling.ts +325 -0
  473. package/src/scripts/test-parallel-handoffs.ts +291 -0
  474. package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
  475. package/src/scripts/test-thinking-handoff.ts +155 -0
  476. package/src/scripts/test-tools-before-handoff.ts +226 -0
  477. package/src/scripts/test_code_api.ts +361 -0
  478. package/src/scripts/thinking-bedrock.ts +159 -0
  479. package/src/scripts/thinking.ts +171 -0
  480. package/src/scripts/tool_search.ts +162 -0
  481. package/src/scripts/tools.ts +177 -0
  482. package/src/specs/agent-handoffs.test.ts +888 -0
  483. package/src/specs/anthropic.simple.test.ts +387 -0
  484. package/src/specs/azure.simple.test.ts +364 -0
  485. package/src/specs/cache.simple.test.ts +396 -0
  486. package/src/specs/deepseek.simple.test.ts +283 -0
  487. package/src/specs/emergency-prune.test.ts +407 -0
  488. package/src/specs/moonshot.simple.test.ts +358 -0
  489. package/src/specs/openai.simple.test.ts +311 -0
  490. package/src/specs/openrouter.simple.test.ts +107 -0
  491. package/src/specs/prune.test.ts +901 -0
  492. package/src/specs/reasoning.test.ts +201 -0
  493. package/src/specs/spec.utils.ts +3 -0
  494. package/src/specs/thinking-handoff.test.ts +620 -0
  495. package/src/specs/thinking-prune.test.ts +703 -0
  496. package/src/specs/token-distribution-edge-case.test.ts +316 -0
  497. package/src/specs/token-memoization.test.ts +32 -0
  498. package/src/specs/tool-error.test.ts +198 -0
  499. package/src/splitStream.test.ts +691 -0
  500. package/src/splitStream.ts +234 -0
  501. package/src/stream.test.ts +94 -0
  502. package/src/stream.ts +801 -0
  503. package/src/test/mockTools.ts +386 -0
  504. package/src/tools/BrowserTools.ts +393 -0
  505. package/src/tools/Calculator.test.ts +278 -0
  506. package/src/tools/Calculator.ts +46 -0
  507. package/src/tools/CodeExecutor.ts +270 -0
  508. package/src/tools/ProgrammaticToolCalling.ts +785 -0
  509. package/src/tools/ToolNode.ts +674 -0
  510. package/src/tools/ToolSearch.ts +1095 -0
  511. package/src/tools/__tests__/BrowserTools.test.ts +265 -0
  512. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.ts +319 -0
  513. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +1006 -0
  514. package/src/tools/__tests__/ToolSearch.integration.test.ts +162 -0
  515. package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
  516. package/src/tools/handlers.ts +363 -0
  517. package/src/tools/schema.ts +37 -0
  518. package/src/tools/search/anthropic.ts +51 -0
  519. package/src/tools/search/content.test.ts +173 -0
  520. package/src/tools/search/content.ts +147 -0
  521. package/src/tools/search/firecrawl.ts +210 -0
  522. package/src/tools/search/format.ts +250 -0
  523. package/src/tools/search/highlights.ts +320 -0
  524. package/src/tools/search/index.ts +3 -0
  525. package/src/tools/search/jina-reranker.test.ts +130 -0
  526. package/src/tools/search/output.md +2775 -0
  527. package/src/tools/search/rerankers.ts +242 -0
  528. package/src/tools/search/schema.ts +113 -0
  529. package/src/tools/search/search.ts +768 -0
  530. package/src/tools/search/serper-scraper.ts +155 -0
  531. package/src/tools/search/test.html +884 -0
  532. package/src/tools/search/test.md +643 -0
  533. package/src/tools/search/test.ts +159 -0
  534. package/src/tools/search/tool.ts +657 -0
  535. package/src/tools/search/types.ts +665 -0
  536. package/src/tools/search/utils.ts +79 -0
  537. package/src/types/graph.test.ts +218 -0
  538. package/src/types/graph.ts +533 -0
  539. package/src/types/index.ts +6 -0
  540. package/src/types/llm.ts +140 -0
  541. package/src/types/messages.ts +4 -0
  542. package/src/types/run.ts +128 -0
  543. package/src/types/stream.ts +417 -0
  544. package/src/types/tools.ts +355 -0
  545. package/src/utils/contextAnalytics.ts +103 -0
  546. package/src/utils/events.ts +32 -0
  547. package/src/utils/graph.ts +11 -0
  548. package/src/utils/handlers.ts +107 -0
  549. package/src/utils/index.ts +9 -0
  550. package/src/utils/llm.ts +26 -0
  551. package/src/utils/llmConfig.ts +208 -0
  552. package/src/utils/logging.ts +48 -0
  553. package/src/utils/misc.ts +57 -0
  554. package/src/utils/run.ts +106 -0
  555. package/src/utils/schema.ts +35 -0
  556. package/src/utils/title.ts +177 -0
  557. package/src/utils/tokens.ts +142 -0
  558. package/src/utils/toonFormat.ts +475 -0
@@ -0,0 +1,1018 @@
1
+ import { AzureOpenAI } from 'openai';
2
+ import { ChatXAI as ChatXAI$1 } from '@langchain/xai';
3
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
4
+ import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
5
+ import '@langchain/core/utils/function_calling';
6
+ import { ChatDeepSeek as ChatDeepSeek$1 } from '@langchain/deepseek';
7
+ import { getEndpoint, AzureChatOpenAI as AzureChatOpenAI$1, ChatOpenAI as ChatOpenAI$1, OpenAIClient } from '@langchain/openai';
8
+ import { _convertMessagesToOpenAIParams, isReasoningModel, _convertMessagesToOpenAIResponsesParams, _convertOpenAIResponsesDeltaToBaseMessageChunk } from './utils/index.mjs';
9
+ import '../../common/enum.mjs';
10
+ import 'nanoid';
11
+ import '../../messages/core.mjs';
12
+ import '../../utils/toonFormat.mjs';
13
+ import { sleep } from '../../utils/run.mjs';
14
+ import 'js-tiktoken';
15
+ import 'zod-to-json-schema';
16
+
17
/** Immediately invokes `fn` and returns its result (IIFE helper). */
// eslint-disable-next-line @typescript-eslint/explicit-function-return-type
function iife(fn) {
    return fn();
}
19
/**
 * Detects whether `headers` is a WHATWG `Headers` instance.
 * Uses the `Object.prototype.toString` brand check so it also matches
 * instances created in another realm; returns false when the `Headers`
 * global is unavailable.
 *
 * @param {unknown} headers - Candidate value.
 * @returns {boolean} True only for `Headers` instances.
 */
function isHeaders(headers) {
    if (typeof Headers === 'undefined') {
        return false;
    }
    if (headers === null || typeof headers !== 'object') {
        return false;
    }
    return Object.prototype.toString.call(headers) === '[object Headers]';
}
25
/**
 * Normalizes any supported header representation into a plain key/value
 * record. Accepts a `Headers` instance, an array of `[key, value]` pairs,
 * a NullableHeaders-like wrapper (an object whose `values` property is a
 * `Headers`), or a plain object (non-string values are dropped). Anything
 * else yields an empty record. Note: keys pass through `Headers`, which
 * lower-cases them.
 *
 * @param {unknown} headers - Header value in any supported shape.
 * @returns {Record<string, string>} Plain object of header entries.
 */
function normalizeHeaders(headers) {
    let resolved;
    if (isHeaders(headers)) {
        // Already a Headers instance — use it directly.
        resolved = headers;
    }
    else if (Array.isArray(headers)) {
        // Array of [key, value] pairs.
        resolved = new Headers(headers);
    }
    else if (typeof headers === 'object' &&
        headers !== null &&
        'values' in headers &&
        isHeaders(headers.values)) {
        // NullableHeaders-like wrapper: unwrap its inner Headers.
        resolved = headers.values;
    }
    else if (typeof headers === 'object' && headers !== null) {
        // Plain object: keep only string-valued entries.
        const stringEntries = Object.entries(headers).filter(([, value]) => typeof value === 'string');
        resolved = new Headers(stringEntries);
    }
    else {
        resolved = new Headers();
    }
    return Object.fromEntries(resolved.entries());
}
53
/**
 * Builds a zero-argument callback that aborts the given controller.
 * Used both as a timeout callback and as an 'abort' event listener.
 *
 * @param {AbortController} controller - Controller to abort when invoked.
 * @returns {() => void} Abort callback.
 */
function createAbortHandler(controller) {
    return () => controller.abort();
}
58
/**
 * OpenAI SDK client whose `fetchWithTimeout` wires an external
 * AbortController into both the caller's signal and a timeout, and exposes
 * the installed abort handler so callers can detach it later.
 */
class CustomOpenAIClient extends OpenAIClient {
    // Most recent abort handler installed by fetchWithTimeout.
    abortHandler;
    /**
     * Performs a fetch that aborts `controller` when either the caller's
     * `init.signal` fires or `ms` milliseconds elapse. The timeout is
     * cleared once the fetch settles.
     *
     * @param {string | URL} url - Request URL.
     * @param {object} [init] - Fetch options; its `signal` is detached and replaced by `controller.signal`.
     * @param {number} ms - Timeout in milliseconds.
     * @param {AbortController} controller - Controller governing the request.
     * @returns {Promise<Response>} The in-flight fetch promise.
     */
    async fetchWithTimeout(url, init, ms, controller) {
        const { signal, ...options } = init || {};
        const handler = createAbortHandler(controller);
        this.abortHandler = handler;
        if (signal)
            signal.addEventListener('abort', handler, { once: true });
        // BUG FIX: was `setTimeout(() => handler, ms)`, which scheduled a
        // callback that merely *returned* the handler without calling it,
        // so the timeout never aborted the request. Invoke the handler.
        const timeout = setTimeout(handler, ms);
        const fetchOptions = {
            signal: controller.signal,
            ...options,
        };
        if (fetchOptions.method != null) {
            // Custom methods like 'patch' need to be uppercased
            // See https://github.com/nodejs/undici/issues/2294
            fetchOptions.method = fetchOptions.method.toUpperCase();
        }
        return (
        // use undefined this binding; fetch errors if bound to something else in browser/cloudflare
        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
        /** @ts-ignore */
        this.fetch.call(undefined, url, fetchOptions).finally(() => {
            clearTimeout(timeout);
        }));
    }
}
85
/**
 * Azure OpenAI SDK client whose `fetchWithTimeout` wires an external
 * AbortController into both the caller's signal and a timeout, and exposes
 * the installed abort handler so callers can detach it later.
 */
class CustomAzureOpenAIClient extends AzureOpenAI {
    // Most recent abort handler installed by fetchWithTimeout.
    abortHandler;
    /**
     * Performs a fetch that aborts `controller` when either the caller's
     * `init.signal` fires or `ms` milliseconds elapse. The timeout is
     * cleared once the fetch settles.
     *
     * @param {string | URL} url - Request URL.
     * @param {object} [init] - Fetch options; its `signal` is detached and replaced by `controller.signal`.
     * @param {number} ms - Timeout in milliseconds.
     * @param {AbortController} controller - Controller governing the request.
     * @returns {Promise<Response>} The in-flight fetch promise.
     */
    async fetchWithTimeout(url, init, ms, controller) {
        const { signal, ...options } = init || {};
        const handler = createAbortHandler(controller);
        this.abortHandler = handler;
        if (signal)
            signal.addEventListener('abort', handler, { once: true });
        // BUG FIX: was `setTimeout(() => handler, ms)`, which scheduled a
        // callback that merely *returned* the handler without calling it,
        // so the timeout never aborted the request. Invoke the handler.
        const timeout = setTimeout(handler, ms);
        const fetchOptions = {
            signal: controller.signal,
            ...options,
        };
        if (fetchOptions.method != null) {
            // Custom methods like 'patch' need to be uppercased
            // See https://github.com/nodejs/undici/issues/2294
            fetchOptions.method = fetchOptions.method.toUpperCase();
        }
        return (
        // use undefined this binding; fetch errors if bound to something else in browser/cloudflare
        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
        /** @ts-ignore */
        this.fetch.call(undefined, url, fetchOptions).finally(() => {
            clearTimeout(timeout);
        }));
    }
}
112
/**
 * OpenAI chat model that installs a `CustomOpenAIClient`, supports an
 * optional per-chunk streaming delay (`_lc_stream_delay`), and merges
 * reasoning params from the constructor and per-call options.
 */
/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
class ChatOpenAI extends ChatOpenAI$1 {
    // Optional delay (ms) slept after each yielded stream chunk; undefined disables throttling.
    _lc_stream_delay;
    constructor(fields) {
        super(fields);
        this._lc_stream_delay = fields?._lc_stream_delay;
    }
    // Exposes the underlying SDK client (protected on the parent class).
    get exposedClient() {
        return this.client;
    }
    static lc_name() {
        return 'IllumaOpenAI';
    }
    // Lazily builds a CustomOpenAIClient (maxRetries: 0 — retries are handled
    // elsewhere) and merges per-call options over the stored client config.
    _getClientOptions(options) {
        if (!this.client) {
            const openAIEndpointConfig = {
                baseURL: this.clientConfig.baseURL,
            };
            const endpoint = getEndpoint(openAIEndpointConfig);
            const params = {
                ...this.clientConfig,
                baseURL: endpoint,
                timeout: this.timeout,
                maxRetries: 0,
            };
            if (params.baseURL == null) {
                delete params.baseURL;
            }
            this.client = new CustomOpenAIClient(params);
        }
        const requestOptions = {
            ...this.clientConfig,
            ...options,
        };
        return requestOptions;
    }
    /**
     * Returns backwards compatible reasoning parameters from constructor params and call options
     * @internal
     */
    getReasoningParams(options) {
        // apply options in reverse order of importance -- newer options supersede older options
        let reasoning;
        if (this.reasoning !== undefined) {
            reasoning = {
                ...reasoning,
                ...this.reasoning,
            };
        }
        if (options?.reasoning !== undefined) {
            reasoning = {
                ...reasoning,
                ...options.reasoning,
            };
        }
        return reasoning;
    }
    // Parent-compatible alias for getReasoningParams.
    _getReasoningParams(options) {
        return this.getReasoningParams(options);
    }
    // Streams via the Responses API when enabled; otherwise delegates to the
    // Chat Completions path in _streamResponseChunks2.
    async *_streamResponseChunks(messages, options, runManager) {
        if (!this._useResponseApi(options)) {
            return yield* this._streamResponseChunks2(messages, options, runManager);
        }
        const streamIterable = await this.responseApiWithRetry({
            ...this.invocationParams(options, { streaming: true }),
            input: _convertMessagesToOpenAIResponsesParams(messages, this.model, this.zdrEnabled),
            stream: true,
        }, options);
        for await (const data of streamIterable) {
            const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(data);
            if (chunk == null)
                continue;
            yield chunk;
            // Optional throttle between chunks.
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
            await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
        }
        return;
    }
    // Chat Completions streaming: converts deltas to message chunks, carries
    // reasoning / provider-specific fields into additional_kwargs, and emits a
    // final usage-only chunk when the stream reported usage.
    async *_streamResponseChunks2(messages, options, runManager) {
        const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model);
        const params = {
            ...this.invocationParams(options, {
                streaming: true,
            }),
            messages: messagesMapped,
            stream: true,
        };
        let defaultRole;
        const streamIterable = await this.completionWithRetry(params, options);
        let usage;
        for await (const data of streamIterable) {
            const choice = data.choices[0];
            // Usage may arrive on a chunk with no choices; remember the latest.
            if (data.usage) {
                usage = data.usage;
            }
            if (!choice) {
                continue;
            }
            const { delta } = choice;
            if (!delta) {
                continue;
            }
            const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
            // Providers expose reasoning under either `reasoning_content` or `reasoning`.
            if ('reasoning_content' in delta) {
                chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
            }
            else if ('reasoning' in delta) {
                chunk.additional_kwargs.reasoning_content = delta.reasoning;
            }
            if ('provider_specific_fields' in delta) {
                chunk.additional_kwargs.provider_specific_fields =
                    delta.provider_specific_fields;
            }
            defaultRole = delta.role ?? defaultRole;
            const newTokenIndices = {
                prompt: options.promptIndex ?? 0,
                completion: choice.index ?? 0,
            };
            if (typeof chunk.content !== 'string') {
                // eslint-disable-next-line no-console
                console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
                continue;
            }
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            const generationInfo = { ...newTokenIndices };
            if (choice.finish_reason != null) {
                generationInfo.finish_reason = choice.finish_reason;
                // Only include system fingerprint in the last chunk for now
                // to avoid concatenation issues
                generationInfo.system_fingerprint = data.system_fingerprint;
                generationInfo.model_name = data.model;
                generationInfo.service_tier = data.service_tier;
            }
            if (this.logprobs == true) {
                generationInfo.logprobs = choice.logprobs;
            }
            const generationChunk = new ChatGenerationChunk({
                message: chunk,
                text: chunk.content,
                generationInfo,
            });
            yield generationChunk;
            // Optional throttle between chunks.
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
            await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
        }
        // Emit a trailing empty-content chunk carrying usage metadata.
        if (usage) {
            const inputTokenDetails = {
                ...(usage.prompt_tokens_details?.audio_tokens != null && {
                    audio: usage.prompt_tokens_details.audio_tokens,
                }),
                ...(usage.prompt_tokens_details?.cached_tokens != null && {
                    cache_read: usage.prompt_tokens_details.cached_tokens,
                }),
            };
            const outputTokenDetails = {
                ...(usage.completion_tokens_details?.audio_tokens != null && {
                    audio: usage.completion_tokens_details.audio_tokens,
                }),
                ...(usage.completion_tokens_details?.reasoning_tokens != null && {
                    reasoning: usage.completion_tokens_details.reasoning_tokens,
                }),
            };
            const generationChunk = new ChatGenerationChunk({
                message: new AIMessageChunk({
                    content: '',
                    response_metadata: {
                        usage: { ...usage },
                    },
                    usage_metadata: {
                        input_tokens: usage.prompt_tokens,
                        output_tokens: usage.completion_tokens,
                        total_tokens: usage.total_tokens,
                        ...(Object.keys(inputTokenDetails).length > 0 && {
                            input_token_details: inputTokenDetails,
                        }),
                        ...(Object.keys(outputTokenDetails).length > 0 && {
                            output_token_details: outputTokenDetails,
                        }),
                    },
                }),
                text: '',
            });
            yield generationChunk;
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
        }
        // Surface cancellation that happened while the iterator was draining.
        if (options.signal?.aborted === true) {
            throw new Error('AbortError');
        }
    }
}
309
/**
 * Azure OpenAI chat model that installs a `CustomAzureOpenAIClient`, tags
 * requests with an `illuma-azure-openai-v2` User-Agent, gates reasoning
 * params on `isReasoningModel`, and supports an optional per-chunk
 * streaming delay (`_lc_stream_delay`).
 */
/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
class AzureChatOpenAI extends AzureChatOpenAI$1 {
    // Optional delay (ms) slept after each yielded stream chunk; undefined disables throttling.
    _lc_stream_delay;
    constructor(fields) {
        super(fields);
        this._lc_stream_delay = fields?._lc_stream_delay;
    }
    // Exposes the underlying SDK client (protected on the parent class).
    get exposedClient() {
        return this.client;
    }
    static lc_name() {
        return 'IllumaAzureOpenAI';
    }
    /**
     * Returns backwards compatible reasoning parameters from constructor params and call options
     * @internal
     */
    getReasoningParams(options) {
        // Unlike the non-Azure ChatOpenAI override, reasoning is only emitted
        // for models that isReasoningModel recognizes.
        if (!isReasoningModel(this.model)) {
            return;
        }
        // apply options in reverse order of importance -- newer options supersede older options
        let reasoning;
        if (this.reasoning !== undefined) {
            reasoning = {
                ...reasoning,
                ...this.reasoning,
            };
        }
        if (options?.reasoning !== undefined) {
            reasoning = {
                ...reasoning,
                ...options.reasoning,
            };
        }
        return reasoning;
    }
    // Parent-compatible alias for getReasoningParams.
    _getReasoningParams(options) {
        return this.getReasoningParams(options);
    }
    // Lazily builds a CustomAzureOpenAIClient (maxRetries: 0) with Azure
    // endpoint/auth config, then merges per-call options; when an API key is
    // used, also injects the `api-key` header and `api-version` query param.
    _getClientOptions(options) {
        if (!this.client) {
            const openAIEndpointConfig = {
                azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                azureOpenAIApiKey: this.azureOpenAIApiKey,
                azureOpenAIBasePath: this.azureOpenAIBasePath,
                azureADTokenProvider: this.azureADTokenProvider,
                baseURL: this.clientConfig.baseURL,
            };
            const endpoint = getEndpoint(openAIEndpointConfig);
            const params = {
                ...this.clientConfig,
                baseURL: endpoint,
                timeout: this.timeout,
                maxRetries: 0,
            };
            // API key auth only applies when no AD token provider is configured.
            if (!this.azureADTokenProvider) {
                params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
            }
            if (params.baseURL == null) {
                delete params.baseURL;
            }
            const defaultHeaders = normalizeHeaders(params.defaultHeaders);
            // NOTE(review): normalizeHeaders lower-cases keys via Headers.entries(),
            // so `defaultHeaders['User-Agent']` can never match a normalized key;
            // a caller-supplied User-Agent is overwritten rather than suffixed —
            // confirm whether the suffixing branch is intended to be reachable.
            params.defaultHeaders = {
                ...params.defaultHeaders,
                'User-Agent': defaultHeaders['User-Agent'] != null
                    ? `${defaultHeaders['User-Agent']}: illuma-azure-openai-v2`
                    : 'illuma-azure-openai-v2',
            };
            this.client = new CustomAzureOpenAIClient({
                apiVersion: this.azureOpenAIApiVersion,
                azureADTokenProvider: this.azureADTokenProvider,
                ...params,
            });
        }
        const requestOptions = {
            ...this.clientConfig,
            ...options,
        };
        if (this.azureOpenAIApiKey != null) {
            requestOptions.headers = {
                'api-key': this.azureOpenAIApiKey,
                ...requestOptions.headers,
            };
            requestOptions.query = {
                'api-version': this.azureOpenAIApiVersion,
                ...requestOptions.query,
            };
        }
        return requestOptions;
    }
    // Streams via the Responses API when enabled; otherwise falls back to the
    // parent's Chat Completions streaming.
    async *_streamResponseChunks(messages, options, runManager) {
        if (!this._useResponseApi(options)) {
            return yield* super._streamResponseChunks(messages, options, runManager);
        }
        const streamIterable = await this.responseApiWithRetry({
            ...this.invocationParams(options, { streaming: true }),
            input: _convertMessagesToOpenAIResponsesParams(messages, this.model, this.zdrEnabled),
            stream: true,
        }, options);
        for await (const data of streamIterable) {
            const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(data);
            if (chunk == null)
                continue;
            yield chunk;
            // Optional throttle between chunks.
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
            await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
        }
        return;
    }
}
423
/**
 * DeepSeek chat model that converts messages with reasoning content
 * included, surfaces `reasoning_content` on responses and stream deltas,
 * and installs a `CustomOpenAIClient`.
 */
class ChatDeepSeek extends ChatDeepSeek$1 {
    // Exposes the underlying SDK client (protected on the parent class).
    get exposedClient() {
        return this.client;
    }
    static lc_name() {
        return 'IllumaDeepSeek';
    }
    // Maps LangChain messages to OpenAI params, preserving reasoning content.
    _convertMessages(messages) {
        return _convertMessagesToOpenAIParams(messages, this.model, {
            includeReasoningContent: true,
        });
    }
    // Non-streaming generation: delegates to the parent when streaming is
    // requested, otherwise performs one completion call and converts each
    // choice into a generation with token-usage output.
    async _generate(messages, options, runManager) {
        const params = this.invocationParams(options);
        if (params.stream === true) {
            return super._generate(messages, options ?? {}, runManager);
        }
        const messagesMapped = this._convertMessages(messages);
        const data = await this.completionWithRetry({
            ...params,
            stream: false,
            messages: messagesMapped,
        }, {
            signal: options?.signal,
            ...options?.options,
        });
        const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};
        const generations = [];
        for (const part of data.choices ?? []) {
            const text = part.message.content ?? '';
            const generation = {
                // Non-string content is flattened to '' (only string text is supported here).
                text: typeof text === 'string' ? text : '',
                message: this._convertResponseToMessage(part, data),
            };
            generation.generationInfo = {
                ...(part.finish_reason != null
                    ? { finish_reason: part.finish_reason }
                    : {}),
                ...(part.logprobs ? { logprobs: part.logprobs } : {}),
            };
            generations.push(generation);
        }
        return {
            generations,
            llmOutput: {
                tokenUsage: {
                    completionTokens: completion_tokens,
                    promptTokens: prompt_tokens,
                    totalTokens: total_tokens,
                },
            },
        };
    }
    // Converts one completion choice to an AIMessage, parsing tool-call
    // arguments and copying non-empty reasoning_content into additional_kwargs.
    _convertResponseToMessage(choice, data) {
        const { message } = choice;
        const rawToolCalls = message.tool_calls;
        const toolCalls = rawToolCalls?.map((tc) => ({
            id: tc.id,
            name: tc.function.name,
            args: JSON.parse(tc.function.arguments || '{}'),
            type: 'tool_call',
        }));
        const additional_kwargs = {};
        if (rawToolCalls) {
            additional_kwargs.tool_calls = rawToolCalls;
        }
        if ('reasoning_content' in message &&
            message.reasoning_content != null &&
            message.reasoning_content !== '') {
            additional_kwargs.reasoning_content = message.reasoning_content;
        }
        return new AIMessage({
            content: message.content ?? '',
            tool_calls: toolCalls,
            additional_kwargs,
            usage_metadata: data.usage
                ? {
                    input_tokens: data.usage.prompt_tokens,
                    output_tokens: data.usage.completion_tokens,
                    total_tokens: data.usage.total_tokens,
                }
                : undefined,
            response_metadata: {
                model_name: data.model,
                system_fingerprint: data.system_fingerprint,
                finish_reason: choice.finish_reason,
            },
        });
    }
    // Lazily builds a CustomOpenAIClient (maxRetries: 0) and merges per-call
    // options over the stored client config.
    _getClientOptions(options) {
        if (!this.client) {
            const openAIEndpointConfig = {
                baseURL: this.clientConfig.baseURL,
            };
            const endpoint = getEndpoint(openAIEndpointConfig);
            const params = {
                ...this.clientConfig,
                baseURL: endpoint,
                timeout: this.timeout,
                maxRetries: 0,
            };
            if (params.baseURL == null) {
                delete params.baseURL;
            }
            this.client = new CustomOpenAIClient(params);
        }
        const requestOptions = {
            ...this.clientConfig,
            ...options,
        };
        return requestOptions;
    }
    // Chat Completions streaming with reasoning_content passthrough.
    // NOTE(review): unlike the sibling ChatOpenAI/ChatMoonshot stream loops,
    // this one applies no `_lc_stream_delay` throttle — confirm if intentional.
    async *_streamResponseChunks(messages, options, runManager) {
        const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model, {
            includeReasoningContent: true,
        });
        const params = {
            ...this.invocationParams(options, {
                streaming: true,
            }),
            messages: messagesMapped,
            stream: true,
        };
        let defaultRole;
        const streamIterable = await this.completionWithRetry(params, options);
        let usage;
        for await (const data of streamIterable) {
            const choice = data.choices[0];
            // Usage may arrive on a chunk with no choices; remember the latest.
            if (data.usage) {
                usage = data.usage;
            }
            if (!choice) {
                continue;
            }
            const { delta } = choice;
            if (!delta) {
                continue;
            }
            const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
            if ('reasoning_content' in delta) {
                chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
            }
            defaultRole = delta.role ?? defaultRole;
            const newTokenIndices = {
                prompt: options.promptIndex ?? 0,
                completion: choice.index ?? 0,
            };
            if (typeof chunk.content !== 'string') {
                // eslint-disable-next-line no-console
                console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
                continue;
            }
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            const generationInfo = { ...newTokenIndices };
            if (choice.finish_reason != null) {
                generationInfo.finish_reason = choice.finish_reason;
                generationInfo.system_fingerprint = data.system_fingerprint;
                generationInfo.model_name = data.model;
                generationInfo.service_tier = data.service_tier;
            }
            if (this.logprobs == true) {
                generationInfo.logprobs = choice.logprobs;
            }
            const generationChunk = new ChatGenerationChunk({
                message: chunk,
                text: chunk.content,
                generationInfo,
            });
            yield generationChunk;
            await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
        }
        // Emit a trailing empty-content chunk carrying usage metadata.
        if (usage) {
            const inputTokenDetails = {
                ...(usage.prompt_tokens_details?.audio_tokens != null && {
                    audio: usage.prompt_tokens_details.audio_tokens,
                }),
                ...(usage.prompt_tokens_details?.cached_tokens != null && {
                    cache_read: usage.prompt_tokens_details.cached_tokens,
                }),
            };
            const outputTokenDetails = {
                ...(usage.completion_tokens_details?.audio_tokens != null && {
                    audio: usage.completion_tokens_details.audio_tokens,
                }),
                ...(usage.completion_tokens_details?.reasoning_tokens != null && {
                    reasoning: usage.completion_tokens_details.reasoning_tokens,
                }),
            };
            const generationChunk = new ChatGenerationChunk({
                message: new AIMessageChunk({
                    content: '',
                    response_metadata: {
                        usage: { ...usage },
                    },
                    usage_metadata: {
                        input_tokens: usage.prompt_tokens,
                        output_tokens: usage.completion_tokens,
                        total_tokens: usage.total_tokens,
                        ...(Object.keys(inputTokenDetails).length > 0 && {
                            input_token_details: inputTokenDetails,
                        }),
                        ...(Object.keys(outputTokenDetails).length > 0 && {
                            output_token_details: outputTokenDetails,
                        }),
                    },
                }),
                text: '',
            });
            yield generationChunk;
        }
        // Surface cancellation that happened while the iterator was draining.
        if (options.signal?.aborted === true) {
            throw new Error('AbortError');
        }
    }
}
638
/**
 * Moonshot chat model built on the custom ChatOpenAI wrapper; converts
 * messages with reasoning content included and surfaces
 * `reasoning_content` on responses and stream deltas.
 */
class ChatMoonshot extends ChatOpenAI {
    static lc_name() {
        // NOTE(review): 'LibreChatMoonshot' breaks the 'Illuma*' naming used by
        // sibling classes; likely a leftover — but lc_name participates in
        // serialization, so renaming may break stored data. Confirm before changing.
        return 'LibreChatMoonshot';
    }
    // Maps LangChain messages to OpenAI params, preserving reasoning content.
    _convertMessages(messages) {
        return _convertMessagesToOpenAIParams(messages, this.model, {
            includeReasoningContent: true,
        });
    }
    // Non-streaming generation: delegates to the parent when streaming is
    // requested, otherwise performs one completion call and converts each
    // choice into a generation with token-usage output.
    async _generate(messages, options, runManager) {
        const params = this.invocationParams(options);
        if (params.stream === true) {
            return super._generate(messages, options, runManager);
        }
        const messagesMapped = this._convertMessages(messages);
        const data = await this.completionWithRetry({
            ...params,
            stream: false,
            messages: messagesMapped,
        }, {
            signal: options.signal,
            ...options.options,
        });
        const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};
        const generations = [];
        for (const part of data.choices ?? []) {
            const text = part.message.content ?? '';
            const generation = {
                // Non-string content is flattened to '' (only string text is supported here).
                text: typeof text === 'string' ? text : '',
                message: this._convertResponseToMessage(part, data),
            };
            generation.generationInfo = {
                // Truthiness check here (vs. `!= null` in ChatDeepSeek) — empty-string
                // finish_reason would be dropped.
                ...(part.finish_reason ? { finish_reason: part.finish_reason } : {}),
                ...(part.logprobs ? { logprobs: part.logprobs } : {}),
            };
            generations.push(generation);
        }
        return {
            generations,
            llmOutput: {
                tokenUsage: {
                    completionTokens: completion_tokens,
                    promptTokens: prompt_tokens,
                    totalTokens: total_tokens,
                },
            },
        };
    }
    // Converts one completion choice to an AIMessage, parsing tool-call
    // arguments and copying non-empty reasoning_content into additional_kwargs.
    _convertResponseToMessage(choice, data) {
        const { message } = choice;
        const rawToolCalls = message.tool_calls;
        const toolCalls = rawToolCalls?.map((tc) => ({
            id: tc.id,
            name: tc.function.name,
            args: JSON.parse(tc.function.arguments || '{}'),
            type: 'tool_call',
        }));
        const additional_kwargs = {};
        if (rawToolCalls) {
            additional_kwargs.tool_calls = rawToolCalls;
        }
        if ('reasoning_content' in message &&
            message.reasoning_content != null &&
            message.reasoning_content !== '') {
            additional_kwargs.reasoning_content = message.reasoning_content;
        }
        return new AIMessage({
            content: message.content ?? '',
            tool_calls: toolCalls,
            additional_kwargs,
            usage_metadata: data.usage
                ? {
                    input_tokens: data.usage.prompt_tokens,
                    output_tokens: data.usage.completion_tokens,
                    total_tokens: data.usage.total_tokens,
                }
                : undefined,
            response_metadata: {
                model_name: data.model,
                system_fingerprint: data.system_fingerprint,
                finish_reason: choice.finish_reason,
            },
        });
    }
    // Chat Completions streaming with reasoning_content passthrough and the
    // inherited `_lc_stream_delay` throttle between chunks.
    async *_streamResponseChunks(messages, options, runManager) {
        const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model, {
            includeReasoningContent: true,
        });
        const params = {
            ...this.invocationParams(options, {
                streaming: true,
            }),
            messages: messagesMapped,
            stream: true,
        };
        let defaultRole;
        const streamIterable = await this.completionWithRetry(params, options);
        let usage;
        for await (const data of streamIterable) {
            const choice = data.choices[0];
            // Usage may arrive on a chunk with no choices; remember the latest.
            if (data.usage) {
                usage = data.usage;
            }
            if (!choice) {
                continue;
            }
            const { delta } = choice;
            if (!delta) {
                continue;
            }
            const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
            if ('reasoning_content' in delta) {
                chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
            }
            defaultRole = delta.role ?? defaultRole;
            const newTokenIndices = {
                prompt: options.promptIndex ?? 0,
                completion: choice.index ?? 0,
            };
            if (typeof chunk.content !== 'string') {
                // eslint-disable-next-line no-console
                console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
                continue;
            }
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            const generationInfo = { ...newTokenIndices };
            if (choice.finish_reason != null) {
                generationInfo.finish_reason = choice.finish_reason;
                generationInfo.system_fingerprint = data.system_fingerprint;
                generationInfo.model_name = data.model;
                generationInfo.service_tier = data.service_tier;
            }
            if (this.logprobs == true) {
                generationInfo.logprobs = choice.logprobs;
            }
            const generationChunk = new ChatGenerationChunk({
                message: chunk,
                text: chunk.content,
                generationInfo,
            });
            yield generationChunk;
            // Optional throttle between chunks (inherited from ChatOpenAI).
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
            await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
        }
        // Emit a trailing empty-content chunk carrying usage metadata.
        if (usage) {
            const inputTokenDetails = {
                ...(usage.prompt_tokens_details?.audio_tokens != null && {
                    audio: usage.prompt_tokens_details.audio_tokens,
                }),
                ...(usage.prompt_tokens_details?.cached_tokens != null && {
                    cache_read: usage.prompt_tokens_details.cached_tokens,
                }),
            };
            const outputTokenDetails = {
                ...(usage.completion_tokens_details?.audio_tokens != null && {
                    audio: usage.completion_tokens_details.audio_tokens,
                }),
                ...(usage.completion_tokens_details?.reasoning_tokens != null && {
                    reasoning: usage.completion_tokens_details.reasoning_tokens,
                }),
            };
            const generationChunk = new ChatGenerationChunk({
                message: new AIMessageChunk({
                    content: '',
                    response_metadata: {
                        usage: { ...usage },
                    },
                    usage_metadata: {
                        input_tokens: usage.prompt_tokens,
                        output_tokens: usage.completion_tokens,
                        total_tokens: usage.total_tokens,
                        ...(Object.keys(inputTokenDetails).length > 0 && {
                            input_token_details: inputTokenDetails,
                        }),
                        ...(Object.keys(outputTokenDetails).length > 0 && {
                            output_token_details: outputTokenDetails,
                        }),
                    },
                }),
                text: '',
            });
            yield generationChunk;
            if (this._lc_stream_delay != null) {
                await sleep(this._lc_stream_delay);
            }
        }
        // Surface cancellation that happened while the iterator was draining.
        if (options.signal?.aborted === true) {
            throw new Error('AbortError');
        }
    }
}
831
+ class ChatXAI extends ChatXAI$1 {
832
+ _lc_stream_delay;
833
+ constructor(fields) {
834
+ super(fields);
835
+ this._lc_stream_delay = fields?._lc_stream_delay;
836
+ const customBaseURL = fields?.configuration?.baseURL ?? fields?.clientConfig?.baseURL;
837
+ if (customBaseURL != null && customBaseURL) {
838
+ this.clientConfig = {
839
+ ...this.clientConfig,
840
+ baseURL: customBaseURL,
841
+ };
842
+ // Reset the client to force recreation with new config
843
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
844
+ this.client = undefined;
845
+ }
846
+ }
847
+ static lc_name() {
848
+ return 'IllumaXAI';
849
+ }
850
+ get exposedClient() {
851
+ return this.client;
852
+ }
853
+ _getClientOptions(options) {
854
+ if (!this.client) {
855
+ const openAIEndpointConfig = {
856
+ baseURL: this.clientConfig.baseURL,
857
+ };
858
+ const endpoint = getEndpoint(openAIEndpointConfig);
859
+ const params = {
860
+ ...this.clientConfig,
861
+ baseURL: endpoint,
862
+ timeout: this.timeout,
863
+ maxRetries: 0,
864
+ };
865
+ if (params.baseURL == null) {
866
+ delete params.baseURL;
867
+ }
868
+ this.client = new CustomOpenAIClient(params);
869
+ }
870
+ const requestOptions = {
871
+ ...this.clientConfig,
872
+ ...options,
873
+ };
874
+ return requestOptions;
875
+ }
876
+ async *_streamResponseChunks(messages, options, runManager) {
877
+ const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model);
878
+ const params = {
879
+ ...this.invocationParams(options, {
880
+ streaming: true,
881
+ }),
882
+ messages: messagesMapped,
883
+ stream: true,
884
+ };
885
+ let defaultRole;
886
+ const streamIterable = await this.completionWithRetry(params, options);
887
+ let usage;
888
+ for await (const data of streamIterable) {
889
+ const choice = data.choices[0];
890
+ if (data.usage) {
891
+ usage = data.usage;
892
+ }
893
+ if (!choice) {
894
+ continue;
895
+ }
896
+ const { delta } = choice;
897
+ if (!delta) {
898
+ continue;
899
+ }
900
+ const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
901
+ if (chunk.usage_metadata != null) {
902
+ chunk.usage_metadata = {
903
+ input_tokens: chunk.usage_metadata.input_tokens ?? 0,
904
+ output_tokens: chunk.usage_metadata.output_tokens ?? 0,
905
+ total_tokens: chunk.usage_metadata.total_tokens ?? 0,
906
+ };
907
+ }
908
+ if ('reasoning_content' in delta) {
909
+ chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
910
+ }
911
+ defaultRole = delta.role ?? defaultRole;
912
+ const newTokenIndices = {
913
+ prompt: options.promptIndex ?? 0,
914
+ completion: choice.index ?? 0,
915
+ };
916
+ if (typeof chunk.content !== 'string') {
917
+ // eslint-disable-next-line no-console
918
+ console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
919
+ continue;
920
+ }
921
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
922
+ const generationInfo = { ...newTokenIndices };
923
+ if (choice.finish_reason != null) {
924
+ generationInfo.finish_reason = choice.finish_reason;
925
+ // Only include system fingerprint in the last chunk for now
926
+ // to avoid concatenation issues
927
+ generationInfo.system_fingerprint = data.system_fingerprint;
928
+ generationInfo.model_name = data.model;
929
+ generationInfo.service_tier = data.service_tier;
930
+ }
931
+ if (this.logprobs == true) {
932
+ generationInfo.logprobs = choice.logprobs;
933
+ }
934
+ const generationChunk = new ChatGenerationChunk({
935
+ message: chunk,
936
+ text: chunk.content,
937
+ generationInfo,
938
+ });
939
+ yield generationChunk;
940
+ if (this._lc_stream_delay != null) {
941
+ await sleep(this._lc_stream_delay);
942
+ }
943
+ await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
944
+ }
945
+ if (usage) {
946
+ // Type assertion for xAI-specific usage structure
947
+ const xaiUsage = usage;
948
+ const inputTokenDetails = {
949
+ // Standard OpenAI fields
950
+ ...(usage.prompt_tokens_details?.audio_tokens != null && {
951
+ audio: usage.prompt_tokens_details.audio_tokens,
952
+ }),
953
+ ...(usage.prompt_tokens_details?.cached_tokens != null && {
954
+ cache_read: usage.prompt_tokens_details.cached_tokens,
955
+ }),
956
+ // Add xAI-specific prompt token details if they exist
957
+ ...(xaiUsage.prompt_tokens_details?.text_tokens != null && {
958
+ text: xaiUsage.prompt_tokens_details.text_tokens,
959
+ }),
960
+ ...(xaiUsage.prompt_tokens_details?.image_tokens != null && {
961
+ image: xaiUsage.prompt_tokens_details.image_tokens,
962
+ }),
963
+ };
964
+ const outputTokenDetails = {
965
+ // Standard OpenAI fields
966
+ ...(usage.completion_tokens_details?.audio_tokens != null && {
967
+ audio: usage.completion_tokens_details.audio_tokens,
968
+ }),
969
+ ...(usage.completion_tokens_details?.reasoning_tokens != null && {
970
+ reasoning: usage.completion_tokens_details.reasoning_tokens,
971
+ }),
972
+ // Add xAI-specific completion token details if they exist
973
+ ...(xaiUsage.completion_tokens_details?.accepted_prediction_tokens !=
974
+ null && {
975
+ accepted_prediction: xaiUsage.completion_tokens_details.accepted_prediction_tokens,
976
+ }),
977
+ ...(xaiUsage.completion_tokens_details?.rejected_prediction_tokens !=
978
+ null && {
979
+ rejected_prediction: xaiUsage.completion_tokens_details.rejected_prediction_tokens,
980
+ }),
981
+ };
982
+ const generationChunk = new ChatGenerationChunk({
983
+ message: new AIMessageChunk({
984
+ content: '',
985
+ response_metadata: {
986
+ usage: { ...usage },
987
+ // Include xAI-specific metadata if it exists
988
+ ...(xaiUsage.num_sources_used != null && {
989
+ num_sources_used: xaiUsage.num_sources_used,
990
+ }),
991
+ },
992
+ usage_metadata: {
993
+ input_tokens: usage.prompt_tokens,
994
+ output_tokens: usage.completion_tokens,
995
+ total_tokens: usage.total_tokens,
996
+ ...(Object.keys(inputTokenDetails).length > 0 && {
997
+ input_token_details: inputTokenDetails,
998
+ }),
999
+ ...(Object.keys(outputTokenDetails).length > 0 && {
1000
+ output_token_details: outputTokenDetails,
1001
+ }),
1002
+ },
1003
+ }),
1004
+ text: '',
1005
+ });
1006
+ yield generationChunk;
1007
+ if (this._lc_stream_delay != null) {
1008
+ await sleep(this._lc_stream_delay);
1009
+ }
1010
+ }
1011
+ if (options.signal?.aborted === true) {
1012
+ throw new Error('AbortError');
1013
+ }
1014
+ }
1015
+ }
1016
+
1017
+ export { AzureChatOpenAI, ChatDeepSeek, ChatMoonshot, ChatOpenAI, ChatXAI, CustomAzureOpenAIClient, CustomOpenAIClient, isHeaders, normalizeHeaders };
1018
+ //# sourceMappingURL=index.mjs.map