@illuma-ai/agents 1.0.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (558)
  1. package/README.md +485 -0
  2. package/dist/cjs/agents/AgentContext.cjs +734 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +190 -0
  5. package/dist/cjs/common/enum.cjs.map +1 -0
  6. package/dist/cjs/events.cjs +172 -0
  7. package/dist/cjs/events.cjs.map +1 -0
  8. package/dist/cjs/graphs/Graph.cjs +1615 -0
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +890 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/instrumentation.cjs +21 -0
  13. package/dist/cjs/instrumentation.cjs.map +1 -0
  14. package/dist/cjs/llm/anthropic/index.cjs +292 -0
  15. package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
  16. package/dist/cjs/llm/anthropic/types.cjs +50 -0
  17. package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
  18. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +630 -0
  19. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  20. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
  21. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  22. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  23. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  24. package/dist/cjs/llm/bedrock/index.cjs +282 -0
  25. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  26. package/dist/cjs/llm/fake.cjs +97 -0
  27. package/dist/cjs/llm/fake.cjs.map +1 -0
  28. package/dist/cjs/llm/google/index.cjs +216 -0
  29. package/dist/cjs/llm/google/index.cjs.map +1 -0
  30. package/dist/cjs/llm/google/utils/common.cjs +647 -0
  31. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  32. package/dist/cjs/llm/openai/index.cjs +1028 -0
  33. package/dist/cjs/llm/openai/index.cjs.map +1 -0
  34. package/dist/cjs/llm/openai/utils/index.cjs +765 -0
  35. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  36. package/dist/cjs/llm/openrouter/index.cjs +212 -0
  37. package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
  38. package/dist/cjs/llm/providers.cjs +43 -0
  39. package/dist/cjs/llm/providers.cjs.map +1 -0
  40. package/dist/cjs/llm/text.cjs +69 -0
  41. package/dist/cjs/llm/text.cjs.map +1 -0
  42. package/dist/cjs/llm/vertexai/index.cjs +329 -0
  43. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  44. package/dist/cjs/main.cjs +240 -0
  45. package/dist/cjs/main.cjs.map +1 -0
  46. package/dist/cjs/messages/cache.cjs +387 -0
  47. package/dist/cjs/messages/cache.cjs.map +1 -0
  48. package/dist/cjs/messages/content.cjs +53 -0
  49. package/dist/cjs/messages/content.cjs.map +1 -0
  50. package/dist/cjs/messages/core.cjs +367 -0
  51. package/dist/cjs/messages/core.cjs.map +1 -0
  52. package/dist/cjs/messages/format.cjs +761 -0
  53. package/dist/cjs/messages/format.cjs.map +1 -0
  54. package/dist/cjs/messages/ids.cjs +23 -0
  55. package/dist/cjs/messages/ids.cjs.map +1 -0
  56. package/dist/cjs/messages/prune.cjs +398 -0
  57. package/dist/cjs/messages/prune.cjs.map +1 -0
  58. package/dist/cjs/messages/tools.cjs +96 -0
  59. package/dist/cjs/messages/tools.cjs.map +1 -0
  60. package/dist/cjs/run.cjs +328 -0
  61. package/dist/cjs/run.cjs.map +1 -0
  62. package/dist/cjs/schemas/validate.cjs +324 -0
  63. package/dist/cjs/schemas/validate.cjs.map +1 -0
  64. package/dist/cjs/splitStream.cjs +210 -0
  65. package/dist/cjs/splitStream.cjs.map +1 -0
  66. package/dist/cjs/stream.cjs +620 -0
  67. package/dist/cjs/stream.cjs.map +1 -0
  68. package/dist/cjs/tools/BrowserTools.cjs +248 -0
  69. package/dist/cjs/tools/BrowserTools.cjs.map +1 -0
  70. package/dist/cjs/tools/Calculator.cjs +66 -0
  71. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  72. package/dist/cjs/tools/CodeExecutor.cjs +234 -0
  73. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
  74. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +636 -0
  75. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -0
  76. package/dist/cjs/tools/ToolNode.cjs +548 -0
  77. package/dist/cjs/tools/ToolNode.cjs.map +1 -0
  78. package/dist/cjs/tools/ToolSearch.cjs +909 -0
  79. package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
  80. package/dist/cjs/tools/handlers.cjs +255 -0
  81. package/dist/cjs/tools/handlers.cjs.map +1 -0
  82. package/dist/cjs/tools/schema.cjs +31 -0
  83. package/dist/cjs/tools/schema.cjs.map +1 -0
  84. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  85. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  86. package/dist/cjs/tools/search/content.cjs +140 -0
  87. package/dist/cjs/tools/search/content.cjs.map +1 -0
  88. package/dist/cjs/tools/search/firecrawl.cjs +179 -0
  89. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  90. package/dist/cjs/tools/search/format.cjs +203 -0
  91. package/dist/cjs/tools/search/format.cjs.map +1 -0
  92. package/dist/cjs/tools/search/highlights.cjs +245 -0
  93. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  94. package/dist/cjs/tools/search/rerankers.cjs +174 -0
  95. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  96. package/dist/cjs/tools/search/schema.cjs +117 -0
  97. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  98. package/dist/cjs/tools/search/search.cjs +566 -0
  99. package/dist/cjs/tools/search/search.cjs.map +1 -0
  100. package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
  101. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
  102. package/dist/cjs/tools/search/tool.cjs +456 -0
  103. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  104. package/dist/cjs/tools/search/utils.cjs +66 -0
  105. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  106. package/dist/cjs/types/graph.cjs +29 -0
  107. package/dist/cjs/types/graph.cjs.map +1 -0
  108. package/dist/cjs/utils/contextAnalytics.cjs +66 -0
  109. package/dist/cjs/utils/contextAnalytics.cjs.map +1 -0
  110. package/dist/cjs/utils/events.cjs +31 -0
  111. package/dist/cjs/utils/events.cjs.map +1 -0
  112. package/dist/cjs/utils/graph.cjs +16 -0
  113. package/dist/cjs/utils/graph.cjs.map +1 -0
  114. package/dist/cjs/utils/handlers.cjs +70 -0
  115. package/dist/cjs/utils/handlers.cjs.map +1 -0
  116. package/dist/cjs/utils/llm.cjs +27 -0
  117. package/dist/cjs/utils/llm.cjs.map +1 -0
  118. package/dist/cjs/utils/misc.cjs +56 -0
  119. package/dist/cjs/utils/misc.cjs.map +1 -0
  120. package/dist/cjs/utils/run.cjs +73 -0
  121. package/dist/cjs/utils/run.cjs.map +1 -0
  122. package/dist/cjs/utils/schema.cjs +27 -0
  123. package/dist/cjs/utils/schema.cjs.map +1 -0
  124. package/dist/cjs/utils/title.cjs +125 -0
  125. package/dist/cjs/utils/title.cjs.map +1 -0
  126. package/dist/cjs/utils/tokens.cjs +125 -0
  127. package/dist/cjs/utils/tokens.cjs.map +1 -0
  128. package/dist/cjs/utils/toonFormat.cjs +388 -0
  129. package/dist/cjs/utils/toonFormat.cjs.map +1 -0
  130. package/dist/esm/agents/AgentContext.mjs +732 -0
  131. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  132. package/dist/esm/common/enum.mjs +190 -0
  133. package/dist/esm/common/enum.mjs.map +1 -0
  134. package/dist/esm/events.mjs +164 -0
  135. package/dist/esm/events.mjs.map +1 -0
  136. package/dist/esm/graphs/Graph.mjs +1612 -0
  137. package/dist/esm/graphs/Graph.mjs.map +1 -0
  138. package/dist/esm/graphs/MultiAgentGraph.mjs +888 -0
  139. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  140. package/dist/esm/instrumentation.mjs +19 -0
  141. package/dist/esm/instrumentation.mjs.map +1 -0
  142. package/dist/esm/llm/anthropic/index.mjs +290 -0
  143. package/dist/esm/llm/anthropic/index.mjs.map +1 -0
  144. package/dist/esm/llm/anthropic/types.mjs +48 -0
  145. package/dist/esm/llm/anthropic/types.mjs.map +1 -0
  146. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +627 -0
  147. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  148. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
  149. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  150. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  151. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  152. package/dist/esm/llm/bedrock/index.mjs +280 -0
  153. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  154. package/dist/esm/llm/fake.mjs +94 -0
  155. package/dist/esm/llm/fake.mjs.map +1 -0
  156. package/dist/esm/llm/google/index.mjs +214 -0
  157. package/dist/esm/llm/google/index.mjs.map +1 -0
  158. package/dist/esm/llm/google/utils/common.mjs +638 -0
  159. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  160. package/dist/esm/llm/openai/index.mjs +1018 -0
  161. package/dist/esm/llm/openai/index.mjs.map +1 -0
  162. package/dist/esm/llm/openai/utils/index.mjs +759 -0
  163. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  164. package/dist/esm/llm/openrouter/index.mjs +210 -0
  165. package/dist/esm/llm/openrouter/index.mjs.map +1 -0
  166. package/dist/esm/llm/providers.mjs +39 -0
  167. package/dist/esm/llm/providers.mjs.map +1 -0
  168. package/dist/esm/llm/text.mjs +67 -0
  169. package/dist/esm/llm/text.mjs.map +1 -0
  170. package/dist/esm/llm/vertexai/index.mjs +327 -0
  171. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  172. package/dist/esm/main.mjs +37 -0
  173. package/dist/esm/main.mjs.map +1 -0
  174. package/dist/esm/messages/cache.mjs +382 -0
  175. package/dist/esm/messages/cache.mjs.map +1 -0
  176. package/dist/esm/messages/content.mjs +51 -0
  177. package/dist/esm/messages/content.mjs.map +1 -0
  178. package/dist/esm/messages/core.mjs +359 -0
  179. package/dist/esm/messages/core.mjs.map +1 -0
  180. package/dist/esm/messages/format.mjs +752 -0
  181. package/dist/esm/messages/format.mjs.map +1 -0
  182. package/dist/esm/messages/ids.mjs +21 -0
  183. package/dist/esm/messages/ids.mjs.map +1 -0
  184. package/dist/esm/messages/prune.mjs +393 -0
  185. package/dist/esm/messages/prune.mjs.map +1 -0
  186. package/dist/esm/messages/tools.mjs +93 -0
  187. package/dist/esm/messages/tools.mjs.map +1 -0
  188. package/dist/esm/run.mjs +325 -0
  189. package/dist/esm/run.mjs.map +1 -0
  190. package/dist/esm/schemas/validate.mjs +317 -0
  191. package/dist/esm/schemas/validate.mjs.map +1 -0
  192. package/dist/esm/splitStream.mjs +207 -0
  193. package/dist/esm/splitStream.mjs.map +1 -0
  194. package/dist/esm/stream.mjs +616 -0
  195. package/dist/esm/stream.mjs.map +1 -0
  196. package/dist/esm/tools/BrowserTools.mjs +244 -0
  197. package/dist/esm/tools/BrowserTools.mjs.map +1 -0
  198. package/dist/esm/tools/Calculator.mjs +41 -0
  199. package/dist/esm/tools/Calculator.mjs.map +1 -0
  200. package/dist/esm/tools/CodeExecutor.mjs +226 -0
  201. package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
  202. package/dist/esm/tools/ProgrammaticToolCalling.mjs +622 -0
  203. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -0
  204. package/dist/esm/tools/ToolNode.mjs +545 -0
  205. package/dist/esm/tools/ToolNode.mjs.map +1 -0
  206. package/dist/esm/tools/ToolSearch.mjs +870 -0
  207. package/dist/esm/tools/ToolSearch.mjs.map +1 -0
  208. package/dist/esm/tools/handlers.mjs +250 -0
  209. package/dist/esm/tools/handlers.mjs.map +1 -0
  210. package/dist/esm/tools/schema.mjs +28 -0
  211. package/dist/esm/tools/schema.mjs.map +1 -0
  212. package/dist/esm/tools/search/anthropic.mjs +37 -0
  213. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  214. package/dist/esm/tools/search/content.mjs +119 -0
  215. package/dist/esm/tools/search/content.mjs.map +1 -0
  216. package/dist/esm/tools/search/firecrawl.mjs +176 -0
  217. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  218. package/dist/esm/tools/search/format.mjs +201 -0
  219. package/dist/esm/tools/search/format.mjs.map +1 -0
  220. package/dist/esm/tools/search/highlights.mjs +243 -0
  221. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  222. package/dist/esm/tools/search/rerankers.mjs +168 -0
  223. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  224. package/dist/esm/tools/search/schema.mjs +104 -0
  225. package/dist/esm/tools/search/schema.mjs.map +1 -0
  226. package/dist/esm/tools/search/search.mjs +563 -0
  227. package/dist/esm/tools/search/search.mjs.map +1 -0
  228. package/dist/esm/tools/search/serper-scraper.mjs +129 -0
  229. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
  230. package/dist/esm/tools/search/tool.mjs +454 -0
  231. package/dist/esm/tools/search/tool.mjs.map +1 -0
  232. package/dist/esm/tools/search/utils.mjs +61 -0
  233. package/dist/esm/tools/search/utils.mjs.map +1 -0
  234. package/dist/esm/types/graph.mjs +26 -0
  235. package/dist/esm/types/graph.mjs.map +1 -0
  236. package/dist/esm/utils/contextAnalytics.mjs +64 -0
  237. package/dist/esm/utils/contextAnalytics.mjs.map +1 -0
  238. package/dist/esm/utils/events.mjs +29 -0
  239. package/dist/esm/utils/events.mjs.map +1 -0
  240. package/dist/esm/utils/graph.mjs +13 -0
  241. package/dist/esm/utils/graph.mjs.map +1 -0
  242. package/dist/esm/utils/handlers.mjs +68 -0
  243. package/dist/esm/utils/handlers.mjs.map +1 -0
  244. package/dist/esm/utils/llm.mjs +24 -0
  245. package/dist/esm/utils/llm.mjs.map +1 -0
  246. package/dist/esm/utils/misc.mjs +53 -0
  247. package/dist/esm/utils/misc.mjs.map +1 -0
  248. package/dist/esm/utils/run.mjs +70 -0
  249. package/dist/esm/utils/run.mjs.map +1 -0
  250. package/dist/esm/utils/schema.mjs +24 -0
  251. package/dist/esm/utils/schema.mjs.map +1 -0
  252. package/dist/esm/utils/title.mjs +122 -0
  253. package/dist/esm/utils/title.mjs.map +1 -0
  254. package/dist/esm/utils/tokens.mjs +121 -0
  255. package/dist/esm/utils/tokens.mjs.map +1 -0
  256. package/dist/esm/utils/toonFormat.mjs +381 -0
  257. package/dist/esm/utils/toonFormat.mjs.map +1 -0
  258. package/dist/types/agents/AgentContext.d.ts +293 -0
  259. package/dist/types/common/enum.d.ts +155 -0
  260. package/dist/types/common/index.d.ts +1 -0
  261. package/dist/types/events.d.ts +31 -0
  262. package/dist/types/graphs/Graph.d.ts +216 -0
  263. package/dist/types/graphs/MultiAgentGraph.d.ts +104 -0
  264. package/dist/types/graphs/index.d.ts +2 -0
  265. package/dist/types/index.d.ts +21 -0
  266. package/dist/types/instrumentation.d.ts +1 -0
  267. package/dist/types/llm/anthropic/index.d.ts +39 -0
  268. package/dist/types/llm/anthropic/types.d.ts +37 -0
  269. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  270. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
  271. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
  272. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  273. package/dist/types/llm/bedrock/index.d.ts +141 -0
  274. package/dist/types/llm/bedrock/types.d.ts +27 -0
  275. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  276. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  277. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  278. package/dist/types/llm/fake.d.ts +31 -0
  279. package/dist/types/llm/google/index.d.ts +24 -0
  280. package/dist/types/llm/google/types.d.ts +42 -0
  281. package/dist/types/llm/google/utils/common.d.ts +34 -0
  282. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  283. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  284. package/dist/types/llm/openai/index.d.ts +127 -0
  285. package/dist/types/llm/openai/types.d.ts +10 -0
  286. package/dist/types/llm/openai/utils/index.d.ts +29 -0
  287. package/dist/types/llm/openrouter/index.d.ts +15 -0
  288. package/dist/types/llm/providers.d.ts +5 -0
  289. package/dist/types/llm/text.d.ts +21 -0
  290. package/dist/types/llm/vertexai/index.d.ts +293 -0
  291. package/dist/types/messages/cache.d.ts +54 -0
  292. package/dist/types/messages/content.d.ts +7 -0
  293. package/dist/types/messages/core.d.ts +14 -0
  294. package/dist/types/messages/format.d.ts +137 -0
  295. package/dist/types/messages/ids.d.ts +3 -0
  296. package/dist/types/messages/index.d.ts +7 -0
  297. package/dist/types/messages/prune.d.ts +52 -0
  298. package/dist/types/messages/reducer.d.ts +9 -0
  299. package/dist/types/messages/tools.d.ts +17 -0
  300. package/dist/types/mockStream.d.ts +32 -0
  301. package/dist/types/prompts/collab.d.ts +1 -0
  302. package/dist/types/prompts/index.d.ts +2 -0
  303. package/dist/types/prompts/taskmanager.d.ts +41 -0
  304. package/dist/types/run.d.ts +41 -0
  305. package/dist/types/schemas/index.d.ts +1 -0
  306. package/dist/types/schemas/validate.d.ts +59 -0
  307. package/dist/types/splitStream.d.ts +37 -0
  308. package/dist/types/stream.d.ts +15 -0
  309. package/dist/types/test/mockTools.d.ts +28 -0
  310. package/dist/types/tools/BrowserTools.d.ts +87 -0
  311. package/dist/types/tools/Calculator.d.ts +34 -0
  312. package/dist/types/tools/CodeExecutor.d.ts +57 -0
  313. package/dist/types/tools/ProgrammaticToolCalling.d.ts +138 -0
  314. package/dist/types/tools/ToolNode.d.ts +51 -0
  315. package/dist/types/tools/ToolSearch.d.ts +219 -0
  316. package/dist/types/tools/handlers.d.ts +22 -0
  317. package/dist/types/tools/schema.d.ts +12 -0
  318. package/dist/types/tools/search/anthropic.d.ts +16 -0
  319. package/dist/types/tools/search/content.d.ts +4 -0
  320. package/dist/types/tools/search/firecrawl.d.ts +54 -0
  321. package/dist/types/tools/search/format.d.ts +5 -0
  322. package/dist/types/tools/search/highlights.d.ts +13 -0
  323. package/dist/types/tools/search/index.d.ts +3 -0
  324. package/dist/types/tools/search/rerankers.d.ts +38 -0
  325. package/dist/types/tools/search/schema.d.ts +103 -0
  326. package/dist/types/tools/search/search.d.ts +8 -0
  327. package/dist/types/tools/search/serper-scraper.d.ts +59 -0
  328. package/dist/types/tools/search/test.d.ts +1 -0
  329. package/dist/types/tools/search/tool.d.ts +3 -0
  330. package/dist/types/tools/search/types.d.ts +575 -0
  331. package/dist/types/tools/search/utils.d.ts +10 -0
  332. package/dist/types/types/graph.d.ts +399 -0
  333. package/dist/types/types/index.d.ts +5 -0
  334. package/dist/types/types/llm.d.ts +105 -0
  335. package/dist/types/types/messages.d.ts +4 -0
  336. package/dist/types/types/run.d.ts +112 -0
  337. package/dist/types/types/stream.d.ts +308 -0
  338. package/dist/types/types/tools.d.ts +296 -0
  339. package/dist/types/utils/contextAnalytics.d.ts +37 -0
  340. package/dist/types/utils/events.d.ts +6 -0
  341. package/dist/types/utils/graph.d.ts +2 -0
  342. package/dist/types/utils/handlers.d.ts +34 -0
  343. package/dist/types/utils/index.d.ts +9 -0
  344. package/dist/types/utils/llm.d.ts +3 -0
  345. package/dist/types/utils/llmConfig.d.ts +3 -0
  346. package/dist/types/utils/logging.d.ts +1 -0
  347. package/dist/types/utils/misc.d.ts +7 -0
  348. package/dist/types/utils/run.d.ts +27 -0
  349. package/dist/types/utils/schema.d.ts +8 -0
  350. package/dist/types/utils/title.d.ts +4 -0
  351. package/dist/types/utils/tokens.d.ts +28 -0
  352. package/dist/types/utils/toonFormat.d.ts +111 -0
  353. package/package.json +190 -0
  354. package/src/agents/AgentContext.test.ts +458 -0
  355. package/src/agents/AgentContext.ts +972 -0
  356. package/src/agents/__tests__/AgentContext.test.ts +805 -0
  357. package/src/agents/__tests__/resolveStructuredOutputMode.test.ts +137 -0
  358. package/src/common/enum.ts +203 -0
  359. package/src/common/index.ts +2 -0
  360. package/src/events.ts +223 -0
  361. package/src/graphs/Graph.ts +2228 -0
  362. package/src/graphs/MultiAgentGraph.ts +1063 -0
  363. package/src/graphs/__tests__/structured-output.integration.test.ts +809 -0
  364. package/src/graphs/__tests__/structured-output.test.ts +183 -0
  365. package/src/graphs/index.ts +2 -0
  366. package/src/index.ts +34 -0
  367. package/src/instrumentation.ts +22 -0
  368. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  369. package/src/llm/anthropic/index.ts +413 -0
  370. package/src/llm/anthropic/llm.spec.ts +1442 -0
  371. package/src/llm/anthropic/types.ts +140 -0
  372. package/src/llm/anthropic/utils/message_inputs.ts +757 -0
  373. package/src/llm/anthropic/utils/message_outputs.ts +289 -0
  374. package/src/llm/anthropic/utils/output_parsers.ts +133 -0
  375. package/src/llm/anthropic/utils/tools.ts +29 -0
  376. package/src/llm/bedrock/__tests__/bedrock-caching.test.ts +495 -0
  377. package/src/llm/bedrock/index.ts +411 -0
  378. package/src/llm/bedrock/llm.spec.ts +616 -0
  379. package/src/llm/bedrock/types.ts +51 -0
  380. package/src/llm/bedrock/utils/index.ts +18 -0
  381. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  382. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  383. package/src/llm/fake.ts +133 -0
  384. package/src/llm/google/data/gettysburg10.wav +0 -0
  385. package/src/llm/google/data/hotdog.jpg +0 -0
  386. package/src/llm/google/index.ts +337 -0
  387. package/src/llm/google/llm.spec.ts +934 -0
  388. package/src/llm/google/types.ts +56 -0
  389. package/src/llm/google/utils/common.ts +873 -0
  390. package/src/llm/google/utils/tools.ts +160 -0
  391. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  392. package/src/llm/openai/index.ts +1366 -0
  393. package/src/llm/openai/types.ts +24 -0
  394. package/src/llm/openai/utils/index.ts +1035 -0
  395. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  396. package/src/llm/openrouter/index.ts +291 -0
  397. package/src/llm/providers.ts +52 -0
  398. package/src/llm/text.ts +94 -0
  399. package/src/llm/vertexai/index.ts +359 -0
  400. package/src/messages/__tests__/tools.test.ts +473 -0
  401. package/src/messages/cache.test.ts +1261 -0
  402. package/src/messages/cache.ts +518 -0
  403. package/src/messages/content.test.ts +362 -0
  404. package/src/messages/content.ts +63 -0
  405. package/src/messages/core.ts +473 -0
  406. package/src/messages/ensureThinkingBlock.test.ts +468 -0
  407. package/src/messages/format.ts +1029 -0
  408. package/src/messages/formatAgentMessages.test.ts +1513 -0
  409. package/src/messages/formatAgentMessages.tools.test.ts +419 -0
  410. package/src/messages/formatMessage.test.ts +693 -0
  411. package/src/messages/ids.ts +26 -0
  412. package/src/messages/index.ts +7 -0
  413. package/src/messages/labelContentByAgent.test.ts +887 -0
  414. package/src/messages/prune.ts +568 -0
  415. package/src/messages/reducer.ts +80 -0
  416. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
  417. package/src/messages/tools.ts +108 -0
  418. package/src/mockStream.ts +99 -0
  419. package/src/prompts/collab.ts +6 -0
  420. package/src/prompts/index.ts +2 -0
  421. package/src/prompts/taskmanager.ts +61 -0
  422. package/src/run.ts +467 -0
  423. package/src/schemas/index.ts +2 -0
  424. package/src/schemas/schema-preparation.test.ts +500 -0
  425. package/src/schemas/validate.test.ts +358 -0
  426. package/src/schemas/validate.ts +454 -0
  427. package/src/scripts/abort.ts +157 -0
  428. package/src/scripts/ant_web_search.ts +158 -0
  429. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  430. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  431. package/src/scripts/args.ts +48 -0
  432. package/src/scripts/caching.ts +132 -0
  433. package/src/scripts/cli.ts +172 -0
  434. package/src/scripts/cli2.ts +133 -0
  435. package/src/scripts/cli3.ts +184 -0
  436. package/src/scripts/cli4.ts +191 -0
  437. package/src/scripts/cli5.ts +191 -0
  438. package/src/scripts/code_exec.ts +213 -0
  439. package/src/scripts/code_exec_files.ts +236 -0
  440. package/src/scripts/code_exec_multi_session.ts +241 -0
  441. package/src/scripts/code_exec_ptc.ts +334 -0
  442. package/src/scripts/code_exec_session.ts +282 -0
  443. package/src/scripts/code_exec_simple.ts +147 -0
  444. package/src/scripts/content.ts +138 -0
  445. package/src/scripts/empty_input.ts +137 -0
  446. package/src/scripts/handoff-test.ts +135 -0
  447. package/src/scripts/image.ts +178 -0
  448. package/src/scripts/memory.ts +97 -0
  449. package/src/scripts/multi-agent-chain.ts +331 -0
  450. package/src/scripts/multi-agent-conditional.ts +221 -0
  451. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  452. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  453. package/src/scripts/multi-agent-parallel-start.ts +265 -0
  454. package/src/scripts/multi-agent-parallel.ts +394 -0
  455. package/src/scripts/multi-agent-sequence.ts +217 -0
  456. package/src/scripts/multi-agent-supervisor.ts +365 -0
  457. package/src/scripts/multi-agent-test.ts +186 -0
  458. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  459. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  460. package/src/scripts/parallel-tools-test.ts +340 -0
  461. package/src/scripts/programmatic_exec.ts +396 -0
  462. package/src/scripts/programmatic_exec_agent.ts +231 -0
  463. package/src/scripts/search.ts +146 -0
  464. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  465. package/src/scripts/simple.ts +225 -0
  466. package/src/scripts/single-agent-metadata-test.ts +198 -0
  467. package/src/scripts/stream.ts +140 -0
  468. package/src/scripts/test-custom-prompt-key.ts +145 -0
  469. package/src/scripts/test-handoff-input.ts +170 -0
  470. package/src/scripts/test-handoff-preamble.ts +277 -0
  471. package/src/scripts/test-multi-agent-list-handoff.ts +417 -0
  472. package/src/scripts/test-parallel-agent-labeling.ts +325 -0
  473. package/src/scripts/test-parallel-handoffs.ts +291 -0
  474. package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
  475. package/src/scripts/test-thinking-handoff.ts +155 -0
  476. package/src/scripts/test-tools-before-handoff.ts +226 -0
  477. package/src/scripts/test_code_api.ts +361 -0
  478. package/src/scripts/thinking-bedrock.ts +159 -0
  479. package/src/scripts/thinking.ts +171 -0
  480. package/src/scripts/tool_search.ts +162 -0
  481. package/src/scripts/tools.ts +177 -0
  482. package/src/specs/agent-handoffs.test.ts +888 -0
  483. package/src/specs/anthropic.simple.test.ts +387 -0
  484. package/src/specs/azure.simple.test.ts +364 -0
  485. package/src/specs/cache.simple.test.ts +396 -0
  486. package/src/specs/deepseek.simple.test.ts +283 -0
  487. package/src/specs/emergency-prune.test.ts +407 -0
  488. package/src/specs/moonshot.simple.test.ts +358 -0
  489. package/src/specs/openai.simple.test.ts +311 -0
  490. package/src/specs/openrouter.simple.test.ts +107 -0
  491. package/src/specs/prune.test.ts +901 -0
  492. package/src/specs/reasoning.test.ts +201 -0
  493. package/src/specs/spec.utils.ts +3 -0
  494. package/src/specs/thinking-handoff.test.ts +620 -0
  495. package/src/specs/thinking-prune.test.ts +703 -0
  496. package/src/specs/token-distribution-edge-case.test.ts +316 -0
  497. package/src/specs/token-memoization.test.ts +32 -0
  498. package/src/specs/tool-error.test.ts +198 -0
  499. package/src/splitStream.test.ts +691 -0
  500. package/src/splitStream.ts +234 -0
  501. package/src/stream.test.ts +94 -0
  502. package/src/stream.ts +801 -0
  503. package/src/test/mockTools.ts +386 -0
  504. package/src/tools/BrowserTools.ts +393 -0
  505. package/src/tools/Calculator.test.ts +278 -0
  506. package/src/tools/Calculator.ts +46 -0
  507. package/src/tools/CodeExecutor.ts +270 -0
  508. package/src/tools/ProgrammaticToolCalling.ts +785 -0
  509. package/src/tools/ToolNode.ts +674 -0
  510. package/src/tools/ToolSearch.ts +1095 -0
  511. package/src/tools/__tests__/BrowserTools.test.ts +265 -0
  512. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.ts +319 -0
  513. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +1006 -0
  514. package/src/tools/__tests__/ToolSearch.integration.test.ts +162 -0
  515. package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
  516. package/src/tools/handlers.ts +363 -0
  517. package/src/tools/schema.ts +37 -0
  518. package/src/tools/search/anthropic.ts +51 -0
  519. package/src/tools/search/content.test.ts +173 -0
  520. package/src/tools/search/content.ts +147 -0
  521. package/src/tools/search/firecrawl.ts +210 -0
  522. package/src/tools/search/format.ts +250 -0
  523. package/src/tools/search/highlights.ts +320 -0
  524. package/src/tools/search/index.ts +3 -0
  525. package/src/tools/search/jina-reranker.test.ts +130 -0
  526. package/src/tools/search/output.md +2775 -0
  527. package/src/tools/search/rerankers.ts +242 -0
  528. package/src/tools/search/schema.ts +113 -0
  529. package/src/tools/search/search.ts +768 -0
  530. package/src/tools/search/serper-scraper.ts +155 -0
  531. package/src/tools/search/test.html +884 -0
  532. package/src/tools/search/test.md +643 -0
  533. package/src/tools/search/test.ts +159 -0
  534. package/src/tools/search/tool.ts +657 -0
  535. package/src/tools/search/types.ts +665 -0
  536. package/src/tools/search/utils.ts +79 -0
  537. package/src/types/graph.test.ts +218 -0
  538. package/src/types/graph.ts +533 -0
  539. package/src/types/index.ts +6 -0
  540. package/src/types/llm.ts +140 -0
  541. package/src/types/messages.ts +4 -0
  542. package/src/types/run.ts +128 -0
  543. package/src/types/stream.ts +417 -0
  544. package/src/types/tools.ts +355 -0
  545. package/src/utils/contextAnalytics.ts +103 -0
  546. package/src/utils/events.ts +32 -0
  547. package/src/utils/graph.ts +11 -0
  548. package/src/utils/handlers.ts +107 -0
  549. package/src/utils/index.ts +9 -0
  550. package/src/utils/llm.ts +26 -0
  551. package/src/utils/llmConfig.ts +208 -0
  552. package/src/utils/logging.ts +48 -0
  553. package/src/utils/misc.ts +57 -0
  554. package/src/utils/run.ts +106 -0
  555. package/src/utils/schema.ts +35 -0
  556. package/src/utils/title.ts +177 -0
  557. package/src/utils/tokens.ts +142 -0
  558. package/src/utils/toonFormat.ts +475 -0
@@ -0,0 +1,1366 @@
1
+ import { AzureOpenAI as AzureOpenAIClient } from 'openai';
2
+ import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
3
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
4
+ import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
5
+ import { ToolDefinition } from '@langchain/core/language_models/base';
6
+ import { isLangChainTool } from '@langchain/core/utils/function_calling';
7
+ import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
8
+ import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
9
+ import {
10
+ getEndpoint,
11
+ OpenAIClient,
12
+ formatToOpenAITool,
13
+ ChatOpenAI as OriginalChatOpenAI,
14
+ AzureChatOpenAI as OriginalAzureChatOpenAI,
15
+ } from '@langchain/openai';
16
+ import type {
17
+ OpenAIChatCallOptions,
18
+ OpenAIRoleEnum,
19
+ HeaderValue,
20
+ HeadersLike,
21
+ } from './types';
22
+ import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
23
+ import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
24
+ import type { ChatResult, ChatGeneration } from '@langchain/core/outputs';
25
+ import type { ChatXAIInput } from '@langchain/xai';
26
+ import type * as t from '@langchain/openai';
27
+ import {
28
+ isReasoningModel,
29
+ _convertMessagesToOpenAIParams,
30
+ _convertMessagesToOpenAIResponsesParams,
31
+ _convertOpenAIResponsesDeltaToBaseMessageChunk,
32
+ type ResponseReturnStreamEvents,
33
+ } from './utils';
34
+ import { sleep } from '@/utils';
35
+
36
+ // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
37
+ const iife = <T>(fn: () => T) => fn();
38
+
39
+ export function isHeaders(headers: unknown): headers is Headers {
40
+ return (
41
+ typeof Headers !== 'undefined' &&
42
+ headers !== null &&
43
+ typeof headers === 'object' &&
44
+ Object.prototype.toString.call(headers) === '[object Headers]'
45
+ );
46
+ }
47
+
48
+ export function normalizeHeaders(
49
+ headers: HeadersLike
50
+ ): Record<string, HeaderValue | readonly HeaderValue[]> {
51
+ const output = iife(() => {
52
+ // If headers is a Headers instance
53
+ if (isHeaders(headers)) {
54
+ return headers;
55
+ }
56
+ // If headers is an array of [key, value] pairs
57
+ else if (Array.isArray(headers)) {
58
+ return new Headers(headers);
59
+ }
60
+ // If headers is a NullableHeaders-like object (has 'values' property that is a Headers)
61
+ else if (
62
+ typeof headers === 'object' &&
63
+ headers !== null &&
64
+ 'values' in headers &&
65
+ isHeaders(headers.values)
66
+ ) {
67
+ return headers.values;
68
+ }
69
+ // If headers is a plain object
70
+ else if (typeof headers === 'object' && headers !== null) {
71
+ const entries: [string, string][] = Object.entries(headers)
72
+ .filter(([, v]) => typeof v === 'string')
73
+ .map(([k, v]) => [k, v as string]);
74
+ return new Headers(entries);
75
+ }
76
+ return new Headers();
77
+ });
78
+
79
+ return Object.fromEntries(output.entries());
80
+ }
81
+
82
/** Alias for the OpenAI SDK's chat-completion message parameter type. */
type OpenAICompletionParam =
  OpenAIClient.Chat.Completions.ChatCompletionMessageParam;

/** Alias for the OpenAI SDK's per-request options type. */
type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
86
+
87
+ function createAbortHandler(controller: AbortController): () => void {
88
+ return function (): void {
89
+ controller.abort();
90
+ };
91
+ }
92
+ /**
93
+ * Formats a tool in either OpenAI format, or LangChain structured tool format
94
+ * into an OpenAI tool format. If the tool is already in OpenAI format, return without
95
+ * any changes. If it is in LangChain structured tool format, convert it to OpenAI tool format
96
+ * using OpenAI's `zodFunction` util, falling back to `convertToOpenAIFunction` if the parameters
97
+ * returned from the `zodFunction` util are not defined.
98
+ *
99
+ * @param {BindToolsInput} tool The tool to convert to an OpenAI tool.
100
+ * @param {Object} [fields] Additional fields to add to the OpenAI tool.
101
+ * @returns {ToolDefinition} The inputted tool in OpenAI tool format.
102
+ */
103
+ export function _convertToOpenAITool(
104
+ tool: BindToolsInput,
105
+ fields?: {
106
+ /**
107
+ * If `true`, model output is guaranteed to exactly match the JSON Schema
108
+ * provided in the function definition.
109
+ */
110
+ strict?: boolean;
111
+ }
112
+ ): OpenAIClient.ChatCompletionTool {
113
+ let toolDef: OpenAIClient.ChatCompletionTool | undefined;
114
+
115
+ if (isLangChainTool(tool)) {
116
+ toolDef = formatToOpenAITool(tool);
117
+ } else {
118
+ toolDef = tool as ToolDefinition;
119
+ }
120
+
121
+ if (fields?.strict !== undefined) {
122
+ toolDef.function.strict = fields.strict;
123
+ }
124
+
125
+ return toolDef;
126
+ }
127
+ export class CustomOpenAIClient extends OpenAIClient {
128
+ abortHandler?: () => void;
129
+ async fetchWithTimeout(
130
+ url: RequestInfo,
131
+ init: RequestInit | undefined,
132
+ ms: number,
133
+ controller: AbortController
134
+ ): Promise<Response> {
135
+ const { signal, ...options } = init || {};
136
+ const handler = createAbortHandler(controller);
137
+ this.abortHandler = handler;
138
+ if (signal) signal.addEventListener('abort', handler, { once: true });
139
+
140
+ const timeout = setTimeout(() => handler, ms);
141
+
142
+ const fetchOptions = {
143
+ signal: controller.signal as AbortSignal,
144
+ ...options,
145
+ };
146
+ if (fetchOptions.method != null) {
147
+ // Custom methods like 'patch' need to be uppercased
148
+ // See https://github.com/nodejs/undici/issues/2294
149
+ fetchOptions.method = fetchOptions.method.toUpperCase();
150
+ }
151
+
152
+ return (
153
+ // use undefined this binding; fetch errors if bound to something else in browser/cloudflare
154
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
155
+ /** @ts-ignore */
156
+ this.fetch.call(undefined, url, fetchOptions).finally(() => {
157
+ clearTimeout(timeout);
158
+ })
159
+ );
160
+ }
161
+ }
162
+ export class CustomAzureOpenAIClient extends AzureOpenAIClient {
163
+ abortHandler?: () => void;
164
+ async fetchWithTimeout(
165
+ url: RequestInfo,
166
+ init: RequestInit | undefined,
167
+ ms: number,
168
+ controller: AbortController
169
+ ): Promise<Response> {
170
+ const { signal, ...options } = init || {};
171
+ const handler = createAbortHandler(controller);
172
+ this.abortHandler = handler;
173
+ if (signal) signal.addEventListener('abort', handler, { once: true });
174
+
175
+ const timeout = setTimeout(() => handler, ms);
176
+
177
+ const fetchOptions = {
178
+ signal: controller.signal as AbortSignal,
179
+ ...options,
180
+ };
181
+ if (fetchOptions.method != null) {
182
+ // Custom methods like 'patch' need to be uppercased
183
+ // See https://github.com/nodejs/undici/issues/2294
184
+ fetchOptions.method = fetchOptions.method.toUpperCase();
185
+ }
186
+
187
+ return (
188
+ // use undefined this binding; fetch errors if bound to something else in browser/cloudflare
189
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
190
+ /** @ts-ignore */
191
+ this.fetch.call(undefined, url, fetchOptions).finally(() => {
192
+ clearTimeout(timeout);
193
+ })
194
+ );
195
+ }
196
+ }
197
+
198
/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
/**
 * ChatOpenAI subclass that swaps in `CustomOpenAIClient` (timeout/abort-aware
 * fetch), supports an optional per-chunk streaming delay, and routes streaming
 * through either the Responses API or the chat-completions API.
 */
export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
  // Optional delay (ms) slept after each streamed chunk; used to throttle streaming.
  _lc_stream_delay?: number;

  /**
   * @param fields Standard ChatOpenAI options plus `_lc_stream_delay`
   * (per-chunk stream delay in ms) and model kwargs.
   */
  constructor(
    fields?: t.ChatOpenAICallOptions & {
      _lc_stream_delay?: number;
    } & t.OpenAIChatInput['modelKwargs']
  ) {
    super(fields);
    this._lc_stream_delay = fields?._lc_stream_delay;
  }

  /** Exposes the (protected in the base class) underlying SDK client. */
  public get exposedClient(): CustomOpenAIClient {
    return this.client;
  }
  /** LangChain serialization name for this class. */
  static lc_name(): string {
    return 'IllumaOpenAI';
  }
  /**
   * Lazily constructs the `CustomOpenAIClient` on first use (with retries
   * disabled at the SDK level: maxRetries 0) and merges the per-call request
   * options over the client config.
   */
  protected _getClientOptions(
    options?: OpenAICoreRequestOptions
  ): OpenAICoreRequestOptions {
    if (!(this.client as OpenAIClient | undefined)) {
      const openAIEndpointConfig: t.OpenAIEndpointConfig = {
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };
      if (params.baseURL == null) {
        delete params.baseURL;
      }

      this.client = new CustomOpenAIClient(params);
    }
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    return requestOptions;
  }

  /**
   * Returns backwards compatible reasoning parameters from constructor params and call options
   * @internal
   */
  getReasoningParams(
    options?: this['ParsedCallOptions']
  ): OpenAIClient.Reasoning | undefined {
    // apply options in reverse order of importance -- newer options supersede older options
    let reasoning: OpenAIClient.Reasoning | undefined;
    if (this.reasoning !== undefined) {
      reasoning = {
        ...reasoning,
        ...this.reasoning,
      };
    }
    if (options?.reasoning !== undefined) {
      reasoning = {
        ...reasoning,
        ...options.reasoning,
      };
    }

    return reasoning;
  }

  // Protected shim so base-class call sites resolve to the public override above.
  protected _getReasoningParams(
    options?: this['ParsedCallOptions']
  ): OpenAIClient.Reasoning | undefined {
    return this.getReasoningParams(options);
  }

  /**
   * Streams chunks via the Responses API when `_useResponseApi` says so,
   * otherwise delegates to the chat-completions path (`_streamResponseChunks2`).
   * Order per chunk is: yield -> optional delay -> run-manager token callback.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    if (!this._useResponseApi(options)) {
      return yield* this._streamResponseChunks2(messages, options, runManager);
    }
    const streamIterable = await this.responseApiWithRetry(
      {
        ...this.invocationParams<'responses'>(options, { streaming: true }),
        input: _convertMessagesToOpenAIResponsesParams(
          messages,
          this.model,
          this.zdrEnabled
        ),
        stream: true,
      },
      options
    );

    for await (const data of streamIterable) {
      const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(
        data as ResponseReturnStreamEvents
      );
      // Some stream events carry no message content; skip them.
      if (chunk == null) continue;
      yield chunk;
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
      await runManager?.handleLLMNewToken(
        chunk.text || '',
        undefined,
        undefined,
        undefined,
        undefined,
        { chunk }
      );
    }

    return;
  }

  /**
   * Chat-completions streaming path. Converts messages to OpenAI params,
   * forwards each delta (propagating `reasoning_content` / `reasoning` /
   * `provider_specific_fields` into `additional_kwargs`), and finally emits
   * one empty-content chunk carrying aggregated token-usage metadata.
   */
  async *_streamResponseChunks2(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const messagesMapped: OpenAICompletionParam[] =
      _convertMessagesToOpenAIParams(messages, this.model);

    const params = {
      ...this.invocationParams(options, {
        streaming: true,
      }),
      messages: messagesMapped,
      stream: true as const,
    };
    let defaultRole: OpenAIRoleEnum | undefined;

    const streamIterable = await this.completionWithRetry(params, options);
    let usage: OpenAIClient.Completions.CompletionUsage | undefined;
    for await (const data of streamIterable) {
      const choice = data.choices[0] as
        | Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
        | undefined;
      // Usage arrives on a chunk of its own (often with empty choices); stash it.
      if (data.usage) {
        usage = data.usage;
      }
      if (!choice) {
        continue;
      }

      const { delta } = choice;
      if (!delta) {
        continue;
      }
      const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
        delta,
        data,
        defaultRole
      );
      // Normalize provider-specific reasoning fields onto `reasoning_content`.
      if ('reasoning_content' in delta) {
        chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
      } else if ('reasoning' in delta) {
        chunk.additional_kwargs.reasoning_content = delta.reasoning;
      }
      if ('provider_specific_fields' in delta) {
        chunk.additional_kwargs.provider_specific_fields =
          delta.provider_specific_fields;
      }
      // Later deltas may omit `role`; remember the first one seen.
      defaultRole = delta.role ?? defaultRole;
      const newTokenIndices = {
        prompt: options.promptIndex ?? 0,
        completion: choice.index ?? 0,
      };
      if (typeof chunk.content !== 'string') {
        // eslint-disable-next-line no-console
        console.log(
          '[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
        );
        continue;
      }
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const generationInfo: Record<string, any> = { ...newTokenIndices };
      if (choice.finish_reason != null) {
        generationInfo.finish_reason = choice.finish_reason;
        // Only include system fingerprint in the last chunk for now
        // to avoid concatenation issues
        generationInfo.system_fingerprint = data.system_fingerprint;
        generationInfo.model_name = data.model;
        generationInfo.service_tier = data.service_tier;
      }
      if (this.logprobs == true) {
        generationInfo.logprobs = choice.logprobs;
      }
      const generationChunk = new ChatGenerationChunk({
        message: chunk,
        text: chunk.content,
        generationInfo,
      });
      yield generationChunk;
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
      await runManager?.handleLLMNewToken(
        generationChunk.text || '',
        newTokenIndices,
        undefined,
        undefined,
        undefined,
        { chunk: generationChunk }
      );
    }
    // After the stream ends, surface aggregated usage as a trailing empty chunk.
    if (usage) {
      const inputTokenDetails = {
        ...(usage.prompt_tokens_details?.audio_tokens != null && {
          audio: usage.prompt_tokens_details.audio_tokens,
        }),
        ...(usage.prompt_tokens_details?.cached_tokens != null && {
          cache_read: usage.prompt_tokens_details.cached_tokens,
        }),
      };
      const outputTokenDetails = {
        ...(usage.completion_tokens_details?.audio_tokens != null && {
          audio: usage.completion_tokens_details.audio_tokens,
        }),
        ...(usage.completion_tokens_details?.reasoning_tokens != null && {
          reasoning: usage.completion_tokens_details.reasoning_tokens,
        }),
      };
      const generationChunk = new ChatGenerationChunk({
        message: new AIMessageChunk({
          content: '',
          response_metadata: {
            usage: { ...usage },
          },
          usage_metadata: {
            input_tokens: usage.prompt_tokens,
            output_tokens: usage.completion_tokens,
            total_tokens: usage.total_tokens,
            ...(Object.keys(inputTokenDetails).length > 0 && {
              input_token_details: inputTokenDetails,
            }),
            ...(Object.keys(outputTokenDetails).length > 0 && {
              output_token_details: outputTokenDetails,
            }),
          },
        }),
        text: '',
      });
      yield generationChunk;
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
    }
    if (options.signal?.aborted === true) {
      throw new Error('AbortError');
    }
  }
}
457
+
458
/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
/**
 * AzureChatOpenAI subclass that swaps in `CustomAzureOpenAIClient`, tags the
 * User-Agent header, gates reasoning params on reasoning-capable models, and
 * supports an optional per-chunk streaming delay for the Responses API path.
 */
export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
  // Optional delay (ms) slept after each streamed chunk; used to throttle streaming.
  _lc_stream_delay?: number;

  /**
   * @param fields Azure OpenAI options plus `_lc_stream_delay`
   * (per-chunk stream delay in ms).
   */
  constructor(fields?: t.AzureOpenAIInput & { _lc_stream_delay?: number }) {
    super(fields);
    this._lc_stream_delay = fields?._lc_stream_delay;
  }

  /** Exposes the (protected in the base class) underlying SDK client. */
  public get exposedClient(): CustomOpenAIClient {
    return this.client;
  }
  /** LangChain serialization name for this class. */
  static lc_name(): 'IllumaAzureOpenAI' {
    return 'IllumaAzureOpenAI';
  }
  /**
   * Returns backwards compatible reasoning parameters from constructor params and call options
   * @internal
   */
  getReasoningParams(
    options?: this['ParsedCallOptions']
  ): OpenAIClient.Reasoning | undefined {
    // Unlike the base ChatOpenAI override, reasoning params are suppressed
    // entirely for non-reasoning models.
    if (!isReasoningModel(this.model)) {
      return;
    }

    // apply options in reverse order of importance -- newer options supersede older options
    let reasoning: OpenAIClient.Reasoning | undefined;
    if (this.reasoning !== undefined) {
      reasoning = {
        ...reasoning,
        ...this.reasoning,
      };
    }
    if (options?.reasoning !== undefined) {
      reasoning = {
        ...reasoning,
        ...options.reasoning,
      };
    }

    return reasoning;
  }

  // Protected shim so base-class call sites resolve to the public override above.
  protected _getReasoningParams(
    options?: this['ParsedCallOptions']
  ): OpenAIClient.Reasoning | undefined {
    return this.getReasoningParams(options);
  }

  /**
   * Lazily constructs the `CustomAzureOpenAIClient` on first use (SDK retries
   * disabled: maxRetries 0), appending an `illuma-azure-openai-v2` marker to
   * the User-Agent, then merges per-call options and — when an API key is set —
   * injects the `api-key` header and `api-version` query parameter.
   */
  protected _getClientOptions(
    options: OpenAICoreRequestOptions | undefined
  ): OpenAICoreRequestOptions {
    if (!(this.client as unknown as AzureOpenAIClient | undefined)) {
      const openAIEndpointConfig: t.OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        azureADTokenProvider: this.azureADTokenProvider,
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);

      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };

      // Key-based auth only when no Azure AD token provider is configured.
      if (!this.azureADTokenProvider) {
        params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
      }

      if (params.baseURL == null) {
        delete params.baseURL;
      }

      const defaultHeaders = normalizeHeaders(params.defaultHeaders);
      params.defaultHeaders = {
        ...params.defaultHeaders,
        'User-Agent':
          defaultHeaders['User-Agent'] != null
            ? `${defaultHeaders['User-Agent']}: illuma-azure-openai-v2`
            : 'illuma-azure-openai-v2',
      };

      this.client = new CustomAzureOpenAIClient({
        apiVersion: this.azureOpenAIApiVersion,
        azureADTokenProvider: this.azureADTokenProvider,
        ...(params as t.AzureOpenAIInput),
      }) as unknown as CustomOpenAIClient;
    }

    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    if (this.azureOpenAIApiKey != null) {
      requestOptions.headers = {
        'api-key': this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        'api-version': this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }
    return requestOptions;
  }
  /**
   * Streams chunks via the Responses API when `_useResponseApi` says so,
   * otherwise defers to the base-class chat-completions streaming.
   * Order per chunk is: yield -> optional delay -> run-manager token callback.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    if (!this._useResponseApi(options)) {
      return yield* super._streamResponseChunks(messages, options, runManager);
    }
    const streamIterable = await this.responseApiWithRetry(
      {
        ...this.invocationParams<'responses'>(options, { streaming: true }),
        input: _convertMessagesToOpenAIResponsesParams(
          messages,
          this.model,
          this.zdrEnabled
        ),
        stream: true,
      },
      options
    );

    for await (const data of streamIterable) {
      const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(
        data as ResponseReturnStreamEvents
      );
      // Some stream events carry no message content; skip them.
      if (chunk == null) continue;
      yield chunk;
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
      await runManager?.handleLLMNewToken(
        chunk.text || '',
        undefined,
        undefined,
        undefined,
        undefined,
        { chunk }
      );
    }

    return;
  }
}
613
/**
 * ChatDeepSeek subclass that preserves DeepSeek `reasoning_content` on both
 * the non-streaming and streaming paths, and swaps in `CustomOpenAIClient`.
 */
export class ChatDeepSeek extends OriginalChatDeepSeek {
  /** Exposes the (protected in the base class) underlying SDK client. */
  public get exposedClient(): CustomOpenAIClient {
    return this.client;
  }
  /** LangChain serialization name for this class. */
  static lc_name(): 'IllumaDeepSeek' {
    return 'IllumaDeepSeek';
  }

  // Maps LangChain messages to OpenAI params, carrying reasoning content through.
  protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[] {
    return _convertMessagesToOpenAIParams(messages, this.model, {
      includeReasoningContent: true,
    });
  }

  /**
   * Non-streaming generation. Streaming requests are delegated to the base
   * class; otherwise a single completion call is made and each choice is
   * converted to a `ChatGeneration` with token usage in `llmOutput`.
   */
  async _generate(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'] | undefined,
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    const params = this.invocationParams(options);

    if (params.stream === true) {
      return super._generate(messages, options ?? {}, runManager);
    }

    const messagesMapped = this._convertMessages(messages);
    const data = await this.completionWithRetry(
      {
        ...params,
        stream: false,
        messages: messagesMapped,
      },
      {
        signal: options?.signal,
        ...options?.options,
      }
    );

    const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};

    const generations = [];
    for (const part of data.choices ?? []) {
      const text = part.message.content ?? '';
      const generation: ChatGeneration = {
        text: typeof text === 'string' ? text : '',
        message: this._convertResponseToMessage(part, data),
      };
      generation.generationInfo = {
        ...(part.finish_reason != null
          ? { finish_reason: part.finish_reason }
          : {}),
        ...(part.logprobs ? { logprobs: part.logprobs } : {}),
      };
      generations.push(generation);
    }

    return {
      generations,
      llmOutput: {
        tokenUsage: {
          completionTokens: completion_tokens,
          promptTokens: prompt_tokens,
          totalTokens: total_tokens,
        },
      },
    };
  }

  /**
   * Converts one completion choice into an `AIMessage`, parsing tool-call
   * arguments from JSON and copying `reasoning_content` (when non-empty)
   * into `additional_kwargs`.
   */
  protected _convertResponseToMessage(
    choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice,
    data: OpenAIClient.Chat.Completions.ChatCompletion
  ): AIMessage {
    const { message } = choice;
    const rawToolCalls = message.tool_calls;
    const toolCalls = rawToolCalls?.map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      // NOTE(review): JSON.parse will throw on malformed tool arguments — no
      // guard here; presumably upstream guarantees valid JSON.
      args: JSON.parse(tc.function.arguments || '{}'),
      type: 'tool_call' as const,
    }));

    const additional_kwargs: Record<string, unknown> = {};
    if (rawToolCalls) {
      additional_kwargs.tool_calls = rawToolCalls;
    }
    if (
      'reasoning_content' in message &&
      message.reasoning_content != null &&
      message.reasoning_content !== ''
    ) {
      additional_kwargs.reasoning_content = message.reasoning_content;
    }

    return new AIMessage({
      content: message.content ?? '',
      tool_calls: toolCalls,
      additional_kwargs,
      usage_metadata: data.usage
        ? {
            input_tokens: data.usage.prompt_tokens,
            output_tokens: data.usage.completion_tokens,
            total_tokens: data.usage.total_tokens,
          }
        : undefined,
      response_metadata: {
        model_name: data.model,
        system_fingerprint: data.system_fingerprint,
        finish_reason: choice.finish_reason,
      },
    });
  }

  /**
   * Lazily constructs the `CustomOpenAIClient` on first use (SDK retries
   * disabled: maxRetries 0) and merges per-call options over the client config.
   */
  protected _getClientOptions(
    options?: OpenAICoreRequestOptions
  ): OpenAICoreRequestOptions {
    if (!(this.client as OpenAIClient | undefined)) {
      const openAIEndpointConfig: t.OpenAIEndpointConfig = {
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };
      if (params.baseURL == null) {
        delete params.baseURL;
      }

      this.client = new CustomOpenAIClient(params);
    }
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    return requestOptions;
  }

  /**
   * Chat-completions streaming. Forwards each delta (propagating
   * `reasoning_content` into `additional_kwargs`), then emits one trailing
   * empty-content chunk carrying aggregated token-usage metadata.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const messagesMapped: OpenAICompletionParam[] =
      _convertMessagesToOpenAIParams(messages, this.model, {
        includeReasoningContent: true,
      });

    const params = {
      ...this.invocationParams(options, {
        streaming: true,
      }),
      messages: messagesMapped,
      stream: true as const,
    };
    let defaultRole: OpenAIRoleEnum | undefined;

    const streamIterable = await this.completionWithRetry(params, options);
    let usage: OpenAIClient.Completions.CompletionUsage | undefined;
    for await (const data of streamIterable) {
      const choice = data.choices[0] as
        | Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
        | undefined;
      // Usage arrives on a chunk of its own (often with empty choices); stash it.
      if (data.usage) {
        usage = data.usage;
      }
      if (!choice) {
        continue;
      }

      const { delta } = choice;
      if (!delta) {
        continue;
      }
      const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
        delta,
        data,
        defaultRole
      );
      if ('reasoning_content' in delta) {
        chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
      }
      // Later deltas may omit `role`; remember the first one seen.
      defaultRole = delta.role ?? defaultRole;
      const newTokenIndices = {
        prompt: (options as OpenAIChatCallOptions).promptIndex ?? 0,
        completion: choice.index ?? 0,
      };
      if (typeof chunk.content !== 'string') {
        // eslint-disable-next-line no-console
        console.log(
          '[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
        );
        continue;
      }
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const generationInfo: Record<string, any> = { ...newTokenIndices };
      if (choice.finish_reason != null) {
        generationInfo.finish_reason = choice.finish_reason;
        generationInfo.system_fingerprint = data.system_fingerprint;
        generationInfo.model_name = data.model;
        generationInfo.service_tier = data.service_tier;
      }
      if (this.logprobs == true) {
        generationInfo.logprobs = choice.logprobs;
      }
      const generationChunk = new ChatGenerationChunk({
        message: chunk,
        text: chunk.content,
        generationInfo,
      });
      yield generationChunk;
      await runManager?.handleLLMNewToken(
        generationChunk.text || '',
        newTokenIndices,
        undefined,
        undefined,
        undefined,
        { chunk: generationChunk }
      );
    }
    // After the stream ends, surface aggregated usage as a trailing empty chunk.
    if (usage) {
      const inputTokenDetails = {
        ...(usage.prompt_tokens_details?.audio_tokens != null && {
          audio: usage.prompt_tokens_details.audio_tokens,
        }),
        ...(usage.prompt_tokens_details?.cached_tokens != null && {
          cache_read: usage.prompt_tokens_details.cached_tokens,
        }),
      };
      const outputTokenDetails = {
        ...(usage.completion_tokens_details?.audio_tokens != null && {
          audio: usage.completion_tokens_details.audio_tokens,
        }),
        ...(usage.completion_tokens_details?.reasoning_tokens != null && {
          reasoning: usage.completion_tokens_details.reasoning_tokens,
        }),
      };
      const generationChunk = new ChatGenerationChunk({
        message: new AIMessageChunk({
          content: '',
          response_metadata: {
            usage: { ...usage },
          },
          usage_metadata: {
            input_tokens: usage.prompt_tokens,
            output_tokens: usage.completion_tokens,
            total_tokens: usage.total_tokens,
            ...(Object.keys(inputTokenDetails).length > 0 && {
              input_token_details: inputTokenDetails,
            }),
            ...(Object.keys(outputTokenDetails).length > 0 && {
              output_token_details: outputTokenDetails,
            }),
          },
        }),
        text: '',
      });
      yield generationChunk;
    }
    if (options.signal?.aborted === true) {
      throw new Error('AbortError');
    }
  }
}
879
+
880
/** xAI-specific usage metadata type */
export interface XAIUsageMetadata
  extends OpenAIClient.Completions.CompletionUsage {
  // Per-modality breakdown of prompt tokens (all fields optional).
  prompt_tokens_details?: {
    audio_tokens?: number;
    cached_tokens?: number;
    text_tokens?: number;
    image_tokens?: number;
  };
  // Per-category breakdown of completion tokens, including reasoning tokens.
  completion_tokens_details?: {
    audio_tokens?: number;
    reasoning_tokens?: number;
    accepted_prediction_tokens?: number;
    rejected_prediction_tokens?: number;
  };
  // NOTE(review): presumably the number of live-search sources consulted by
  // xAI — confirm against the xAI API reference.
  num_sources_used?: number;
}
897
+
898
+ export class ChatMoonshot extends ChatOpenAI {
899
+ static lc_name(): 'LibreChatMoonshot' {
900
+ return 'LibreChatMoonshot';
901
+ }
902
+
903
+ protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[] {
904
+ return _convertMessagesToOpenAIParams(messages, this.model, {
905
+ includeReasoningContent: true,
906
+ });
907
+ }
908
+
909
+ async _generate(
910
+ messages: BaseMessage[],
911
+ options: this['ParsedCallOptions'],
912
+ runManager?: CallbackManagerForLLMRun
913
+ ): Promise<ChatResult> {
914
+ const params = this.invocationParams(options);
915
+
916
+ if (params.stream === true) {
917
+ return super._generate(messages, options, runManager);
918
+ }
919
+
920
+ const messagesMapped = this._convertMessages(messages);
921
+ const data = await this.completionWithRetry(
922
+ {
923
+ ...params,
924
+ stream: false,
925
+ messages: messagesMapped,
926
+ },
927
+ {
928
+ signal: options.signal,
929
+ ...options.options,
930
+ }
931
+ );
932
+
933
+ const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};
934
+
935
+ const generations = [];
936
+ for (const part of data.choices ?? []) {
937
+ const text = part.message.content ?? '';
938
+ const generation: ChatGeneration = {
939
+ text: typeof text === 'string' ? text : '',
940
+ message: this._convertResponseToMessage(part, data),
941
+ };
942
+ generation.generationInfo = {
943
+ ...(part.finish_reason ? { finish_reason: part.finish_reason } : {}),
944
+ ...(part.logprobs ? { logprobs: part.logprobs } : {}),
945
+ };
946
+ generations.push(generation);
947
+ }
948
+
949
+ return {
950
+ generations,
951
+ llmOutput: {
952
+ tokenUsage: {
953
+ completionTokens: completion_tokens,
954
+ promptTokens: prompt_tokens,
955
+ totalTokens: total_tokens,
956
+ },
957
+ },
958
+ };
959
+ }
960
+
961
+ protected _convertResponseToMessage(
962
+ choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice,
963
+ data: OpenAIClient.Chat.Completions.ChatCompletion
964
+ ): AIMessage {
965
+ const { message } = choice;
966
+ const rawToolCalls = message.tool_calls;
967
+ const toolCalls = rawToolCalls?.map((tc) => ({
968
+ id: tc.id,
969
+ name: tc.function.name,
970
+ args: JSON.parse(tc.function.arguments || '{}'),
971
+ type: 'tool_call' as const,
972
+ }));
973
+
974
+ const additional_kwargs: Record<string, unknown> = {};
975
+ if (rawToolCalls) {
976
+ additional_kwargs.tool_calls = rawToolCalls;
977
+ }
978
+ if (
979
+ 'reasoning_content' in message &&
980
+ message.reasoning_content != null &&
981
+ message.reasoning_content !== ''
982
+ ) {
983
+ additional_kwargs.reasoning_content = message.reasoning_content;
984
+ }
985
+
986
+ return new AIMessage({
987
+ content: message.content ?? '',
988
+ tool_calls: toolCalls,
989
+ additional_kwargs,
990
+ usage_metadata: data.usage
991
+ ? {
992
+ input_tokens: data.usage.prompt_tokens,
993
+ output_tokens: data.usage.completion_tokens,
994
+ total_tokens: data.usage.total_tokens,
995
+ }
996
+ : undefined,
997
+ response_metadata: {
998
+ model_name: data.model,
999
+ system_fingerprint: data.system_fingerprint,
1000
+ finish_reason: choice.finish_reason,
1001
+ },
1002
+ });
1003
+ }
1004
+
1005
/**
 * Streams chat-completion chunks from the OpenAI-compatible endpoint.
 *
 * Yields one `ChatGenerationChunk` per content delta, then (if the provider
 * reported usage on the stream) a final empty-content chunk carrying the
 * aggregated `usage_metadata`. `reasoning_content` deltas are surfaced via
 * `additional_kwargs` on each chunk.
 *
 * @param messages - Conversation history to send.
 * @param options - Parsed call options (may carry `promptIndex` and `signal`).
 * @param runManager - Optional callback manager notified of each new token.
 * @throws Error('AbortError') after the stream ends if the signal was aborted.
 */
async *_streamResponseChunks(
  messages: BaseMessage[],
  options: this['ParsedCallOptions'],
  runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
  // includeReasoningContent: round-trip prior reasoning content back to the
  // provider when converting messages to OpenAI params.
  const messagesMapped: OpenAICompletionParam[] =
    _convertMessagesToOpenAIParams(messages, this.model, {
      includeReasoningContent: true,
    });

  const params = {
    ...this.invocationParams(options, {
      streaming: true,
    }),
    messages: messagesMapped,
    stream: true as const,
  };
  // Carries the role from the first delta that names one; later role-less
  // deltas are converted using this remembered role.
  let defaultRole: OpenAIRoleEnum | undefined;

  const streamIterable = await this.completionWithRetry(params, options);
  // Last usage payload seen on the stream (typically on the final event).
  let usage: OpenAIClient.Completions.CompletionUsage | undefined;
  for await (const data of streamIterable) {
    const choice = data.choices[0] as
      | Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
      | undefined;
    // Capture usage even on events with no choices (the usage-only event
    // sent when stream_options.include_usage is enabled has choices: []).
    if (data.usage) {
      usage = data.usage;
    }
    if (!choice) {
      continue;
    }

    const { delta } = choice;
    if (!delta) {
      continue;
    }
    const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
      delta,
      data,
      defaultRole
    );
    // Forward reasoning deltas; note the key is copied even when the value
    // is undefined, since only presence of the field is checked here.
    if ('reasoning_content' in delta) {
      chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
    }
    defaultRole = delta.role ?? defaultRole;
    const newTokenIndices = {
      prompt: (options as OpenAIChatCallOptions).promptIndex ?? 0,
      completion: choice.index ?? 0,
    };
    // Multimodal/array content is not supported on this streaming path;
    // skip the chunk rather than emit a malformed generation.
    if (typeof chunk.content !== 'string') {
      // eslint-disable-next-line no-console
      console.log(
        '[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
      );
      continue;
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const generationInfo: Record<string, any> = { ...newTokenIndices };
    // Finish metadata is only attached to the terminal chunk of a choice.
    if (choice.finish_reason != null) {
      generationInfo.finish_reason = choice.finish_reason;
      generationInfo.system_fingerprint = data.system_fingerprint;
      generationInfo.model_name = data.model;
      generationInfo.service_tier = data.service_tier;
    }
    // NOTE(review): loose `==` matches `this.logprobs` of true or 1; use of
    // `===` would be stricter — left as-is to preserve behavior.
    if (this.logprobs == true) {
      generationInfo.logprobs = choice.logprobs;
    }
    const generationChunk = new ChatGenerationChunk({
      message: chunk,
      text: chunk.content,
      generationInfo,
    });
    yield generationChunk;
    // Optional artificial pacing between emitted chunks (test/demo hook).
    if (this._lc_stream_delay != null) {
      await sleep(this._lc_stream_delay);
    }
    await runManager?.handleLLMNewToken(
      generationChunk.text || '',
      newTokenIndices,
      undefined,
      undefined,
      undefined,
      { chunk: generationChunk }
    );
  }
  // Emit a trailing, empty-content chunk that carries aggregate usage so
  // downstream consumers can pick up token accounting from the stream.
  if (usage) {
    const inputTokenDetails = {
      ...(usage.prompt_tokens_details?.audio_tokens != null && {
        audio: usage.prompt_tokens_details.audio_tokens,
      }),
      ...(usage.prompt_tokens_details?.cached_tokens != null && {
        cache_read: usage.prompt_tokens_details.cached_tokens,
      }),
    };
    const outputTokenDetails = {
      ...(usage.completion_tokens_details?.audio_tokens != null && {
        audio: usage.completion_tokens_details.audio_tokens,
      }),
      ...(usage.completion_tokens_details?.reasoning_tokens != null && {
        reasoning: usage.completion_tokens_details.reasoning_tokens,
      }),
    };
    const generationChunk = new ChatGenerationChunk({
      message: new AIMessageChunk({
        content: '',
        response_metadata: {
          usage: { ...usage },
        },
        usage_metadata: {
          input_tokens: usage.prompt_tokens,
          output_tokens: usage.completion_tokens,
          total_tokens: usage.total_tokens,
          // Detail objects are attached only when non-empty, so consumers
          // can distinguish "not reported" from "reported as zero".
          ...(Object.keys(inputTokenDetails).length > 0 && {
            input_token_details: inputTokenDetails,
          }),
          ...(Object.keys(outputTokenDetails).length > 0 && {
            output_token_details: outputTokenDetails,
          }),
        },
      }),
      text: '',
    });
    yield generationChunk;
    if (this._lc_stream_delay != null) {
      await sleep(this._lc_stream_delay);
    }
  }
  // Abort is only surfaced after the stream drains; mid-stream cancellation
  // is presumably handled by the underlying client — TODO confirm.
  if (options.signal?.aborted === true) {
    throw new Error('AbortError');
  }
}
1136
+ }
1137
+
1138
/**
 * xAI (Grok) chat model wrapper extending the upstream `ChatXAI`.
 *
 * Adds: an optional custom base URL (accepted via either `configuration` or
 * `clientConfig`), an optional artificial inter-chunk delay for streaming
 * (`_lc_stream_delay`), and a streaming implementation that surfaces
 * xAI-specific usage details (text/image prompt tokens, accepted/rejected
 * prediction tokens, `num_sources_used`).
 */
export class ChatXAI extends OriginalChatXAI {
  // Optional delay (ms, presumably — TODO confirm unit against `sleep`)
  // inserted after each yielded stream chunk; used to pace streaming.
  _lc_stream_delay?: number;

  /**
   * @param fields - Upstream `ChatXAIInput` fields plus a custom base URL
   *   (under `configuration` or `clientConfig`) and `_lc_stream_delay`.
   */
  constructor(
    fields?: Partial<ChatXAIInput> & {
      configuration?: { baseURL?: string };
      clientConfig?: { baseURL?: string };
      _lc_stream_delay?: number;
    }
  ) {
    super(fields);
    this._lc_stream_delay = fields?._lc_stream_delay;
    // `configuration.baseURL` wins over `clientConfig.baseURL`.
    const customBaseURL =
      fields?.configuration?.baseURL ?? fields?.clientConfig?.baseURL;
    // NOTE(review): `!= null && customBaseURL` is redundant (truthiness
    // already excludes null/undefined); kept as-is. Empty string is
    // deliberately ignored.
    if (customBaseURL != null && customBaseURL) {
      this.clientConfig = {
        ...this.clientConfig,
        baseURL: customBaseURL,
      };
      // Reset the client to force recreation with new config
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      this.client = undefined as any;
    }
  }

  /** Serialization name used by LangChain (distinct from upstream's). */
  static lc_name(): 'IllumaXAI' {
    return 'IllumaXAI';
  }

  /** Exposes the underlying OpenAI-compatible client (e.g. for testing). */
  public get exposedClient(): CustomOpenAIClient {
    return this.client;
  }

  /**
   * Lazily constructs the OpenAI-compatible client (honoring any custom
   * baseURL) and builds per-request options.
   *
   * @param options - Per-call request options to merge over the client config.
   * @returns Merged request options for this call.
   */
  protected _getClientOptions(
    options?: OpenAICoreRequestOptions
  ): OpenAICoreRequestOptions {
    // Client may have been reset to undefined by the constructor when a
    // custom baseURL was supplied; recreate it on first use.
    if (!(this.client as OpenAIClient | undefined)) {
      const openAIEndpointConfig: t.OpenAIEndpointConfig = {
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };
      // Let the client library apply its own default endpoint.
      if (params.baseURL == null) {
        delete params.baseURL;
      }

      this.client = new CustomOpenAIClient(params);
    }
    // NOTE(review): spreads the whole clientConfig into request options,
    // mirroring the upstream pattern — presumably extra keys are ignored by
    // the client; verify if config gains sensitive fields.
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    return requestOptions;
  }

  /**
   * Streams chat-completion chunks from the xAI endpoint.
   *
   * Mirrors the generic OpenAI-compatible streaming path, plus: normalizes
   * partial `usage_metadata` on deltas to zero-filled totals, and folds
   * xAI-specific usage details into the trailing usage chunk.
   *
   * @param messages - Conversation history to send.
   * @param options - Parsed call options (may carry `promptIndex`, `signal`).
   * @param runManager - Optional callback manager notified per token.
   * @throws Error('AbortError') after the stream ends if the signal aborted.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const messagesMapped: OpenAICompletionParam[] =
      _convertMessagesToOpenAIParams(messages, this.model);

    const params = {
      ...this.invocationParams(options, {
        streaming: true,
      }),
      messages: messagesMapped,
      stream: true as const,
    };
    // Remembers the first role seen so later role-less deltas convert
    // consistently.
    let defaultRole: OpenAIRoleEnum | undefined;

    const streamIterable = await this.completionWithRetry(params, options);
    // Last usage payload seen on the stream (usage-only events have no
    // choices, so it is captured before the choice guard below).
    let usage: OpenAIClient.Completions.CompletionUsage | undefined;
    for await (const data of streamIterable) {
      const choice = data.choices[0] as
        | Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
        | undefined;
      if (data.usage) {
        usage = data.usage;
      }
      if (!choice) {
        continue;
      }

      const { delta } = choice;
      if (!delta) {
        continue;
      }
      const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
        delta,
        data,
        defaultRole
      );
      // Deltas can carry partial usage metadata; zero-fill missing fields
      // so downstream accumulation never sees undefined counts.
      if (chunk.usage_metadata != null) {
        chunk.usage_metadata = {
          input_tokens:
            (chunk.usage_metadata as Partial<UsageMetadata>).input_tokens ?? 0,
          output_tokens:
            (chunk.usage_metadata as Partial<UsageMetadata>).output_tokens ?? 0,
          total_tokens:
            (chunk.usage_metadata as Partial<UsageMetadata>).total_tokens ?? 0,
        };
      }
      // Forward reasoning deltas (presence-checked only; value may be
      // undefined and is copied as-is).
      if ('reasoning_content' in delta) {
        chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
      }
      defaultRole = delta.role ?? defaultRole;
      const newTokenIndices = {
        prompt: (options as OpenAIChatCallOptions).promptIndex ?? 0,
        completion: choice.index ?? 0,
      };
      // Array/multimodal content is unsupported on this path; skip it.
      if (typeof chunk.content !== 'string') {
        // eslint-disable-next-line no-console
        console.log(
          '[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
        );
        continue;
      }
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const generationInfo: Record<string, any> = { ...newTokenIndices };
      if (choice.finish_reason != null) {
        generationInfo.finish_reason = choice.finish_reason;
        // Only include system fingerprint in the last chunk for now
        // to avoid concatenation issues
        generationInfo.system_fingerprint = data.system_fingerprint;
        generationInfo.model_name = data.model;
        generationInfo.service_tier = data.service_tier;
      }
      // NOTE(review): loose `==` matches true or 1; preserved as-is.
      if (this.logprobs == true) {
        generationInfo.logprobs = choice.logprobs;
      }
      const generationChunk = new ChatGenerationChunk({
        message: chunk,
        text: chunk.content,
        generationInfo,
      });
      yield generationChunk;
      // Optional artificial pacing between chunks (test/demo hook).
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
      await runManager?.handleLLMNewToken(
        generationChunk.text || '',
        newTokenIndices,
        undefined,
        undefined,
        undefined,
        { chunk: generationChunk }
      );
    }
    // Trailing empty-content chunk carrying aggregate usage, including
    // xAI-specific detail fields when the provider reported them.
    if (usage) {
      // Type assertion for xAI-specific usage structure
      const xaiUsage = usage as XAIUsageMetadata;
      const inputTokenDetails = {
        // Standard OpenAI fields
        ...(usage.prompt_tokens_details?.audio_tokens != null && {
          audio: usage.prompt_tokens_details.audio_tokens,
        }),
        ...(usage.prompt_tokens_details?.cached_tokens != null && {
          cache_read: usage.prompt_tokens_details.cached_tokens,
        }),
        // Add xAI-specific prompt token details if they exist
        ...(xaiUsage.prompt_tokens_details?.text_tokens != null && {
          text: xaiUsage.prompt_tokens_details.text_tokens,
        }),
        ...(xaiUsage.prompt_tokens_details?.image_tokens != null && {
          image: xaiUsage.prompt_tokens_details.image_tokens,
        }),
      };
      const outputTokenDetails = {
        // Standard OpenAI fields
        ...(usage.completion_tokens_details?.audio_tokens != null && {
          audio: usage.completion_tokens_details.audio_tokens,
        }),
        ...(usage.completion_tokens_details?.reasoning_tokens != null && {
          reasoning: usage.completion_tokens_details.reasoning_tokens,
        }),
        // Add xAI-specific completion token details if they exist
        ...(xaiUsage.completion_tokens_details?.accepted_prediction_tokens !=
          null && {
          accepted_prediction:
            xaiUsage.completion_tokens_details.accepted_prediction_tokens,
        }),
        ...(xaiUsage.completion_tokens_details?.rejected_prediction_tokens !=
          null && {
          rejected_prediction:
            xaiUsage.completion_tokens_details.rejected_prediction_tokens,
        }),
      };
      const generationChunk = new ChatGenerationChunk({
        message: new AIMessageChunk({
          content: '',
          response_metadata: {
            usage: { ...usage },
            // Include xAI-specific metadata if it exists
            ...(xaiUsage.num_sources_used != null && {
              num_sources_used: xaiUsage.num_sources_used,
            }),
          },
          usage_metadata: {
            input_tokens: usage.prompt_tokens,
            output_tokens: usage.completion_tokens,
            total_tokens: usage.total_tokens,
            // Detail objects attached only when non-empty so consumers can
            // tell "not reported" apart from "reported as zero".
            ...(Object.keys(inputTokenDetails).length > 0 && {
              input_token_details: inputTokenDetails,
            }),
            ...(Object.keys(outputTokenDetails).length > 0 && {
              output_token_details: outputTokenDetails,
            }),
          },
        }),
        text: '',
      });
      yield generationChunk;
      if (this._lc_stream_delay != null) {
        await sleep(this._lc_stream_delay);
      }
    }
    // Abort surfaced only after the stream drains; mid-stream cancellation
    // is presumably handled by the underlying client — TODO confirm.
    if (options.signal?.aborted === true) {
      throw new Error('AbortError');
    }
  }
}