@librechat/agents 2.4.322 → 3.0.0-rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (266)
  1. package/dist/cjs/agents/AgentContext.cjs +218 -0
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  3. package/dist/cjs/common/enum.cjs +14 -5
  4. package/dist/cjs/common/enum.cjs.map +1 -1
  5. package/dist/cjs/events.cjs +10 -6
  6. package/dist/cjs/events.cjs.map +1 -1
  7. package/dist/cjs/graphs/Graph.cjs +309 -212
  8. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  9. package/dist/cjs/graphs/MultiAgentGraph.cjs +422 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  11. package/dist/cjs/llm/anthropic/index.cjs +54 -9
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  14. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
  15. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  16. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
  17. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  18. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  19. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  20. package/dist/cjs/llm/google/index.cjs +144 -0
  21. package/dist/cjs/llm/google/index.cjs.map +1 -0
  22. package/dist/cjs/llm/google/utils/common.cjs +477 -0
  23. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  24. package/dist/cjs/llm/ollama/index.cjs +67 -0
  25. package/dist/cjs/llm/ollama/index.cjs.map +1 -0
  26. package/dist/cjs/llm/ollama/utils.cjs +158 -0
  27. package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
  28. package/dist/cjs/llm/openai/index.cjs +389 -3
  29. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  30. package/dist/cjs/llm/openai/utils/index.cjs +672 -0
  31. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  32. package/dist/cjs/llm/providers.cjs +15 -15
  33. package/dist/cjs/llm/providers.cjs.map +1 -1
  34. package/dist/cjs/llm/text.cjs +14 -3
  35. package/dist/cjs/llm/text.cjs.map +1 -1
  36. package/dist/cjs/llm/vertexai/index.cjs +330 -0
  37. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  38. package/dist/cjs/main.cjs +11 -0
  39. package/dist/cjs/main.cjs.map +1 -1
  40. package/dist/cjs/run.cjs +120 -81
  41. package/dist/cjs/run.cjs.map +1 -1
  42. package/dist/cjs/stream.cjs +85 -51
  43. package/dist/cjs/stream.cjs.map +1 -1
  44. package/dist/cjs/tools/ToolNode.cjs +10 -4
  45. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  46. package/dist/cjs/tools/handlers.cjs +119 -13
  47. package/dist/cjs/tools/handlers.cjs.map +1 -1
  48. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  49. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  50. package/dist/cjs/tools/search/firecrawl.cjs +55 -9
  51. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  52. package/dist/cjs/tools/search/format.cjs +6 -6
  53. package/dist/cjs/tools/search/format.cjs.map +1 -1
  54. package/dist/cjs/tools/search/rerankers.cjs +7 -29
  55. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  56. package/dist/cjs/tools/search/search.cjs +86 -16
  57. package/dist/cjs/tools/search/search.cjs.map +1 -1
  58. package/dist/cjs/tools/search/tool.cjs +4 -2
  59. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  60. package/dist/cjs/tools/search/utils.cjs +1 -1
  61. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  62. package/dist/cjs/utils/events.cjs +31 -0
  63. package/dist/cjs/utils/events.cjs.map +1 -0
  64. package/dist/cjs/utils/title.cjs +57 -21
  65. package/dist/cjs/utils/title.cjs.map +1 -1
  66. package/dist/cjs/utils/tokens.cjs +54 -7
  67. package/dist/cjs/utils/tokens.cjs.map +1 -1
  68. package/dist/esm/agents/AgentContext.mjs +216 -0
  69. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  70. package/dist/esm/common/enum.mjs +15 -6
  71. package/dist/esm/common/enum.mjs.map +1 -1
  72. package/dist/esm/events.mjs +10 -6
  73. package/dist/esm/events.mjs.map +1 -1
  74. package/dist/esm/graphs/Graph.mjs +311 -214
  75. package/dist/esm/graphs/Graph.mjs.map +1 -1
  76. package/dist/esm/graphs/MultiAgentGraph.mjs +420 -0
  77. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  78. package/dist/esm/llm/anthropic/index.mjs +54 -9
  79. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  80. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  81. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
  82. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  83. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
  84. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  85. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  86. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  87. package/dist/esm/llm/google/index.mjs +142 -0
  88. package/dist/esm/llm/google/index.mjs.map +1 -0
  89. package/dist/esm/llm/google/utils/common.mjs +471 -0
  90. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  91. package/dist/esm/llm/ollama/index.mjs +65 -0
  92. package/dist/esm/llm/ollama/index.mjs.map +1 -0
  93. package/dist/esm/llm/ollama/utils.mjs +155 -0
  94. package/dist/esm/llm/ollama/utils.mjs.map +1 -0
  95. package/dist/esm/llm/openai/index.mjs +388 -4
  96. package/dist/esm/llm/openai/index.mjs.map +1 -1
  97. package/dist/esm/llm/openai/utils/index.mjs +666 -0
  98. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  99. package/dist/esm/llm/providers.mjs +5 -5
  100. package/dist/esm/llm/providers.mjs.map +1 -1
  101. package/dist/esm/llm/text.mjs +14 -3
  102. package/dist/esm/llm/text.mjs.map +1 -1
  103. package/dist/esm/llm/vertexai/index.mjs +328 -0
  104. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  105. package/dist/esm/main.mjs +6 -5
  106. package/dist/esm/main.mjs.map +1 -1
  107. package/dist/esm/run.mjs +121 -83
  108. package/dist/esm/run.mjs.map +1 -1
  109. package/dist/esm/stream.mjs +87 -54
  110. package/dist/esm/stream.mjs.map +1 -1
  111. package/dist/esm/tools/ToolNode.mjs +10 -4
  112. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  113. package/dist/esm/tools/handlers.mjs +119 -15
  114. package/dist/esm/tools/handlers.mjs.map +1 -1
  115. package/dist/esm/tools/search/anthropic.mjs +37 -0
  116. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  117. package/dist/esm/tools/search/firecrawl.mjs +55 -9
  118. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  119. package/dist/esm/tools/search/format.mjs +7 -7
  120. package/dist/esm/tools/search/format.mjs.map +1 -1
  121. package/dist/esm/tools/search/rerankers.mjs +7 -29
  122. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  123. package/dist/esm/tools/search/search.mjs +86 -16
  124. package/dist/esm/tools/search/search.mjs.map +1 -1
  125. package/dist/esm/tools/search/tool.mjs +4 -2
  126. package/dist/esm/tools/search/tool.mjs.map +1 -1
  127. package/dist/esm/tools/search/utils.mjs +1 -1
  128. package/dist/esm/tools/search/utils.mjs.map +1 -1
  129. package/dist/esm/utils/events.mjs +29 -0
  130. package/dist/esm/utils/events.mjs.map +1 -0
  131. package/dist/esm/utils/title.mjs +57 -22
  132. package/dist/esm/utils/title.mjs.map +1 -1
  133. package/dist/esm/utils/tokens.mjs +54 -8
  134. package/dist/esm/utils/tokens.mjs.map +1 -1
  135. package/dist/types/agents/AgentContext.d.ts +91 -0
  136. package/dist/types/common/enum.d.ts +15 -6
  137. package/dist/types/events.d.ts +5 -4
  138. package/dist/types/graphs/Graph.d.ts +64 -67
  139. package/dist/types/graphs/MultiAgentGraph.d.ts +37 -0
  140. package/dist/types/graphs/index.d.ts +1 -0
  141. package/dist/types/llm/anthropic/index.d.ts +11 -0
  142. package/dist/types/llm/anthropic/types.d.ts +9 -3
  143. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
  144. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
  145. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  146. package/dist/types/llm/google/index.d.ts +13 -0
  147. package/dist/types/llm/google/types.d.ts +32 -0
  148. package/dist/types/llm/google/utils/common.d.ts +19 -0
  149. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  150. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  151. package/dist/types/llm/ollama/index.d.ts +7 -0
  152. package/dist/types/llm/ollama/utils.d.ts +7 -0
  153. package/dist/types/llm/openai/index.d.ts +72 -3
  154. package/dist/types/llm/openai/types.d.ts +10 -0
  155. package/dist/types/llm/openai/utils/index.d.ts +20 -0
  156. package/dist/types/llm/text.d.ts +1 -1
  157. package/dist/types/llm/vertexai/index.d.ts +293 -0
  158. package/dist/types/messages/reducer.d.ts +9 -0
  159. package/dist/types/run.d.ts +19 -12
  160. package/dist/types/scripts/ant_web_search.d.ts +1 -0
  161. package/dist/types/scripts/args.d.ts +2 -1
  162. package/dist/types/scripts/handoff-test.d.ts +1 -0
  163. package/dist/types/scripts/multi-agent-conditional.d.ts +1 -0
  164. package/dist/types/scripts/multi-agent-parallel.d.ts +1 -0
  165. package/dist/types/scripts/multi-agent-sequence.d.ts +1 -0
  166. package/dist/types/scripts/multi-agent-supervisor.d.ts +1 -0
  167. package/dist/types/scripts/multi-agent-test.d.ts +1 -0
  168. package/dist/types/scripts/test-custom-prompt-key.d.ts +2 -0
  169. package/dist/types/scripts/test-handoff-input.d.ts +2 -0
  170. package/dist/types/scripts/test-multi-agent-list-handoff.d.ts +2 -0
  171. package/dist/types/stream.d.ts +10 -3
  172. package/dist/types/tools/CodeExecutor.d.ts +2 -2
  173. package/dist/types/tools/ToolNode.d.ts +1 -1
  174. package/dist/types/tools/handlers.d.ts +17 -4
  175. package/dist/types/tools/search/anthropic.d.ts +16 -0
  176. package/dist/types/tools/search/firecrawl.d.ts +15 -0
  177. package/dist/types/tools/search/rerankers.d.ts +0 -1
  178. package/dist/types/tools/search/types.d.ts +30 -9
  179. package/dist/types/types/graph.d.ts +129 -15
  180. package/dist/types/types/llm.d.ts +24 -10
  181. package/dist/types/types/run.d.ts +46 -8
  182. package/dist/types/types/stream.d.ts +16 -2
  183. package/dist/types/types/tools.d.ts +1 -1
  184. package/dist/types/utils/events.d.ts +6 -0
  185. package/dist/types/utils/title.d.ts +2 -1
  186. package/dist/types/utils/tokens.d.ts +24 -0
  187. package/package.json +37 -17
  188. package/src/agents/AgentContext.ts +315 -0
  189. package/src/common/enum.ts +14 -5
  190. package/src/events.ts +24 -13
  191. package/src/graphs/Graph.ts +495 -312
  192. package/src/graphs/MultiAgentGraph.ts +498 -0
  193. package/src/graphs/index.ts +2 -1
  194. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  195. package/src/llm/anthropic/index.ts +78 -13
  196. package/src/llm/anthropic/llm.spec.ts +491 -115
  197. package/src/llm/anthropic/types.ts +39 -3
  198. package/src/llm/anthropic/utils/message_inputs.ts +67 -11
  199. package/src/llm/anthropic/utils/message_outputs.ts +21 -2
  200. package/src/llm/anthropic/utils/output_parsers.ts +25 -6
  201. package/src/llm/anthropic/utils/tools.ts +29 -0
  202. package/src/llm/google/index.ts +218 -0
  203. package/src/llm/google/types.ts +43 -0
  204. package/src/llm/google/utils/common.ts +646 -0
  205. package/src/llm/google/utils/tools.ts +160 -0
  206. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  207. package/src/llm/ollama/index.ts +89 -0
  208. package/src/llm/ollama/utils.ts +193 -0
  209. package/src/llm/openai/index.ts +600 -14
  210. package/src/llm/openai/types.ts +24 -0
  211. package/src/llm/openai/utils/index.ts +912 -0
  212. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  213. package/src/llm/providers.ts +10 -9
  214. package/src/llm/text.ts +26 -7
  215. package/src/llm/vertexai/index.ts +360 -0
  216. package/src/messages/reducer.ts +80 -0
  217. package/src/run.ts +181 -112
  218. package/src/scripts/ant_web_search.ts +158 -0
  219. package/src/scripts/args.ts +12 -8
  220. package/src/scripts/cli4.ts +29 -21
  221. package/src/scripts/cli5.ts +29 -21
  222. package/src/scripts/code_exec.ts +54 -23
  223. package/src/scripts/code_exec_files.ts +48 -17
  224. package/src/scripts/code_exec_simple.ts +46 -27
  225. package/src/scripts/handoff-test.ts +135 -0
  226. package/src/scripts/image.ts +52 -20
  227. package/src/scripts/multi-agent-conditional.ts +220 -0
  228. package/src/scripts/multi-agent-example-output.md +110 -0
  229. package/src/scripts/multi-agent-parallel.ts +341 -0
  230. package/src/scripts/multi-agent-sequence.ts +212 -0
  231. package/src/scripts/multi-agent-supervisor.ts +361 -0
  232. package/src/scripts/multi-agent-test.ts +186 -0
  233. package/src/scripts/search.ts +1 -9
  234. package/src/scripts/simple.ts +25 -10
  235. package/src/scripts/test-custom-prompt-key.ts +145 -0
  236. package/src/scripts/test-handoff-input.ts +110 -0
  237. package/src/scripts/test-multi-agent-list-handoff.ts +258 -0
  238. package/src/scripts/tools.ts +48 -18
  239. package/src/specs/anthropic.simple.test.ts +150 -34
  240. package/src/specs/azure.simple.test.ts +325 -0
  241. package/src/specs/openai.simple.test.ts +140 -33
  242. package/src/specs/openrouter.simple.test.ts +107 -0
  243. package/src/specs/prune.test.ts +4 -9
  244. package/src/specs/reasoning.test.ts +80 -44
  245. package/src/specs/token-memoization.test.ts +39 -0
  246. package/src/stream.test.ts +94 -0
  247. package/src/stream.ts +139 -60
  248. package/src/tools/ToolNode.ts +21 -7
  249. package/src/tools/handlers.ts +192 -18
  250. package/src/tools/search/anthropic.ts +51 -0
  251. package/src/tools/search/firecrawl.ts +69 -20
  252. package/src/tools/search/format.ts +6 -8
  253. package/src/tools/search/rerankers.ts +7 -40
  254. package/src/tools/search/search.ts +97 -16
  255. package/src/tools/search/tool.ts +5 -2
  256. package/src/tools/search/types.ts +30 -10
  257. package/src/tools/search/utils.ts +1 -1
  258. package/src/types/graph.ts +315 -103
  259. package/src/types/llm.ts +25 -12
  260. package/src/types/run.ts +51 -13
  261. package/src/types/stream.ts +22 -1
  262. package/src/types/tools.ts +16 -10
  263. package/src/utils/events.ts +32 -0
  264. package/src/utils/llmConfig.ts +19 -7
  265. package/src/utils/title.ts +104 -30
  266. package/src/utils/tokens.ts +69 -10
package/dist/esm/llm/ollama/utils.mjs
@@ -0,0 +1,155 @@
+ import { AIMessageChunk } from '@langchain/core/messages';
+ import { v4 } from 'uuid';
+ 
+ function convertOllamaMessagesToLangChain(messages, extra) {
+     const additional_kwargs = {};
+     if ('thinking' in messages) {
+         additional_kwargs.reasoning_content = messages.thinking;
+     }
+     return new AIMessageChunk({
+         content: messages.content || '',
+         tool_call_chunks: messages.tool_calls?.map((tc) => ({
+             name: tc.function.name,
+             args: JSON.stringify(tc.function.arguments),
+             type: 'tool_call_chunk',
+             index: 0,
+             id: v4(),
+         })),
+         response_metadata: extra?.responseMetadata,
+         usage_metadata: extra?.usageMetadata,
+         additional_kwargs,
+     });
+ }
+ function extractBase64FromDataUrl(dataUrl) {
+     const match = dataUrl.match(/^data:.*?;base64,(.*)$/);
+     return match ? match[1] : '';
+ }
+ function convertAMessagesToOllama(messages) {
+     if (typeof messages.content === 'string') {
+         return [
+             {
+                 role: 'assistant',
+                 content: messages.content,
+             },
+         ];
+     }
+     const textFields = messages.content.filter((c) => c.type === 'text' && typeof c.text === 'string');
+     const textMessages = textFields.map((c) => ({
+         role: 'assistant',
+         content: c.text,
+     }));
+     let toolCallMsgs;
+     if (messages.content.find((c) => c.type === 'tool_use') &&
+         messages.tool_calls?.length) {
+         // `tool_use` content types are accepted if the message has tool calls
+         const toolCalls = messages.tool_calls.map((tc) => ({
+             id: tc.id,
+             type: 'function',
+             function: {
+                 name: tc.name,
+                 arguments: tc.args,
+             },
+         }));
+         if (toolCalls) {
+             toolCallMsgs = {
+                 role: 'assistant',
+                 tool_calls: toolCalls,
+                 content: '',
+             };
+         }
+     }
+     else if (messages.content.find((c) => c.type === 'tool_use') &&
+         !messages.tool_calls?.length) {
+         throw new Error('\'tool_use\' content type is not supported without tool calls.');
+     }
+     return [...textMessages, ...(toolCallMsgs ? [toolCallMsgs] : [])];
+ }
+ function convertHumanGenericMessagesToOllama(message) {
+     if (typeof message.content === 'string') {
+         return [
+             {
+                 role: 'user',
+                 content: message.content,
+             },
+         ];
+     }
+     return message.content.map((c) => {
+         if (c.type === 'text') {
+             return {
+                 role: 'user',
+                 content: c.text,
+             };
+         }
+         else if (c.type === 'image_url') {
+             if (typeof c.image_url === 'string') {
+                 return {
+                     role: 'user',
+                     content: '',
+                     images: [extractBase64FromDataUrl(c.image_url)],
+                 };
+             }
+             else if (c.image_url.url && typeof c.image_url.url === 'string') {
+                 return {
+                     role: 'user',
+                     content: '',
+                     images: [extractBase64FromDataUrl(c.image_url.url)],
+                 };
+             }
+         }
+         throw new Error(`Unsupported content type: ${c.type}`);
+     });
+ }
+ function convertSystemMessageToOllama(message) {
+     if (typeof message.content === 'string') {
+         return [
+             {
+                 role: 'system',
+                 content: message.content,
+             },
+         ];
+     }
+     else if (message.content.every((c) => c.type === 'text' && typeof c.text === 'string')) {
+         return message.content.map((c) => ({
+             role: 'system',
+             content: c.text,
+         }));
+     }
+     else {
+         throw new Error(`Unsupported content type(s): ${message.content
+             .map((c) => c.type)
+             .join(', ')}`);
+     }
+ }
+ function convertToolMessageToOllama(message) {
+     if (typeof message.content !== 'string') {
+         throw new Error('Non string tool message content is not supported');
+     }
+     return [
+         {
+             role: 'tool',
+             content: message.content,
+         },
+     ];
+ }
+ function convertToOllamaMessages(messages) {
+     return messages.flatMap((msg) => {
+         if (['human', 'generic'].includes(msg._getType())) {
+             return convertHumanGenericMessagesToOllama(msg);
+         }
+         else if (msg._getType() === 'ai') {
+             return convertAMessagesToOllama(msg);
+         }
+         else if (msg._getType() === 'system') {
+             return convertSystemMessageToOllama(msg);
+         }
+         else if (msg._getType() === 'tool') {
+             return convertToolMessageToOllama(msg);
+         }
+         else {
+             throw new Error(`Unsupported message type: ${msg._getType()}`);
+         }
+     });
+ }
+ 
+ export { convertOllamaMessagesToLangChain, convertToOllamaMessages };
+ //# sourceMappingURL=utils.mjs.map
package/dist/esm/llm/ollama/utils.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"utils.mjs","sources":["../../../../src/llm/ollama/utils.ts"],"sourcesContent":["import {\n AIMessage,\n AIMessageChunk,\n BaseMessage,\n HumanMessage,\n MessageContentText,\n SystemMessage,\n ToolMessage,\n UsageMetadata,\n} from '@langchain/core/messages';\nimport type {\n Message as OllamaMessage,\n ToolCall as OllamaToolCall,\n} from 'ollama';\nimport { v4 as uuidv4 } from 'uuid';\n\nexport function convertOllamaMessagesToLangChain(\n messages: OllamaMessage,\n extra?: {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n responseMetadata?: Record<string, any>;\n usageMetadata?: UsageMetadata;\n }\n): AIMessageChunk {\n const additional_kwargs: BaseMessage['additional_kwargs'] = {};\n if ('thinking' in messages) {\n additional_kwargs.reasoning_content = messages.thinking as string;\n }\n return new AIMessageChunk({\n content: messages.content || '',\n tool_call_chunks: messages.tool_calls?.map((tc) => ({\n name: tc.function.name,\n args: JSON.stringify(tc.function.arguments),\n type: 'tool_call_chunk',\n index: 0,\n id: uuidv4(),\n })),\n response_metadata: extra?.responseMetadata,\n usage_metadata: extra?.usageMetadata,\n additional_kwargs,\n });\n}\n\nfunction extractBase64FromDataUrl(dataUrl: string): string {\n const match = dataUrl.match(/^data:.*?;base64,(.*)$/);\n return match ? match[1] : '';\n}\n\nfunction convertAMessagesToOllama(messages: AIMessage): OllamaMessage[] {\n if (typeof messages.content === 'string') {\n return [\n {\n role: 'assistant',\n content: messages.content,\n },\n ];\n }\n\n const textFields = messages.content.filter(\n (c) => c.type === 'text' && typeof c.text === 'string'\n );\n const textMessages = (textFields as MessageContentText[]).map((c) => ({\n role: 'assistant',\n content: c.text,\n }));\n let toolCallMsgs: OllamaMessage | undefined;\n\n if (\n messages.content.find((c) => c.type === 'tool_use') &&\n messages.tool_calls?.length\n ) {\n // `tool_use` content types are accepted if the message has tool calls\n const toolCalls: OllamaToolCall[] | undefined = messages.tool_calls.map(\n (tc) => ({\n id: tc.id,\n type: 'function',\n function: {\n name: tc.name,\n arguments: tc.args,\n },\n })\n );\n\n if (toolCalls) {\n toolCallMsgs = {\n role: 'assistant',\n tool_calls: toolCalls,\n content: '',\n };\n }\n } else if (\n messages.content.find((c) => c.type === 'tool_use') &&\n !messages.tool_calls?.length\n ) {\n throw new Error(\n '\\'tool_use\\' content type is not supported without tool calls.'\n );\n }\n\n return [...textMessages, ...(toolCallMsgs ? 
[toolCallMsgs] : [])];\n}\n\nfunction convertHumanGenericMessagesToOllama(\n message: HumanMessage\n): OllamaMessage[] {\n if (typeof message.content === 'string') {\n return [\n {\n role: 'user',\n content: message.content,\n },\n ];\n }\n return message.content.map((c) => {\n if (c.type === 'text') {\n return {\n role: 'user',\n content: c.text,\n };\n } else if (c.type === 'image_url') {\n if (typeof c.image_url === 'string') {\n return {\n role: 'user',\n content: '',\n images: [extractBase64FromDataUrl(c.image_url)],\n };\n } else if (c.image_url.url && typeof c.image_url.url === 'string') {\n return {\n role: 'user',\n content: '',\n images: [extractBase64FromDataUrl(c.image_url.url)],\n };\n }\n }\n throw new Error(`Unsupported content type: ${c.type}`);\n });\n}\n\nfunction convertSystemMessageToOllama(message: SystemMessage): OllamaMessage[] {\n if (typeof message.content === 'string') {\n return [\n {\n role: 'system',\n content: message.content,\n },\n ];\n } else if (\n message.content.every(\n (c) => c.type === 'text' && typeof c.text === 'string'\n )\n ) {\n return (message.content as MessageContentText[]).map((c) => ({\n role: 'system',\n content: c.text,\n }));\n } else {\n throw new Error(\n `Unsupported content type(s): ${message.content\n .map((c) => c.type)\n .join(', ')}`\n );\n }\n}\n\nfunction convertToolMessageToOllama(message: ToolMessage): OllamaMessage[] {\n if (typeof message.content !== 'string') {\n throw new Error('Non string tool message content is not supported');\n }\n return [\n {\n role: 'tool',\n content: message.content,\n },\n ];\n}\n\nexport function convertToOllamaMessages(\n messages: BaseMessage[]\n): OllamaMessage[] {\n return messages.flatMap((msg) => {\n if (['human', 'generic'].includes(msg._getType())) {\n return convertHumanGenericMessagesToOllama(msg);\n } else if (msg._getType() === 'ai') {\n return convertAMessagesToOllama(msg);\n } else if (msg._getType() === 'system') {\n return convertSystemMessageToOllama(msg);\n } else if (msg._getType() === 'tool') {\n return convertToolMessageToOllama(msg as ToolMessage);\n } else {\n throw new Error(`Unsupported message type: ${msg._getType()}`);\n }\n 
});\n}\n"],"names":["uuidv4"],"mappings":";;;AAgBgB,SAAA,gCAAgC,CAC9C,QAAuB,EACvB,KAIC,EAAA;IAED,MAAM,iBAAiB,GAAqC,EAAE;AAC9D,IAAA,IAAI,UAAU,IAAI,QAAQ,EAAE;AAC1B,QAAA,iBAAiB,CAAC,iBAAiB,GAAG,QAAQ,CAAC,QAAkB;;IAEnE,OAAO,IAAI,cAAc,CAAC;AACxB,QAAA,OAAO,EAAE,QAAQ,CAAC,OAAO,IAAI,EAAE;AAC/B,QAAA,gBAAgB,EAAE,QAAQ,CAAC,UAAU,EAAE,GAAG,CAAC,CAAC,EAAE,MAAM;AAClD,YAAA,IAAI,EAAE,EAAE,CAAC,QAAQ,CAAC,IAAI;YACtB,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC;AAC3C,YAAA,IAAI,EAAE,iBAAiB;AACvB,YAAA,KAAK,EAAE,CAAC;YACR,EAAE,EAAEA,EAAM,EAAE;AACb,SAAA,CAAC,CAAC;QACH,iBAAiB,EAAE,KAAK,EAAE,gBAAgB;QAC1C,cAAc,EAAE,KAAK,EAAE,aAAa;QACpC,iBAAiB;AAClB,KAAA,CAAC;AACJ;AAEA,SAAS,wBAAwB,CAAC,OAAe,EAAA;IAC/C,MAAM,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,wBAAwB,CAAC;AACrD,IAAA,OAAO,KAAK,GAAG,KAAK,CAAC,CAAC,CAAC,GAAG,EAAE;AAC9B;AAEA,SAAS,wBAAwB,CAAC,QAAmB,EAAA;AACnD,IAAA,IAAI,OAAO,QAAQ,CAAC,OAAO,KAAK,QAAQ,EAAE;QACxC,OAAO;AACL,YAAA;AACE,gBAAA,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,QAAQ,CAAC,OAAO;AAC1B,aAAA;SACF;;IAGH,MAAM,UAAU,GAAG,QAAQ,CAAC,OAAO,CAAC,MAAM,CACxC,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,OAAO,CAAC,CAAC,IAAI,KAAK,QAAQ,CACvD;IACD,MAAM,YAAY,GAAI,UAAmC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM;AACpE,QAAA,IAAI,EAAE,WAAW;QACjB,OAAO,EAAE,CAAC,CAAC,IAAI;AAChB,KAAA,CAAC,CAAC;AACH,IAAA,IAAI,YAAuC;AAE3C,IAAA,IACE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC;AACnD,QAAA,QAAQ,CAAC,UAAU,EAAE,MAAM,EAC3B;;AAEA,QAAA,MAAM,SAAS,GAAiC,QAAQ,CAAC,UAAU,CAAC,GAAG,CACrE,CAAC,EAAE,MAAM;YACP,EAAE,EAAE,EAAE,CAAC,EAAE;AACT,YAAA,IAAI,EAAE,UAAU;AAChB,YAAA,QAAQ,EAAE;gBACR,IAAI,EAAE,EAAE,CAAC,IAAI;gBACb,SAAS,EAAE,EAAE,CAAC,IAAI;AACnB,aAAA;AACF,SAAA,CAAC,CACH;QAED,IAAI,SAAS,EAAE;AACb,YAAA,YAAY,GAAG;AACb,gBAAA,IAAI,EAAE,WAAW;AACjB,gBAAA,UAAU,EAAE,SAAS;AACrB,gBAAA,OAAO,EAAE,EAAE;aACZ;;;AAEE,SAAA,IACL,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC;AACnD,QAAA,CAAC,QAAQ,CAAC,UAAU,EAAE,MAAM,EAC5B;AACA,QAAA,MAAM,IAAI,KAAK,CACb,gEAAgE,CACjE;;AAGH,IAAA,OAAO,CAAC,GAAG,YAAY,EAAE,IAAI,YAAY,GAAG,CAAC,YAAY,CAAC,GAAG,EAAE,CAAC,CAAC;AACnE;AAEA,SAAS,mCAAmC,CAC1C,OAAqB,EAAA;AAErB,IAAA,IAAI,OAAO,OAAO,CAAC,OAAO,KAAK,QAAQ,EAAE;QACvC,OAAO;AACL,YAAA;AACE,gBAAA,IAAI,EAAE,MAAM;gBACZ,OAAO,EAAE,OAAO,CAAC,OAAO;AACzB,aAAA;SACF;;IAEH,OAAO,OAAO,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,KAAI;AAC/B,QAAA,IAAI,CAAC,CAAC,IAAI,KAAK,MAAM,EAAE;YACrB,OAAO;AACL,gBAAA,IAAI,EAAE,MAAM;gBACZ,OAAO,EAAE,CAAC,CAAC,IAAI;aAChB;;AACI,aAAA,IAAI,CAAC,CAAC,IAAI,KAAK,WAAW,EAAE;AACjC,YAAA,IAAI,OAAO,CAAC,CAAC,SAAS,KAAK,QAAQ,EAAE;gBACnC,OAAO;AACL,oBAAA,IAAI,EAAE,MAAM;AACZ,oBAAA,OAAO,EAAE,EAAE;oBACX,MAAM,EAAE,CAAC,wBAAwB,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC;iBAChD;;AACI,iBAAA,IAAI,CAAC,CAAC,SAAS,CAAC,GAAG,IAAI,OAAO,CAAC,CAAC,SAAS,CAAC,GAAG,KAAK,QAAQ,EAAE;gBACjE,OAAO;AACL,oBAAA,IAAI,EAAE,MAAM;AACZ,oBAAA,OAAO,EAAE,EAAE;oBACX,MAAM,EAAE,CAAC,wBAAwB,CAAC,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC;iBACpD;;;QAGL,MAAM,IAAI,KAAK,CAAC,CAAA,0BAAA,EAA6B,CAAC,CAAC,IAAI,CAAE,CAAA,CAAC;AACxD,KAAC,CAAC;AACJ;AAEA,SAAS,4BAA4B,CAAC,OAAsB,EAAA;AAC1D,IAAA,IAAI,OAAO,OAAO,CAAC,OAAO,KAAK,QAAQ,EAAE;QACvC,OAAO;AACL,YAAA;AACE,gBAAA,IAAI,EAAE,QAAQ;gBACd,OAAO,EAAE,OAAO,CAAC,OAAO;AACzB,aAAA;SACF;;SACI,IACL,OAAO,CAAC,OAAO,CAAC,KAAK,CACnB,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,OAAO,CAAC,CAAC,IAAI,KAAK,QAAQ,CACvD,EACD;QACA,OAAQ,OAAO,CAAC,OAAgC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM;AAC3D,YAAA,IAAI,EAAE,QAAQ;YACd,OAAO,EAAE,CAAC,CAAC,IAAI;AAChB,SAAA,CAAC,CAAC;;SACE;AACL,QAAA,MAAM,IAAI,KAAK,CACb,CAAgC,6BAAA,EAAA,OAAO,CAAC;aACrC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI;AACjB,aAAA,IAAI,CAAC,IAAI,CAAC,CAAA,CAAE,CAChB;;AAEL;
AAEA,SAAS,0BAA0B,CAAC,OAAoB,EAAA;AACtD,IAAA,IAAI,OAAO,OAAO,CAAC,OAAO,KAAK,QAAQ,EAAE;AACvC,QAAA,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC;;IAErE,OAAO;AACL,QAAA;AACE,YAAA,IAAI,EAAE,MAAM;YACZ,OAAO,EAAE,OAAO,CAAC,OAAO;AACzB,SAAA;KACF;AACH;AAEM,SAAU,uBAAuB,CACrC,QAAuB,EAAA;AAEvB,IAAA,OAAO,QAAQ,CAAC,OAAO,CAAC,CAAC,GAAG,KAAI;AAC9B,QAAA,IAAI,CAAC,OAAO,EAAE,SAAS,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,EAAE;AACjD,YAAA,OAAO,mCAAmC,CAAC,GAAG,CAAC;;AAC1C,aAAA,IAAI,GAAG,CAAC,QAAQ,EAAE,KAAK,IAAI,EAAE;AAClC,YAAA,OAAO,wBAAwB,CAAC,GAAG,CAAC;;AAC/B,aAAA,IAAI,GAAG,CAAC,QAAQ,EAAE,KAAK,QAAQ,EAAE;AACtC,YAAA,OAAO,4BAA4B,CAAC,GAAG,CAAC;;AACnC,aAAA,IAAI,GAAG,CAAC,QAAQ,EAAE,KAAK,MAAM,EAAE;AACpC,YAAA,OAAO,0BAA0B,CAAC,GAAkB,CAAC;;aAChD;YACL,MAAM,IAAI,KAAK,CAAC,CAA6B,0BAAA,EAAA,GAAG,CAAC,QAAQ,EAAE,CAAE,CAAA,CAAC;;AAElE,KAAC,CAAC;AACJ;;;;"}
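The two exported helpers above translate between LangChain message objects and Ollama's flat { role, content } chat format. For orientation, here is a minimal usage sketch in TypeScript; it is illustrative and not part of the package, and the message values, token counts, and relative import path are assumptions.

// Hypothetical usage of the new Ollama conversion helpers (not from the diff).
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
import { convertToOllamaMessages, convertOllamaMessagesToLangChain } from './utils.mjs';

// LangChain-style history flattened into the { role, content } shape Ollama expects:
// here, one system message and one user message.
const ollamaMessages = convertToOllamaMessages([
  new SystemMessage('You are a terse release-notes assistant.'),
  new HumanMessage('Summarize this release in one sentence.'),
]);

// A streamed Ollama response mapped back into an AIMessageChunk; any `thinking`
// text is surfaced as additional_kwargs.reasoning_content.
const chunk = convertOllamaMessagesToLangChain(
  { role: 'assistant', content: 'Hello there.', thinking: 'draft reasoning' },
  { usageMetadata: { input_tokens: 12, output_tokens: 4, total_tokens: 16 } }
);

console.log(ollamaMessages, chunk.additional_kwargs.reasoning_content);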
package/dist/esm/llm/openai/index.mjs
@@ -1,8 +1,48 @@
  import { AzureOpenAI } from 'openai';
+ import { AIMessageChunk } from '@langchain/core/messages';
  import { ChatXAI as ChatXAI$1 } from '@langchain/xai';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import '@langchain/core/utils/function_calling';
  import { ChatDeepSeek as ChatDeepSeek$1 } from '@langchain/deepseek';
  import { getEndpoint, AzureChatOpenAI as AzureChatOpenAI$1, ChatOpenAI as ChatOpenAI$1, OpenAIClient } from '@langchain/openai';
+ import { isReasoningModel, _convertMessagesToOpenAIResponsesParams, _convertOpenAIResponsesDeltaToBaseMessageChunk, _convertMessagesToOpenAIParams } from './utils/index.mjs';

+ // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+ const iife = (fn) => fn();
+ function isHeaders(headers) {
+     return (typeof Headers !== 'undefined' &&
+         headers !== null &&
+         typeof headers === 'object' &&
+         Object.prototype.toString.call(headers) === '[object Headers]');
+ }
+ function normalizeHeaders(headers) {
+     const output = iife(() => {
+         // If headers is a Headers instance
+         if (isHeaders(headers)) {
+             return headers;
+         }
+         // If headers is an array of [key, value] pairs
+         else if (Array.isArray(headers)) {
+             return new Headers(headers);
+         }
+         // If headers is a NullableHeaders-like object (has 'values' property that is a Headers)
+         else if (typeof headers === 'object' &&
+             headers !== null &&
+             'values' in headers &&
+             isHeaders(headers.values)) {
+             return headers.values;
+         }
+         // If headers is a plain object
+         else if (typeof headers === 'object' && headers !== null) {
+             const entries = Object.entries(headers)
+                 .filter(([, v]) => typeof v === 'string')
+                 .map(([k, v]) => [k, v]);
+             return new Headers(entries);
+         }
+         return new Headers();
+     });
+     return Object.fromEntries(output.entries());
+ }
  function createAbortHandler(controller) {
      return function () {
          controller.abort();
@@ -62,6 +102,7 @@ class CustomAzureOpenAIClient extends AzureOpenAI {
          }));
      }
  }
+ /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
  class ChatOpenAI extends ChatOpenAI$1 {
      get exposedClient() {
          return this.client;
@@ -89,11 +130,189 @@ class ChatOpenAI extends ChatOpenAI$1 {
          };
          return requestOptions;
      }
+     /**
+      * Returns backwards compatible reasoning parameters from constructor params and call options
+      * @internal
+      */
+     getReasoningParams(options) {
+         if (!isReasoningModel(this.model)) {
+             return;
+         }
+         // apply options in reverse order of importance -- newer options supersede older options
+         let reasoning;
+         if (this.reasoning !== undefined) {
+             reasoning = {
+                 ...reasoning,
+                 ...this.reasoning,
+             };
+         }
+         if (options?.reasoning !== undefined) {
+             reasoning = {
+                 ...reasoning,
+                 ...options.reasoning,
+             };
+         }
+         return reasoning;
+     }
+     _getReasoningParams(options) {
+         return this.getReasoningParams(options);
+     }
+     async *_streamResponseChunks(messages, options, runManager) {
+         if (!this._useResponseApi(options)) {
+             return yield* this._streamResponseChunks2(messages, options, runManager);
+         }
+         const streamIterable = await this.responseApiWithRetry({
+             ...this.invocationParams(options, { streaming: true }),
+             input: _convertMessagesToOpenAIResponsesParams(messages, this.model, this.zdrEnabled),
+             stream: true,
+         }, options);
+         for await (const data of streamIterable) {
+             const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(data);
+             if (chunk == null)
+                 continue;
+             yield chunk;
+             await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
+         }
+         return;
+     }
+     async *_streamResponseChunks2(messages, options, runManager) {
+         const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model);
+         const params = {
+             ...this.invocationParams(options, {
+                 streaming: true,
+             }),
+             messages: messagesMapped,
+             stream: true,
+         };
+         let defaultRole;
+         const streamIterable = await this.completionWithRetry(params, options);
+         let usage;
+         for await (const data of streamIterable) {
+             const choice = data.choices[0];
+             if (data.usage) {
+                 usage = data.usage;
+             }
+             if (!choice) {
+                 continue;
+             }
+             const { delta } = choice;
+             if (!delta) {
+                 continue;
+             }
+             const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
+             if ('reasoning_content' in delta) {
+                 chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
+             }
+             else if ('reasoning' in delta) {
+                 chunk.additional_kwargs.reasoning_content = delta.reasoning;
+             }
+             defaultRole = delta.role ?? defaultRole;
+             const newTokenIndices = {
+                 prompt: options.promptIndex ?? 0,
+                 completion: choice.index ?? 0,
+             };
+             if (typeof chunk.content !== 'string') {
+                 // eslint-disable-next-line no-console
+                 console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
+                 continue;
+             }
+             // eslint-disable-next-line @typescript-eslint/no-explicit-any
+             const generationInfo = { ...newTokenIndices };
+             if (choice.finish_reason != null) {
+                 generationInfo.finish_reason = choice.finish_reason;
+                 // Only include system fingerprint in the last chunk for now
+                 // to avoid concatenation issues
+                 generationInfo.system_fingerprint = data.system_fingerprint;
+                 generationInfo.model_name = data.model;
+                 generationInfo.service_tier = data.service_tier;
+             }
+             if (this.logprobs == true) {
+                 generationInfo.logprobs = choice.logprobs;
+             }
+             const generationChunk = new ChatGenerationChunk({
+                 message: chunk,
+                 text: chunk.content,
+                 generationInfo,
+             });
+             yield generationChunk;
+             await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
+         }
+         if (usage) {
+             const inputTokenDetails = {
+                 ...(usage.prompt_tokens_details?.audio_tokens != null && {
+                     audio: usage.prompt_tokens_details.audio_tokens,
+                 }),
+                 ...(usage.prompt_tokens_details?.cached_tokens != null && {
+                     cache_read: usage.prompt_tokens_details.cached_tokens,
+                 }),
+             };
+             const outputTokenDetails = {
+                 ...(usage.completion_tokens_details?.audio_tokens != null && {
+                     audio: usage.completion_tokens_details.audio_tokens,
+                 }),
+                 ...(usage.completion_tokens_details?.reasoning_tokens != null && {
+                     reasoning: usage.completion_tokens_details.reasoning_tokens,
+                 }),
+             };
+             const generationChunk = new ChatGenerationChunk({
+                 message: new AIMessageChunk({
+                     content: '',
+                     response_metadata: {
+                         usage: { ...usage },
+                     },
+                     usage_metadata: {
+                         input_tokens: usage.prompt_tokens,
+                         output_tokens: usage.completion_tokens,
+                         total_tokens: usage.total_tokens,
+                         ...(Object.keys(inputTokenDetails).length > 0 && {
+                             input_token_details: inputTokenDetails,
+                         }),
+                         ...(Object.keys(outputTokenDetails).length > 0 && {
+                             output_token_details: outputTokenDetails,
+                         }),
+                     },
+                 }),
+                 text: '',
+             });
+             yield generationChunk;
+         }
+         if (options.signal?.aborted === true) {
+             throw new Error('AbortError');
+         }
+     }
  }
+ /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
  class AzureChatOpenAI extends AzureChatOpenAI$1 {
      get exposedClient() {
          return this.client;
      }
+     /**
+      * Returns backwards compatible reasoning parameters from constructor params and call options
+      * @internal
+      */
+     getReasoningParams(options) {
+         if (!isReasoningModel(this.model)) {
+             return;
+         }
+         // apply options in reverse order of importance -- newer options supersede older options
+         let reasoning;
+         if (this.reasoning !== undefined) {
+             reasoning = {
+                 ...reasoning,
+                 ...this.reasoning,
+             };
+         }
+         if (options?.reasoning !== undefined) {
+             reasoning = {
+                 ...reasoning,
+                 ...options.reasoning,
+             };
+         }
+         return reasoning;
+     }
+     _getReasoningParams(options) {
+         return this.getReasoningParams(options);
+     }
      _getClientOptions(options) {
          if (!this.client) {
              const openAIEndpointConfig = {
@@ -117,11 +336,12 @@ class AzureChatOpenAI extends AzureChatOpenAI$1 {
              if (params.baseURL == null) {
                  delete params.baseURL;
              }
+             const defaultHeaders = normalizeHeaders(params.defaultHeaders);
              params.defaultHeaders = {
                  ...params.defaultHeaders,
-                 'User-Agent': params.defaultHeaders?.['User-Agent'] != null
-                     ? `${params.defaultHeaders['User-Agent']}: langchainjs-azure-openai-v2`
-                     : 'langchainjs-azure-openai-v2',
+                 'User-Agent': defaultHeaders['User-Agent'] != null
+                     ? `${defaultHeaders['User-Agent']}: librechat-azure-openai-v2`
+                     : 'librechat-azure-openai-v2',
              };
              this.client = new CustomAzureOpenAIClient({
                  apiVersion: this.azureOpenAIApiVersion,
@@ -145,6 +365,24 @@ class AzureChatOpenAI extends AzureChatOpenAI$1 {
          }
          return requestOptions;
      }
+     async *_streamResponseChunks(messages, options, runManager) {
+         if (!this._useResponseApi(options)) {
+             return yield* super._streamResponseChunks(messages, options, runManager);
+         }
+         const streamIterable = await this.responseApiWithRetry({
+             ...this.invocationParams(options, { streaming: true }),
+             input: _convertMessagesToOpenAIResponsesParams(messages, this.model, this.zdrEnabled),
+             stream: true,
+         }, options);
+         for await (const data of streamIterable) {
+             const chunk = _convertOpenAIResponsesDeltaToBaseMessageChunk(data);
+             if (chunk == null)
+                 continue;
+             yield chunk;
+             await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
+         }
+         return;
+     }
  }
  class ChatDeepSeek extends ChatDeepSeek$1 {
      get exposedClient() {
@@ -175,6 +413,19 @@ class ChatDeepSeek extends ChatDeepSeek$1 {
      }
  }
  class ChatXAI extends ChatXAI$1 {
+     constructor(fields) {
+         super(fields);
+         const customBaseURL = fields?.configuration?.baseURL ?? fields?.clientConfig?.baseURL;
+         if (customBaseURL != null && customBaseURL) {
+             this.clientConfig = {
+                 ...this.clientConfig,
+                 baseURL: customBaseURL,
+             };
+             // Reset the client to force recreation with new config
+             // eslint-disable-next-line @typescript-eslint/no-explicit-any
+             this.client = undefined;
+         }
+     }
      get exposedClient() {
          return this.client;
      }
@@ -201,7 +452,140 @@ class ChatXAI extends ChatXAI$1 {
          };
          return requestOptions;
      }
+     async *_streamResponseChunks(messages, options, runManager) {
+         const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model);
+         const params = {
+             ...this.invocationParams(options, {
+                 streaming: true,
+             }),
+             messages: messagesMapped,
+             stream: true,
+         };
+         let defaultRole;
+         const streamIterable = await this.completionWithRetry(params, options);
+         let usage;
+         for await (const data of streamIterable) {
+             const choice = data.choices[0];
+             if (data.usage) {
+                 usage = data.usage;
+             }
+             if (!choice) {
+                 continue;
+             }
+             const { delta } = choice;
+             if (!delta) {
+                 continue;
+             }
+             const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
+             if (chunk.usage_metadata != null) {
+                 chunk.usage_metadata = {
+                     input_tokens: chunk.usage_metadata.input_tokens ?? 0,
+                     output_tokens: chunk.usage_metadata.output_tokens ?? 0,
+                     total_tokens: chunk.usage_metadata.total_tokens ?? 0,
+                 };
+             }
+             if ('reasoning_content' in delta) {
+                 chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
+             }
+             defaultRole = delta.role ?? defaultRole;
+             const newTokenIndices = {
+                 prompt: options.promptIndex ?? 0,
+                 completion: choice.index ?? 0,
+             };
+             if (typeof chunk.content !== 'string') {
+                 // eslint-disable-next-line no-console
+                 console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
+                 continue;
+             }
+             // eslint-disable-next-line @typescript-eslint/no-explicit-any
+             const generationInfo = { ...newTokenIndices };
+             if (choice.finish_reason != null) {
+                 generationInfo.finish_reason = choice.finish_reason;
+                 // Only include system fingerprint in the last chunk for now
+                 // to avoid concatenation issues
+                 generationInfo.system_fingerprint = data.system_fingerprint;
+                 generationInfo.model_name = data.model;
+                 generationInfo.service_tier = data.service_tier;
+             }
+             if (this.logprobs == true) {
+                 generationInfo.logprobs = choice.logprobs;
+             }
+             const generationChunk = new ChatGenerationChunk({
+                 message: chunk,
+                 text: chunk.content,
+                 generationInfo,
+             });
+             yield generationChunk;
+             await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
+         }
+         if (usage) {
+             // Type assertion for xAI-specific usage structure
+             const xaiUsage = usage;
+             const inputTokenDetails = {
+                 // Standard OpenAI fields
+                 ...(usage.prompt_tokens_details?.audio_tokens != null && {
+                     audio: usage.prompt_tokens_details.audio_tokens,
+                 }),
+                 ...(usage.prompt_tokens_details?.cached_tokens != null && {
+                     cache_read: usage.prompt_tokens_details.cached_tokens,
+                 }),
+                 // Add xAI-specific prompt token details if they exist
+                 ...(xaiUsage.prompt_tokens_details?.text_tokens != null && {
+                     text: xaiUsage.prompt_tokens_details.text_tokens,
+                 }),
+                 ...(xaiUsage.prompt_tokens_details?.image_tokens != null && {
+                     image: xaiUsage.prompt_tokens_details.image_tokens,
+                 }),
+             };
+             const outputTokenDetails = {
+                 // Standard OpenAI fields
+                 ...(usage.completion_tokens_details?.audio_tokens != null && {
+                     audio: usage.completion_tokens_details.audio_tokens,
+                 }),
+                 ...(usage.completion_tokens_details?.reasoning_tokens != null && {
+                     reasoning: usage.completion_tokens_details.reasoning_tokens,
+                 }),
+                 // Add xAI-specific completion token details if they exist
+                 ...(xaiUsage.completion_tokens_details?.accepted_prediction_tokens !=
+                     null && {
+                     accepted_prediction: xaiUsage.completion_tokens_details.accepted_prediction_tokens,
+                 }),
+                 ...(xaiUsage.completion_tokens_details?.rejected_prediction_tokens !=
+                     null && {
+                     rejected_prediction: xaiUsage.completion_tokens_details.rejected_prediction_tokens,
+                 }),
+             };
+             const generationChunk = new ChatGenerationChunk({
+                 message: new AIMessageChunk({
+                     content: '',
+                     response_metadata: {
+                         usage: { ...usage },
+                         // Include xAI-specific metadata if it exists
+                         ...(xaiUsage.num_sources_used != null && {
+                             num_sources_used: xaiUsage.num_sources_used,
+                         }),
+                     },
+                     usage_metadata: {
+                         input_tokens: usage.prompt_tokens,
+                         output_tokens: usage.completion_tokens,
+                         total_tokens: usage.total_tokens,
+                         ...(Object.keys(inputTokenDetails).length > 0 && {
+                             input_token_details: inputTokenDetails,
+                         }),
+                         ...(Object.keys(outputTokenDetails).length > 0 && {
+                             output_token_details: outputTokenDetails,
+                         }),
+                     },
+                 }),
+                 text: '',
+             });
+             yield generationChunk;
+         }
+         if (options.signal?.aborted === true) {
+             throw new Error('AbortError');
+         }
+     }
  }

- export { AzureChatOpenAI, ChatDeepSeek, ChatOpenAI, ChatXAI, CustomAzureOpenAIClient, CustomOpenAIClient };
+ export { AzureChatOpenAI, ChatDeepSeek, ChatOpenAI, ChatXAI, CustomAzureOpenAIClient, CustomOpenAIClient, isHeaders, normalizeHeaders };
  //# sourceMappingURL=index.mjs.map
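The newly exported normalizeHeaders helper is what AzureChatOpenAI now uses to read User-Agent from params.defaultHeaders, whether the headers arrive as a Headers instance, an array of [key, value] pairs, a NullableHeaders-like wrapper, or a plain object. A small behavioral sketch in TypeScript follows; it is illustrative only, and the header names, values, and import path are assumptions, not part of the package.

// Hypothetical check of normalizeHeaders behavior (not from the diff).
import { normalizeHeaders } from './index.mjs';

// Every supported input shape collapses to the same plain record:
const fromHeaders = normalizeHeaders(new Headers({ 'x-api-key': 'abc' }));
const fromPairs = normalizeHeaders([['x-api-key', 'abc']]);
const fromObject = normalizeHeaders({ 'x-api-key': 'abc', 'x-count': 42 });

// All three are expected to equal { 'x-api-key': 'abc' }; non-string values in a
// plain object ('x-count' above) are filtered out before the Headers conversion.
console.log(fromHeaders, fromPairs, fromObject);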