@librechat/agents 2.4.322 → 3.0.0-rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (266)
  1. package/dist/cjs/agents/AgentContext.cjs +218 -0
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  3. package/dist/cjs/common/enum.cjs +14 -5
  4. package/dist/cjs/common/enum.cjs.map +1 -1
  5. package/dist/cjs/events.cjs +10 -6
  6. package/dist/cjs/events.cjs.map +1 -1
  7. package/dist/cjs/graphs/Graph.cjs +309 -212
  8. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  9. package/dist/cjs/graphs/MultiAgentGraph.cjs +422 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  11. package/dist/cjs/llm/anthropic/index.cjs +54 -9
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  14. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
  15. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  16. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
  17. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  18. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  19. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  20. package/dist/cjs/llm/google/index.cjs +144 -0
  21. package/dist/cjs/llm/google/index.cjs.map +1 -0
  22. package/dist/cjs/llm/google/utils/common.cjs +477 -0
  23. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  24. package/dist/cjs/llm/ollama/index.cjs +67 -0
  25. package/dist/cjs/llm/ollama/index.cjs.map +1 -0
  26. package/dist/cjs/llm/ollama/utils.cjs +158 -0
  27. package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
  28. package/dist/cjs/llm/openai/index.cjs +389 -3
  29. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  30. package/dist/cjs/llm/openai/utils/index.cjs +672 -0
  31. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  32. package/dist/cjs/llm/providers.cjs +15 -15
  33. package/dist/cjs/llm/providers.cjs.map +1 -1
  34. package/dist/cjs/llm/text.cjs +14 -3
  35. package/dist/cjs/llm/text.cjs.map +1 -1
  36. package/dist/cjs/llm/vertexai/index.cjs +330 -0
  37. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  38. package/dist/cjs/main.cjs +11 -0
  39. package/dist/cjs/main.cjs.map +1 -1
  40. package/dist/cjs/run.cjs +120 -81
  41. package/dist/cjs/run.cjs.map +1 -1
  42. package/dist/cjs/stream.cjs +85 -51
  43. package/dist/cjs/stream.cjs.map +1 -1
  44. package/dist/cjs/tools/ToolNode.cjs +10 -4
  45. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  46. package/dist/cjs/tools/handlers.cjs +119 -13
  47. package/dist/cjs/tools/handlers.cjs.map +1 -1
  48. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  49. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  50. package/dist/cjs/tools/search/firecrawl.cjs +55 -9
  51. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  52. package/dist/cjs/tools/search/format.cjs +6 -6
  53. package/dist/cjs/tools/search/format.cjs.map +1 -1
  54. package/dist/cjs/tools/search/rerankers.cjs +7 -29
  55. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  56. package/dist/cjs/tools/search/search.cjs +86 -16
  57. package/dist/cjs/tools/search/search.cjs.map +1 -1
  58. package/dist/cjs/tools/search/tool.cjs +4 -2
  59. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  60. package/dist/cjs/tools/search/utils.cjs +1 -1
  61. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  62. package/dist/cjs/utils/events.cjs +31 -0
  63. package/dist/cjs/utils/events.cjs.map +1 -0
  64. package/dist/cjs/utils/title.cjs +57 -21
  65. package/dist/cjs/utils/title.cjs.map +1 -1
  66. package/dist/cjs/utils/tokens.cjs +54 -7
  67. package/dist/cjs/utils/tokens.cjs.map +1 -1
  68. package/dist/esm/agents/AgentContext.mjs +216 -0
  69. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  70. package/dist/esm/common/enum.mjs +15 -6
  71. package/dist/esm/common/enum.mjs.map +1 -1
  72. package/dist/esm/events.mjs +10 -6
  73. package/dist/esm/events.mjs.map +1 -1
  74. package/dist/esm/graphs/Graph.mjs +311 -214
  75. package/dist/esm/graphs/Graph.mjs.map +1 -1
  76. package/dist/esm/graphs/MultiAgentGraph.mjs +420 -0
  77. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  78. package/dist/esm/llm/anthropic/index.mjs +54 -9
  79. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  80. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  81. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
  82. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  83. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
  84. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  85. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  86. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  87. package/dist/esm/llm/google/index.mjs +142 -0
  88. package/dist/esm/llm/google/index.mjs.map +1 -0
  89. package/dist/esm/llm/google/utils/common.mjs +471 -0
  90. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  91. package/dist/esm/llm/ollama/index.mjs +65 -0
  92. package/dist/esm/llm/ollama/index.mjs.map +1 -0
  93. package/dist/esm/llm/ollama/utils.mjs +155 -0
  94. package/dist/esm/llm/ollama/utils.mjs.map +1 -0
  95. package/dist/esm/llm/openai/index.mjs +388 -4
  96. package/dist/esm/llm/openai/index.mjs.map +1 -1
  97. package/dist/esm/llm/openai/utils/index.mjs +666 -0
  98. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  99. package/dist/esm/llm/providers.mjs +5 -5
  100. package/dist/esm/llm/providers.mjs.map +1 -1
  101. package/dist/esm/llm/text.mjs +14 -3
  102. package/dist/esm/llm/text.mjs.map +1 -1
  103. package/dist/esm/llm/vertexai/index.mjs +328 -0
  104. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  105. package/dist/esm/main.mjs +6 -5
  106. package/dist/esm/main.mjs.map +1 -1
  107. package/dist/esm/run.mjs +121 -83
  108. package/dist/esm/run.mjs.map +1 -1
  109. package/dist/esm/stream.mjs +87 -54
  110. package/dist/esm/stream.mjs.map +1 -1
  111. package/dist/esm/tools/ToolNode.mjs +10 -4
  112. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  113. package/dist/esm/tools/handlers.mjs +119 -15
  114. package/dist/esm/tools/handlers.mjs.map +1 -1
  115. package/dist/esm/tools/search/anthropic.mjs +37 -0
  116. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  117. package/dist/esm/tools/search/firecrawl.mjs +55 -9
  118. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  119. package/dist/esm/tools/search/format.mjs +7 -7
  120. package/dist/esm/tools/search/format.mjs.map +1 -1
  121. package/dist/esm/tools/search/rerankers.mjs +7 -29
  122. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  123. package/dist/esm/tools/search/search.mjs +86 -16
  124. package/dist/esm/tools/search/search.mjs.map +1 -1
  125. package/dist/esm/tools/search/tool.mjs +4 -2
  126. package/dist/esm/tools/search/tool.mjs.map +1 -1
  127. package/dist/esm/tools/search/utils.mjs +1 -1
  128. package/dist/esm/tools/search/utils.mjs.map +1 -1
  129. package/dist/esm/utils/events.mjs +29 -0
  130. package/dist/esm/utils/events.mjs.map +1 -0
  131. package/dist/esm/utils/title.mjs +57 -22
  132. package/dist/esm/utils/title.mjs.map +1 -1
  133. package/dist/esm/utils/tokens.mjs +54 -8
  134. package/dist/esm/utils/tokens.mjs.map +1 -1
  135. package/dist/types/agents/AgentContext.d.ts +91 -0
  136. package/dist/types/common/enum.d.ts +15 -6
  137. package/dist/types/events.d.ts +5 -4
  138. package/dist/types/graphs/Graph.d.ts +64 -67
  139. package/dist/types/graphs/MultiAgentGraph.d.ts +37 -0
  140. package/dist/types/graphs/index.d.ts +1 -0
  141. package/dist/types/llm/anthropic/index.d.ts +11 -0
  142. package/dist/types/llm/anthropic/types.d.ts +9 -3
  143. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
  144. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
  145. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  146. package/dist/types/llm/google/index.d.ts +13 -0
  147. package/dist/types/llm/google/types.d.ts +32 -0
  148. package/dist/types/llm/google/utils/common.d.ts +19 -0
  149. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  150. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  151. package/dist/types/llm/ollama/index.d.ts +7 -0
  152. package/dist/types/llm/ollama/utils.d.ts +7 -0
  153. package/dist/types/llm/openai/index.d.ts +72 -3
  154. package/dist/types/llm/openai/types.d.ts +10 -0
  155. package/dist/types/llm/openai/utils/index.d.ts +20 -0
  156. package/dist/types/llm/text.d.ts +1 -1
  157. package/dist/types/llm/vertexai/index.d.ts +293 -0
  158. package/dist/types/messages/reducer.d.ts +9 -0
  159. package/dist/types/run.d.ts +19 -12
  160. package/dist/types/scripts/ant_web_search.d.ts +1 -0
  161. package/dist/types/scripts/args.d.ts +2 -1
  162. package/dist/types/scripts/handoff-test.d.ts +1 -0
  163. package/dist/types/scripts/multi-agent-conditional.d.ts +1 -0
  164. package/dist/types/scripts/multi-agent-parallel.d.ts +1 -0
  165. package/dist/types/scripts/multi-agent-sequence.d.ts +1 -0
  166. package/dist/types/scripts/multi-agent-supervisor.d.ts +1 -0
  167. package/dist/types/scripts/multi-agent-test.d.ts +1 -0
  168. package/dist/types/scripts/test-custom-prompt-key.d.ts +2 -0
  169. package/dist/types/scripts/test-handoff-input.d.ts +2 -0
  170. package/dist/types/scripts/test-multi-agent-list-handoff.d.ts +2 -0
  171. package/dist/types/stream.d.ts +10 -3
  172. package/dist/types/tools/CodeExecutor.d.ts +2 -2
  173. package/dist/types/tools/ToolNode.d.ts +1 -1
  174. package/dist/types/tools/handlers.d.ts +17 -4
  175. package/dist/types/tools/search/anthropic.d.ts +16 -0
  176. package/dist/types/tools/search/firecrawl.d.ts +15 -0
  177. package/dist/types/tools/search/rerankers.d.ts +0 -1
  178. package/dist/types/tools/search/types.d.ts +30 -9
  179. package/dist/types/types/graph.d.ts +129 -15
  180. package/dist/types/types/llm.d.ts +24 -10
  181. package/dist/types/types/run.d.ts +46 -8
  182. package/dist/types/types/stream.d.ts +16 -2
  183. package/dist/types/types/tools.d.ts +1 -1
  184. package/dist/types/utils/events.d.ts +6 -0
  185. package/dist/types/utils/title.d.ts +2 -1
  186. package/dist/types/utils/tokens.d.ts +24 -0
  187. package/package.json +37 -17
  188. package/src/agents/AgentContext.ts +315 -0
  189. package/src/common/enum.ts +14 -5
  190. package/src/events.ts +24 -13
  191. package/src/graphs/Graph.ts +495 -312
  192. package/src/graphs/MultiAgentGraph.ts +498 -0
  193. package/src/graphs/index.ts +2 -1
  194. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  195. package/src/llm/anthropic/index.ts +78 -13
  196. package/src/llm/anthropic/llm.spec.ts +491 -115
  197. package/src/llm/anthropic/types.ts +39 -3
  198. package/src/llm/anthropic/utils/message_inputs.ts +67 -11
  199. package/src/llm/anthropic/utils/message_outputs.ts +21 -2
  200. package/src/llm/anthropic/utils/output_parsers.ts +25 -6
  201. package/src/llm/anthropic/utils/tools.ts +29 -0
  202. package/src/llm/google/index.ts +218 -0
  203. package/src/llm/google/types.ts +43 -0
  204. package/src/llm/google/utils/common.ts +646 -0
  205. package/src/llm/google/utils/tools.ts +160 -0
  206. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  207. package/src/llm/ollama/index.ts +89 -0
  208. package/src/llm/ollama/utils.ts +193 -0
  209. package/src/llm/openai/index.ts +600 -14
  210. package/src/llm/openai/types.ts +24 -0
  211. package/src/llm/openai/utils/index.ts +912 -0
  212. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  213. package/src/llm/providers.ts +10 -9
  214. package/src/llm/text.ts +26 -7
  215. package/src/llm/vertexai/index.ts +360 -0
  216. package/src/messages/reducer.ts +80 -0
  217. package/src/run.ts +181 -112
  218. package/src/scripts/ant_web_search.ts +158 -0
  219. package/src/scripts/args.ts +12 -8
  220. package/src/scripts/cli4.ts +29 -21
  221. package/src/scripts/cli5.ts +29 -21
  222. package/src/scripts/code_exec.ts +54 -23
  223. package/src/scripts/code_exec_files.ts +48 -17
  224. package/src/scripts/code_exec_simple.ts +46 -27
  225. package/src/scripts/handoff-test.ts +135 -0
  226. package/src/scripts/image.ts +52 -20
  227. package/src/scripts/multi-agent-conditional.ts +220 -0
  228. package/src/scripts/multi-agent-example-output.md +110 -0
  229. package/src/scripts/multi-agent-parallel.ts +341 -0
  230. package/src/scripts/multi-agent-sequence.ts +212 -0
  231. package/src/scripts/multi-agent-supervisor.ts +361 -0
  232. package/src/scripts/multi-agent-test.ts +186 -0
  233. package/src/scripts/search.ts +1 -9
  234. package/src/scripts/simple.ts +25 -10
  235. package/src/scripts/test-custom-prompt-key.ts +145 -0
  236. package/src/scripts/test-handoff-input.ts +110 -0
  237. package/src/scripts/test-multi-agent-list-handoff.ts +258 -0
  238. package/src/scripts/tools.ts +48 -18
  239. package/src/specs/anthropic.simple.test.ts +150 -34
  240. package/src/specs/azure.simple.test.ts +325 -0
  241. package/src/specs/openai.simple.test.ts +140 -33
  242. package/src/specs/openrouter.simple.test.ts +107 -0
  243. package/src/specs/prune.test.ts +4 -9
  244. package/src/specs/reasoning.test.ts +80 -44
  245. package/src/specs/token-memoization.test.ts +39 -0
  246. package/src/stream.test.ts +94 -0
  247. package/src/stream.ts +139 -60
  248. package/src/tools/ToolNode.ts +21 -7
  249. package/src/tools/handlers.ts +192 -18
  250. package/src/tools/search/anthropic.ts +51 -0
  251. package/src/tools/search/firecrawl.ts +69 -20
  252. package/src/tools/search/format.ts +6 -8
  253. package/src/tools/search/rerankers.ts +7 -40
  254. package/src/tools/search/search.ts +97 -16
  255. package/src/tools/search/tool.ts +5 -2
  256. package/src/tools/search/types.ts +30 -10
  257. package/src/tools/search/utils.ts +1 -1
  258. package/src/types/graph.ts +315 -103
  259. package/src/types/llm.ts +25 -12
  260. package/src/types/run.ts +51 -13
  261. package/src/types/stream.ts +22 -1
  262. package/src/types/tools.ts +16 -10
  263. package/src/utils/events.ts +32 -0
  264. package/src/utils/llmConfig.ts +19 -7
  265. package/src/utils/title.ts +104 -30
  266. package/src/utils/tokens.ts +69 -10
@@ -0,0 +1,666 @@
+ import { isAIMessage, isDataContentBlock, convertToProviderContentBlock, AIMessageChunk, ChatMessage, parseBase64DataUrl, parseMimeType, AIMessage } from '@langchain/core/messages';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { convertLangChainToolCallToOpenAI, parseToolCall, makeInvalidToolCall } from '@langchain/core/output_parsers/openai_tools';
+
+ function extractGenericMessageCustomRole(message) {
+     if (message.role !== 'system' &&
+         message.role !== 'developer' &&
+         message.role !== 'assistant' &&
+         message.role !== 'user' &&
+         message.role !== 'function' &&
+         message.role !== 'tool') {
+         console.warn(`Unknown message role: ${message.role}`);
+     }
+     return message.role;
+ }
+ function messageToOpenAIRole(message) {
+     const type = message._getType();
+     switch (type) {
+         case 'system':
+             return 'system';
+         case 'ai':
+             return 'assistant';
+         case 'human':
+             return 'user';
+         case 'function':
+             return 'function';
+         case 'tool':
+             return 'tool';
+         case 'generic': {
+             if (!ChatMessage.isInstance(message))
+                 throw new Error('Invalid generic chat message');
+             return extractGenericMessageCustomRole(message);
+         }
+         default:
+             throw new Error(`Unknown message type: ${type}`);
+     }
+ }
+ const completionsApiContentBlockConverter = {
+     providerName: 'ChatOpenAI',
+     fromStandardTextBlock(block) {
+         return { type: 'text', text: block.text };
+     },
+     fromStandardImageBlock(block) {
+         if (block.source_type === 'url') {
+             return {
+                 type: 'image_url',
+                 image_url: {
+                     url: block.url,
+                     ...(block.metadata?.detail
+                         ? { detail: block.metadata.detail }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             const url = `data:${block.mime_type ?? ''};base64,${block.data}`;
+             return {
+                 type: 'image_url',
+                 image_url: {
+                     url,
+                     ...(block.metadata?.detail
+                         ? { detail: block.metadata.detail }
+                         : {}),
+                 },
+             };
+         }
+         throw new Error(`Image content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+     fromStandardAudioBlock(block) {
+         if (block.source_type === 'url') {
+             const data = parseBase64DataUrl({ dataUrl: block.url });
+             if (!data) {
+                 throw new Error(`URL audio blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
+             }
+             const rawMimeType = data.mime_type || block.mime_type || '';
+             let mimeType;
+             try {
+                 mimeType = parseMimeType(rawMimeType);
+             }
+             catch {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             if (mimeType.type !== 'audio' ||
+                 (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')) {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             return {
+                 type: 'input_audio',
+                 input_audio: {
+                     format: mimeType.subtype,
+                     data: data.data,
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             let mimeType;
+             try {
+                 mimeType = parseMimeType(block.mime_type ?? '');
+             }
+             catch {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             if (mimeType.type !== 'audio' ||
+                 (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')) {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             return {
+                 type: 'input_audio',
+                 input_audio: {
+                     format: mimeType.subtype,
+                     data: block.data,
+                 },
+             };
+         }
+         throw new Error(`Audio content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+     fromStandardFileBlock(block) {
+         if (block.source_type === 'url') {
+             const data = parseBase64DataUrl({ dataUrl: block.url });
+             if (!data) {
+                 throw new Error(`URL file blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
+             }
+             return {
+                 type: 'file',
+                 file: {
+                     file_data: block.url, // formatted as base64 data URL
+                     ...(block.metadata?.filename || block.metadata?.name
+                         ? {
+                             filename: (block.metadata.filename ||
+                                 block.metadata.name),
+                         }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             return {
+                 type: 'file',
+                 file: {
+                     file_data: `data:${block.mime_type ?? ''};base64,${block.data}`,
+                     ...(block.metadata?.filename ||
+                         block.metadata?.name ||
+                         block.metadata?.title
+                         ? {
+                             filename: (block.metadata.filename ||
+                                 block.metadata.name ||
+                                 block.metadata.title),
+                         }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'id') {
+             return {
+                 type: 'file',
+                 file: {
+                     file_id: block.id,
+                 },
+             };
+         }
+         throw new Error(`File content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+ };
+ // Used in LangSmith, export is important here
+ function _convertMessagesToOpenAIParams(messages, model) {
+     // TODO: Function messages do not support array content, fix cast
+     return messages.flatMap((message) => {
+         let role = messageToOpenAIRole(message);
+         if (role === 'system' && isReasoningModel(model)) {
+             role = 'developer';
+         }
+         const content = typeof message.content === 'string'
+             ? message.content
+             : message.content.map((m) => {
+                 if (isDataContentBlock(m)) {
+                     return convertToProviderContentBlock(m, completionsApiContentBlockConverter);
+                 }
+                 return m;
+             });
+         // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         const completionParam = {
+             role,
+             content,
+         };
+         if (message.name != null) {
+             completionParam.name = message.name;
+         }
+         if (message.additional_kwargs.function_call != null) {
+             completionParam.function_call = message.additional_kwargs.function_call;
+             completionParam.content = '';
+         }
+         if (isAIMessage(message) && !!message.tool_calls?.length) {
+             completionParam.tool_calls = message.tool_calls.map(convertLangChainToolCallToOpenAI);
+             completionParam.content = '';
+         }
+         else {
+             if (message.additional_kwargs.tool_calls != null) {
+                 completionParam.tool_calls = message.additional_kwargs.tool_calls;
+             }
+             if (message.tool_call_id != null) {
+                 completionParam.tool_call_id = message.tool_call_id;
+             }
+         }
+         if (message.additional_kwargs.audio &&
+             typeof message.additional_kwargs.audio === 'object' &&
+             'id' in message.additional_kwargs.audio) {
+             const audioMessage = {
+                 role: 'assistant',
+                 audio: {
+                     id: message.additional_kwargs.audio.id,
+                 },
+             };
+             return [completionParam, audioMessage];
+         }
+         return completionParam;
+     });
+ }
+ const _FUNCTION_CALL_IDS_MAP_KEY = '__openai_function_call_ids__';
+ function _convertReasoningSummaryToOpenAIResponsesParams(reasoning) {
+     // combine summary parts that have the same index and then remove the indexes
+     const summary = (reasoning.summary.length > 1
+         ? reasoning.summary.reduce((acc, curr) => {
+             const last = acc.at(-1);
+             if (last.index === curr.index) {
+                 last.text += curr.text;
+             }
+             else {
+                 acc.push(curr);
+             }
+             return acc;
+         }, [{ ...reasoning.summary[0] }])
+         : reasoning.summary).map((s) => Object.fromEntries(Object.entries(s).filter(([k]) => k !== 'index')));
+     return {
+         ...reasoning,
+         summary,
+     };
+ }
+ function _convertMessagesToOpenAIResponsesParams(messages, model, zdrEnabled) {
+     return messages.flatMap((lcMsg) => {
+         const additional_kwargs = lcMsg.additional_kwargs;
+         let role = messageToOpenAIRole(lcMsg);
+         if (role === 'system' && isReasoningModel(model))
+             role = 'developer';
+         if (role === 'function') {
+             throw new Error('Function messages are not supported in Responses API');
+         }
+         if (role === 'tool') {
+             const toolMessage = lcMsg;
+             // Handle computer call output
+             if (additional_kwargs.type === 'computer_call_output') {
+                 const output = (() => {
+                     if (typeof toolMessage.content === 'string') {
+                         return {
+                             type: 'computer_screenshot',
+                             image_url: toolMessage.content,
+                         };
+                     }
+                     if (Array.isArray(toolMessage.content)) {
+                         const oaiScreenshot = toolMessage.content.find((i) => i.type === 'computer_screenshot');
+                         if (oaiScreenshot)
+                             return oaiScreenshot;
+                         const lcImage = toolMessage.content.find((i) => i.type === 'image_url');
+                         if (lcImage) {
+                             return {
+                                 type: 'computer_screenshot',
+                                 image_url: typeof lcImage.image_url === 'string'
+                                     ? lcImage.image_url
+                                     : lcImage.image_url.url,
+                             };
+                         }
+                     }
+                     throw new Error('Invalid computer call output');
+                 })();
+                 return {
+                     type: 'computer_call_output',
+                     output,
+                     call_id: toolMessage.tool_call_id,
+                 };
+             }
+             return {
+                 type: 'function_call_output',
+                 call_id: toolMessage.tool_call_id,
+                 id: toolMessage.id?.startsWith('fc_') ? toolMessage.id : undefined,
+                 output: typeof toolMessage.content !== 'string'
+                     ? JSON.stringify(toolMessage.content)
+                     : toolMessage.content,
+             };
+         }
+         if (role === 'assistant') {
+             // if we have the original response items, just reuse them
+             if (!zdrEnabled &&
+                 lcMsg.response_metadata.output != null &&
+                 Array.isArray(lcMsg.response_metadata.output) &&
+                 lcMsg.response_metadata.output.length > 0 &&
+                 lcMsg.response_metadata.output.every((item) => 'type' in item)) {
+                 return lcMsg.response_metadata.output;
+             }
+             // otherwise, try to reconstruct the response from what we have
+             const input = [];
+             // reasoning items
+             if (additional_kwargs.reasoning && !zdrEnabled) {
+                 const reasoningItem = _convertReasoningSummaryToOpenAIResponsesParams(additional_kwargs.reasoning);
+                 input.push(reasoningItem);
+             }
+             // ai content
+             let { content } = lcMsg;
+             if (additional_kwargs.refusal) {
+                 if (typeof content === 'string') {
+                     content = [{ type: 'output_text', text: content, annotations: [] }];
+                 }
+                 content = [
+                     ...content,
+                     { type: 'refusal', refusal: additional_kwargs.refusal },
+                 ];
+             }
+             input.push({
+                 type: 'message',
+                 role: 'assistant',
+                 ...(lcMsg.id && !zdrEnabled ? { id: lcMsg.id } : {}),
+                 content: typeof content === 'string'
+                     ? content
+                     : content.flatMap((item) => {
+                         if (item.type === 'text') {
+                             return {
+                                 type: 'output_text',
+                                 text: item.text,
+                                 // @ts-expect-error TODO: add types for `annotations`
+                                 annotations: item.annotations ?? [],
+                             };
+                         }
+                         if (item.type === 'output_text' || item.type === 'refusal') {
+                             return item;
+                         }
+                         return [];
+                     }),
+             });
+             const functionCallIds = additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY];
+             if (isAIMessage(lcMsg) && !!lcMsg.tool_calls?.length) {
+                 input.push(...lcMsg.tool_calls.map((toolCall) => ({
+                     type: 'function_call',
+                     name: toolCall.name,
+                     arguments: JSON.stringify(toolCall.args),
+                     call_id: toolCall.id,
+                     ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
+                 })));
+             }
+             else if (additional_kwargs.tool_calls) {
+                 input.push(...additional_kwargs.tool_calls.map((toolCall) => ({
+                     type: 'function_call',
+                     name: toolCall.function.name,
+                     call_id: toolCall.id,
+                     arguments: toolCall.function.arguments,
+                     ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
+                 })));
+             }
+             const toolOutputs = (lcMsg.response_metadata.output?.length ?? 0) > 0
+                 ? lcMsg.response_metadata.output
+                 : additional_kwargs.tool_outputs;
+             const fallthroughCallTypes = [
+                 'computer_call',
+                 /** @ts-ignore */
+                 'mcp_call',
+                 /** @ts-ignore */
+                 'code_interpreter_call',
+                 /** @ts-ignore */
+                 'image_generation_call',
+             ];
+             if (toolOutputs != null) {
+                 const castToolOutputs = toolOutputs;
+                 const fallthroughCalls = castToolOutputs.filter((item) => fallthroughCallTypes.includes(item.type));
+                 if (fallthroughCalls.length > 0)
+                     input.push(...fallthroughCalls);
+             }
+             return input;
+         }
+         if (role === 'user' || role === 'system' || role === 'developer') {
+             if (typeof lcMsg.content === 'string') {
+                 return { type: 'message', role, content: lcMsg.content };
+             }
+             const messages = [];
+             const content = lcMsg.content.flatMap((item) => {
+                 if (item.type === 'mcp_approval_response') {
+                     messages.push({
+                         // @ts-ignore
+                         type: 'mcp_approval_response',
+                         approval_request_id: item.approval_request_id,
+                         approve: item.approve,
+                     });
+                 }
+                 if (isDataContentBlock(item)) {
+                     return convertToProviderContentBlock(item, completionsApiContentBlockConverter);
+                 }
+                 if (item.type === 'text') {
+                     return {
+                         type: 'input_text',
+                         text: item.text,
+                     };
+                 }
+                 if (item.type === 'image_url') {
+                     return {
+                         type: 'input_image',
+                         image_url: typeof item.image_url === 'string'
+                             ? item.image_url
+                             : item.image_url.url,
+                         detail: typeof item.image_url === 'string'
+                             ? 'auto'
+                             : item.image_url.detail,
+                     };
+                 }
+                 if (item.type === 'input_text' ||
+                     item.type === 'input_image' ||
+                     item.type === 'input_file') {
+                     return item;
+                 }
+                 return [];
+             });
+             if (content.length > 0) {
+                 messages.push({ type: 'message', role, content });
+             }
+             return messages;
+         }
+         console.warn(`Unsupported role found when converting to OpenAI Responses API: ${role}`);
+         return [];
+     });
+ }
+ function isReasoningModel(model) {
+     return model != null && model !== '' && /\b(o\d|gpt-[5-9])\b/i.test(model);
+ }
+ function _convertOpenAIResponsesMessageToBaseMessage(response) {
+     if (response.error) {
+         // TODO: add support for `addLangChainErrorFields`
+         const error = new Error(response.error.message);
+         error.name = response.error.code;
+         throw error;
+     }
+     let messageId;
+     const content = [];
+     const tool_calls = [];
+     const invalid_tool_calls = [];
+     const response_metadata = {
+         model: response.model,
+         created_at: response.created_at,
+         id: response.id,
+         incomplete_details: response.incomplete_details,
+         metadata: response.metadata,
+         object: response.object,
+         status: response.status,
+         user: response.user,
+         service_tier: response.service_tier,
+         // for compatibility with chat completion calls.
+         model_name: response.model,
+     };
+     const additional_kwargs = {};
+     for (const item of response.output) {
+         if (item.type === 'message') {
+             messageId = item.id;
+             content.push(...item.content.flatMap((part) => {
+                 if (part.type === 'output_text') {
+                     if ('parsed' in part && part.parsed != null) {
+                         additional_kwargs.parsed = part.parsed;
+                     }
+                     return {
+                         type: 'text',
+                         text: part.text,
+                         annotations: part.annotations,
+                     };
+                 }
+                 if (part.type === 'refusal') {
+                     additional_kwargs.refusal = part.refusal;
+                     return [];
+                 }
+                 return part;
+             }));
+         }
+         else if (item.type === 'function_call') {
+             const fnAdapter = {
+                 function: { name: item.name, arguments: item.arguments },
+                 id: item.call_id,
+             };
+             try {
+                 tool_calls.push(parseToolCall(fnAdapter, { returnId: true }));
+             }
+             catch (e) {
+                 let errMessage;
+                 if (typeof e === 'object' &&
+                     e != null &&
+                     'message' in e &&
+                     typeof e.message === 'string') {
+                     errMessage = e.message;
+                 }
+                 invalid_tool_calls.push(makeInvalidToolCall(fnAdapter, errMessage));
+             }
+             additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] ??= {};
+             if (item.id) {
+                 additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][item.call_id] = item.id;
+             }
+         }
+         else if (item.type === 'reasoning') {
+             additional_kwargs.reasoning = item;
+         }
+         else {
+             additional_kwargs.tool_outputs ??= [];
+             additional_kwargs.tool_outputs.push(item);
+         }
+     }
+     return new AIMessage({
+         id: messageId,
+         content,
+         tool_calls,
+         invalid_tool_calls,
+         usage_metadata: response.usage,
+         additional_kwargs,
+         response_metadata,
+     });
+ }
+ function _convertOpenAIResponsesDeltaToBaseMessageChunk(chunk) {
+     const content = [];
+     let generationInfo = {};
+     let usage_metadata;
+     const tool_call_chunks = [];
+     const response_metadata = {};
+     const additional_kwargs = {};
+     let id;
+     if (chunk.type === 'response.output_text.delta') {
+         content.push({
+             type: 'text',
+             text: chunk.delta,
+             index: chunk.content_index,
+         });
+         /** @ts-ignore */
+     }
+     else if (chunk.type === 'response.output_text_annotation.added') {
+         content.push({
+             type: 'text',
+             text: '',
+             /** @ts-ignore */
+             annotations: [chunk.annotation],
+             /** @ts-ignore */
+             index: chunk.content_index,
+         });
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         chunk.item.type === 'message') {
+         id = chunk.item.id;
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         chunk.item.type === 'function_call') {
+         tool_call_chunks.push({
+             type: 'tool_call_chunk',
+             name: chunk.item.name,
+             args: chunk.item.arguments,
+             id: chunk.item.call_id,
+             index: chunk.output_index,
+         });
+         additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
+             [chunk.item.call_id]: chunk.item.id,
+         };
+     }
+     else if (chunk.type === 'response.output_item.done' &&
+         [
+             'web_search_call',
+             'file_search_call',
+             'computer_call',
+             'code_interpreter_call',
+             'mcp_call',
+             'mcp_list_tools',
+             'mcp_approval_request',
+             'image_generation_call',
+         ].includes(chunk.item.type)) {
+         additional_kwargs.tool_outputs = [chunk.item];
+     }
+     else if (chunk.type === 'response.created') {
+         response_metadata.id = chunk.response.id;
+         response_metadata.model_name = chunk.response.model;
+         response_metadata.model = chunk.response.model;
+     }
+     else if (chunk.type === 'response.completed') {
+         const msg = _convertOpenAIResponsesMessageToBaseMessage(chunk.response);
+         usage_metadata = chunk.response.usage;
+         if (chunk.response.text?.format?.type === 'json_schema') {
+             additional_kwargs.parsed ??= JSON.parse(msg.text);
+         }
+         for (const [key, value] of Object.entries(chunk.response)) {
+             if (key !== 'id')
+                 response_metadata[key] = value;
+         }
+     }
+     else if (chunk.type === 'response.function_call_arguments.delta') {
+         tool_call_chunks.push({
+             type: 'tool_call_chunk',
+             args: chunk.delta,
+             index: chunk.output_index,
+         });
+     }
+     else if (chunk.type === 'response.web_search_call.completed' ||
+         chunk.type === 'response.file_search_call.completed') {
+         generationInfo = {
+             tool_outputs: {
+                 id: chunk.item_id,
+                 type: chunk.type.replace('response.', '').replace('.completed', ''),
+                 status: 'completed',
+             },
+         };
+     }
+     else if (chunk.type === 'response.refusal.done') {
+         additional_kwargs.refusal = chunk.refusal;
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         'item' in chunk &&
+         chunk.item.type === 'reasoning') {
+         const summary = chunk
+             .item.summary
+             ? chunk.item.summary.map((s, index) => ({
+                 ...s,
+                 index,
+             }))
+             : undefined;
+         additional_kwargs.reasoning = {
+             // We only capture ID in the first chunk or else the concatenated result of all chunks will
+             // have an ID field that is repeated once per chunk. There is special handling for the `type`
+             // field that prevents this, however.
+             id: chunk.item.id,
+             type: chunk.item.type,
+             ...(summary ? { summary } : {}),
+         };
+     }
+     else if (chunk.type === 'response.reasoning_summary_part.added') {
+         additional_kwargs.reasoning = {
+             type: 'reasoning',
+             summary: [{ ...chunk.part, index: chunk.summary_index }],
+         };
+     }
+     else if (chunk.type === 'response.reasoning_summary_text.delta') {
+         additional_kwargs.reasoning = {
+             type: 'reasoning',
+             summary: [
+                 { text: chunk.delta, type: 'summary_text', index: chunk.summary_index },
+             ],
+         };
+         /** @ts-ignore */
+     }
+     else if (chunk.type === 'response.image_generation_call.partial_image') {
+         // noop/fixme: retaining partial images in a message chunk means that _all_
+         // partial images get kept in history, so we don't do anything here.
+         return null;
+     }
+     else {
+         return null;
+     }
+     return new ChatGenerationChunk({
+         // Legacy reasons, `onLLMNewToken` should pull this out
+         text: content.map((part) => part.text).join(''),
+         message: new AIMessageChunk({
+             id,
+             content,
+             tool_call_chunks,
+             usage_metadata,
+             additional_kwargs,
+             response_metadata,
+         }),
+         generationInfo,
+     });
+ }
+
+ export { _convertMessagesToOpenAIParams, _convertMessagesToOpenAIResponsesParams, _convertOpenAIResponsesDeltaToBaseMessageChunk, isReasoningModel, messageToOpenAIRole };
+ //# sourceMappingURL=index.mjs.map
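
For orientation, a minimal sketch of how the newly exported helpers might be exercised; the import path and the example messages below are illustrative assumptions, not taken from the package:

// Illustrative only: the module path is hypothetical; the diff above only shows the compiled file's contents.
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
import { isReasoningModel, _convertMessagesToOpenAIParams } from './dist/esm/llm/openai/utils/index.mjs';

const messages = [
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('Hello!'),
];

// 'o3-mini' matches /\b(o\d|gpt-[5-9])\b/i, so isReasoningModel() returns true
// and the system message is remapped to the 'developer' role.
console.log(isReasoningModel('o3-mini')); // true
console.log(_convertMessagesToOpenAIParams(messages, 'o3-mini'));
// → [ { role: 'developer', content: 'You are a helpful assistant.' },
//     { role: 'user', content: 'Hello!' } ]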