@librechat/agents 2.4.321 → 3.0.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (266)
  1. package/dist/cjs/agents/AgentContext.cjs +218 -0
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  3. package/dist/cjs/common/enum.cjs +14 -5
  4. package/dist/cjs/common/enum.cjs.map +1 -1
  5. package/dist/cjs/events.cjs +10 -6
  6. package/dist/cjs/events.cjs.map +1 -1
  7. package/dist/cjs/graphs/Graph.cjs +309 -212
  8. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  9. package/dist/cjs/graphs/MultiAgentGraph.cjs +322 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  11. package/dist/cjs/llm/anthropic/index.cjs +54 -9
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  14. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
  15. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  16. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
  17. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  18. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  19. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  20. package/dist/cjs/llm/google/index.cjs +144 -0
  21. package/dist/cjs/llm/google/index.cjs.map +1 -0
  22. package/dist/cjs/llm/google/utils/common.cjs +477 -0
  23. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  24. package/dist/cjs/llm/ollama/index.cjs +67 -0
  25. package/dist/cjs/llm/ollama/index.cjs.map +1 -0
  26. package/dist/cjs/llm/ollama/utils.cjs +158 -0
  27. package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
  28. package/dist/cjs/llm/openai/index.cjs +389 -3
  29. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  30. package/dist/cjs/llm/openai/utils/index.cjs +672 -0
  31. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  32. package/dist/cjs/llm/providers.cjs +15 -15
  33. package/dist/cjs/llm/providers.cjs.map +1 -1
  34. package/dist/cjs/llm/text.cjs +14 -3
  35. package/dist/cjs/llm/text.cjs.map +1 -1
  36. package/dist/cjs/llm/vertexai/index.cjs +330 -0
  37. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  38. package/dist/cjs/main.cjs +11 -0
  39. package/dist/cjs/main.cjs.map +1 -1
  40. package/dist/cjs/run.cjs +120 -81
  41. package/dist/cjs/run.cjs.map +1 -1
  42. package/dist/cjs/stream.cjs +85 -51
  43. package/dist/cjs/stream.cjs.map +1 -1
  44. package/dist/cjs/tools/ToolNode.cjs +10 -4
  45. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  46. package/dist/cjs/tools/handlers.cjs +119 -13
  47. package/dist/cjs/tools/handlers.cjs.map +1 -1
  48. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  49. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  50. package/dist/cjs/tools/search/firecrawl.cjs +61 -13
  51. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  52. package/dist/cjs/tools/search/format.cjs +9 -3
  53. package/dist/cjs/tools/search/format.cjs.map +1 -1
  54. package/dist/cjs/tools/search/rerankers.cjs +35 -50
  55. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  56. package/dist/cjs/tools/search/schema.cjs +70 -0
  57. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  58. package/dist/cjs/tools/search/search.cjs +145 -38
  59. package/dist/cjs/tools/search/search.cjs.map +1 -1
  60. package/dist/cjs/tools/search/tool.cjs +165 -48
  61. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  62. package/dist/cjs/tools/search/utils.cjs +34 -5
  63. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  64. package/dist/cjs/utils/events.cjs +31 -0
  65. package/dist/cjs/utils/events.cjs.map +1 -0
  66. package/dist/cjs/utils/title.cjs +57 -21
  67. package/dist/cjs/utils/title.cjs.map +1 -1
  68. package/dist/cjs/utils/tokens.cjs +54 -7
  69. package/dist/cjs/utils/tokens.cjs.map +1 -1
  70. package/dist/esm/agents/AgentContext.mjs +216 -0
  71. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  72. package/dist/esm/common/enum.mjs +15 -6
  73. package/dist/esm/common/enum.mjs.map +1 -1
  74. package/dist/esm/events.mjs +10 -6
  75. package/dist/esm/events.mjs.map +1 -1
  76. package/dist/esm/graphs/Graph.mjs +311 -214
  77. package/dist/esm/graphs/Graph.mjs.map +1 -1
  78. package/dist/esm/graphs/MultiAgentGraph.mjs +320 -0
  79. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  80. package/dist/esm/llm/anthropic/index.mjs +54 -9
  81. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  82. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  83. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
  84. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  85. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
  86. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  87. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  88. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  89. package/dist/esm/llm/google/index.mjs +142 -0
  90. package/dist/esm/llm/google/index.mjs.map +1 -0
  91. package/dist/esm/llm/google/utils/common.mjs +471 -0
  92. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  93. package/dist/esm/llm/ollama/index.mjs +65 -0
  94. package/dist/esm/llm/ollama/index.mjs.map +1 -0
  95. package/dist/esm/llm/ollama/utils.mjs +155 -0
  96. package/dist/esm/llm/ollama/utils.mjs.map +1 -0
  97. package/dist/esm/llm/openai/index.mjs +388 -4
  98. package/dist/esm/llm/openai/index.mjs.map +1 -1
  99. package/dist/esm/llm/openai/utils/index.mjs +666 -0
  100. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  101. package/dist/esm/llm/providers.mjs +5 -5
  102. package/dist/esm/llm/providers.mjs.map +1 -1
  103. package/dist/esm/llm/text.mjs +14 -3
  104. package/dist/esm/llm/text.mjs.map +1 -1
  105. package/dist/esm/llm/vertexai/index.mjs +328 -0
  106. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  107. package/dist/esm/main.mjs +6 -5
  108. package/dist/esm/main.mjs.map +1 -1
  109. package/dist/esm/run.mjs +121 -83
  110. package/dist/esm/run.mjs.map +1 -1
  111. package/dist/esm/stream.mjs +87 -54
  112. package/dist/esm/stream.mjs.map +1 -1
  113. package/dist/esm/tools/ToolNode.mjs +10 -4
  114. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  115. package/dist/esm/tools/handlers.mjs +119 -15
  116. package/dist/esm/tools/handlers.mjs.map +1 -1
  117. package/dist/esm/tools/search/anthropic.mjs +37 -0
  118. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  119. package/dist/esm/tools/search/firecrawl.mjs +61 -13
  120. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  121. package/dist/esm/tools/search/format.mjs +10 -4
  122. package/dist/esm/tools/search/format.mjs.map +1 -1
  123. package/dist/esm/tools/search/rerankers.mjs +35 -50
  124. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  125. package/dist/esm/tools/search/schema.mjs +61 -0
  126. package/dist/esm/tools/search/schema.mjs.map +1 -0
  127. package/dist/esm/tools/search/search.mjs +146 -39
  128. package/dist/esm/tools/search/search.mjs.map +1 -1
  129. package/dist/esm/tools/search/tool.mjs +164 -47
  130. package/dist/esm/tools/search/tool.mjs.map +1 -1
  131. package/dist/esm/tools/search/utils.mjs +33 -6
  132. package/dist/esm/tools/search/utils.mjs.map +1 -1
  133. package/dist/esm/utils/events.mjs +29 -0
  134. package/dist/esm/utils/events.mjs.map +1 -0
  135. package/dist/esm/utils/title.mjs +57 -22
  136. package/dist/esm/utils/title.mjs.map +1 -1
  137. package/dist/esm/utils/tokens.mjs +54 -8
  138. package/dist/esm/utils/tokens.mjs.map +1 -1
  139. package/dist/types/agents/AgentContext.d.ts +91 -0
  140. package/dist/types/common/enum.d.ts +15 -6
  141. package/dist/types/events.d.ts +5 -4
  142. package/dist/types/graphs/Graph.d.ts +64 -67
  143. package/dist/types/graphs/MultiAgentGraph.d.ts +37 -0
  144. package/dist/types/graphs/index.d.ts +1 -0
  145. package/dist/types/llm/anthropic/index.d.ts +11 -0
  146. package/dist/types/llm/anthropic/types.d.ts +9 -3
  147. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
  148. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
  149. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  150. package/dist/types/llm/google/index.d.ts +13 -0
  151. package/dist/types/llm/google/types.d.ts +32 -0
  152. package/dist/types/llm/google/utils/common.d.ts +19 -0
  153. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  154. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  155. package/dist/types/llm/ollama/index.d.ts +7 -0
  156. package/dist/types/llm/ollama/utils.d.ts +7 -0
  157. package/dist/types/llm/openai/index.d.ts +72 -3
  158. package/dist/types/llm/openai/types.d.ts +10 -0
  159. package/dist/types/llm/openai/utils/index.d.ts +20 -0
  160. package/dist/types/llm/text.d.ts +1 -1
  161. package/dist/types/llm/vertexai/index.d.ts +293 -0
  162. package/dist/types/messages/reducer.d.ts +9 -0
  163. package/dist/types/run.d.ts +19 -12
  164. package/dist/types/scripts/ant_web_search.d.ts +1 -0
  165. package/dist/types/scripts/args.d.ts +2 -1
  166. package/dist/types/scripts/handoff-test.d.ts +1 -0
  167. package/dist/types/scripts/multi-agent-conditional.d.ts +1 -0
  168. package/dist/types/scripts/multi-agent-parallel.d.ts +1 -0
  169. package/dist/types/scripts/multi-agent-sequence.d.ts +1 -0
  170. package/dist/types/scripts/multi-agent-test.d.ts +1 -0
  171. package/dist/types/stream.d.ts +10 -3
  172. package/dist/types/tools/CodeExecutor.d.ts +2 -2
  173. package/dist/types/tools/ToolNode.d.ts +1 -1
  174. package/dist/types/tools/handlers.d.ts +17 -4
  175. package/dist/types/tools/search/anthropic.d.ts +16 -0
  176. package/dist/types/tools/search/firecrawl.d.ts +16 -0
  177. package/dist/types/tools/search/rerankers.d.ts +8 -5
  178. package/dist/types/tools/search/schema.d.ts +16 -0
  179. package/dist/types/tools/search/tool.d.ts +13 -0
  180. package/dist/types/tools/search/types.d.ts +64 -9
  181. package/dist/types/tools/search/utils.d.ts +9 -2
  182. package/dist/types/types/graph.d.ts +95 -15
  183. package/dist/types/types/llm.d.ts +24 -10
  184. package/dist/types/types/run.d.ts +46 -8
  185. package/dist/types/types/stream.d.ts +16 -2
  186. package/dist/types/types/tools.d.ts +1 -1
  187. package/dist/types/utils/events.d.ts +6 -0
  188. package/dist/types/utils/title.d.ts +2 -1
  189. package/dist/types/utils/tokens.d.ts +24 -0
  190. package/package.json +35 -18
  191. package/src/agents/AgentContext.ts +315 -0
  192. package/src/common/enum.ts +14 -5
  193. package/src/events.ts +24 -13
  194. package/src/graphs/Graph.ts +495 -312
  195. package/src/graphs/MultiAgentGraph.ts +381 -0
  196. package/src/graphs/index.ts +2 -1
  197. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  198. package/src/llm/anthropic/index.ts +78 -13
  199. package/src/llm/anthropic/llm.spec.ts +491 -115
  200. package/src/llm/anthropic/types.ts +39 -3
  201. package/src/llm/anthropic/utils/message_inputs.ts +67 -11
  202. package/src/llm/anthropic/utils/message_outputs.ts +21 -2
  203. package/src/llm/anthropic/utils/output_parsers.ts +25 -6
  204. package/src/llm/anthropic/utils/tools.ts +29 -0
  205. package/src/llm/google/index.ts +218 -0
  206. package/src/llm/google/types.ts +43 -0
  207. package/src/llm/google/utils/common.ts +646 -0
  208. package/src/llm/google/utils/tools.ts +160 -0
  209. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  210. package/src/llm/ollama/index.ts +89 -0
  211. package/src/llm/ollama/utils.ts +193 -0
  212. package/src/llm/openai/index.ts +600 -14
  213. package/src/llm/openai/types.ts +24 -0
  214. package/src/llm/openai/utils/index.ts +912 -0
  215. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  216. package/src/llm/providers.ts +10 -9
  217. package/src/llm/text.ts +26 -7
  218. package/src/llm/vertexai/index.ts +360 -0
  219. package/src/messages/reducer.ts +80 -0
  220. package/src/run.ts +181 -112
  221. package/src/scripts/ant_web_search.ts +158 -0
  222. package/src/scripts/args.ts +12 -8
  223. package/src/scripts/cli4.ts +29 -21
  224. package/src/scripts/cli5.ts +29 -21
  225. package/src/scripts/code_exec.ts +54 -23
  226. package/src/scripts/code_exec_files.ts +48 -17
  227. package/src/scripts/code_exec_simple.ts +46 -27
  228. package/src/scripts/handoff-test.ts +135 -0
  229. package/src/scripts/image.ts +52 -20
  230. package/src/scripts/multi-agent-conditional.ts +220 -0
  231. package/src/scripts/multi-agent-example-output.md +110 -0
  232. package/src/scripts/multi-agent-parallel.ts +337 -0
  233. package/src/scripts/multi-agent-sequence.ts +212 -0
  234. package/src/scripts/multi-agent-test.ts +186 -0
  235. package/src/scripts/search.ts +4 -12
  236. package/src/scripts/simple.ts +25 -10
  237. package/src/scripts/tools.ts +48 -18
  238. package/src/specs/anthropic.simple.test.ts +150 -34
  239. package/src/specs/azure.simple.test.ts +325 -0
  240. package/src/specs/openai.simple.test.ts +140 -33
  241. package/src/specs/openrouter.simple.test.ts +107 -0
  242. package/src/specs/prune.test.ts +4 -9
  243. package/src/specs/reasoning.test.ts +80 -44
  244. package/src/specs/token-memoization.test.ts +39 -0
  245. package/src/stream.test.ts +94 -0
  246. package/src/stream.ts +139 -60
  247. package/src/tools/ToolNode.ts +21 -7
  248. package/src/tools/handlers.ts +192 -18
  249. package/src/tools/search/anthropic.ts +51 -0
  250. package/src/tools/search/firecrawl.ts +78 -24
  251. package/src/tools/search/format.ts +10 -5
  252. package/src/tools/search/rerankers.ts +50 -62
  253. package/src/tools/search/schema.ts +63 -0
  254. package/src/tools/search/search.ts +167 -34
  255. package/src/tools/search/tool.ts +222 -46
  256. package/src/tools/search/types.ts +65 -10
  257. package/src/tools/search/utils.ts +37 -5
  258. package/src/types/graph.ts +272 -103
  259. package/src/types/llm.ts +25 -12
  260. package/src/types/run.ts +51 -13
  261. package/src/types/stream.ts +22 -1
  262. package/src/types/tools.ts +16 -10
  263. package/src/utils/events.ts +32 -0
  264. package/src/utils/llmConfig.ts +20 -8
  265. package/src/utils/title.ts +104 -30
  266. package/src/utils/tokens.ts +69 -10
@@ -0,0 +1,912 @@
1
+ /* eslint-disable @typescript-eslint/ban-ts-comment */
2
+ /* eslint-disable @typescript-eslint/explicit-function-return-type */
3
+ import { type OpenAI as OpenAIClient } from 'openai';
4
+ import type {
5
+ ChatCompletionContentPartText,
6
+ ChatCompletionContentPartImage,
7
+ ChatCompletionContentPartInputAudio,
8
+ ChatCompletionContentPart,
9
+ } from 'openai/resources/chat/completions';
10
+ import {
11
+ AIMessage,
12
+ AIMessageChunk,
13
+ type BaseMessage,
14
+ ChatMessage,
15
+ ToolMessage,
16
+ isAIMessage,
17
+ type UsageMetadata,
18
+ type BaseMessageFields,
19
+ type MessageContent,
20
+ type InvalidToolCall,
21
+ type MessageContentImageUrl,
22
+ StandardContentBlockConverter,
23
+ parseBase64DataUrl,
24
+ parseMimeType,
25
+ convertToProviderContentBlock,
26
+ isDataContentBlock,
27
+ } from '@langchain/core/messages';
28
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
29
+ import {
30
+ convertLangChainToolCallToOpenAI,
31
+ makeInvalidToolCall,
32
+ parseToolCall,
33
+ } from '@langchain/core/output_parsers/openai_tools';
34
+ import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
35
+ import type {
36
+ OpenAICallOptions,
37
+ OpenAIChatInput,
38
+ ChatOpenAIReasoningSummary,
39
+ } from '@langchain/openai';
40
+
41
+ export type { OpenAICallOptions, OpenAIChatInput };
42
+
43
// Utility types to get hidden OpenAI response types

/** Extracts the element type yielded by an AsyncIterable. */
type ExtractAsyncIterableType<T> = T extends AsyncIterable<infer U> ? U : never;
/** Keeps only the non-streaming member(s) of a union (no `controller` field). */
type ExcludeController<T> = T extends { controller: unknown } ? never : T;
/** Keeps only the streaming member(s) of a union (has a `controller` field). */
type ExcludeNonController<T> = T extends { controller: unknown } ? T : never;

type ResponsesCreate = OpenAIClient.Responses['create'];
type ResponsesParse = OpenAIClient.Responses['parse'];

type ResponsesInputItem = OpenAIClient.Responses.ResponseInputItem;

/** Non-streaming result of `client.responses.create(...)`. */
type ResponsesCreateInvoke = ExcludeController<
  Awaited<ReturnType<ResponsesCreate>>
>;

/** Non-streaming result of `client.responses.parse(...)`. */
type ResponsesParseInvoke = ExcludeController<
  Awaited<ReturnType<ResponsesParse>>
>;

/** Streaming result of `client.responses.create(...)` (stream variant). */
type ResponsesCreateStream = ExcludeNonController<
  Awaited<ReturnType<ResponsesCreate>>
>;

/** Union of all events yielded by a Responses API stream. */
export type ResponseReturnStreamEvents =
  ExtractAsyncIterableType<ResponsesCreateStream>;

// TODO import from SDK when available
type OpenAIRoleEnum =
  | 'system'
  | 'developer'
  | 'assistant'
  | 'user'
  | 'function'
  | 'tool';

type OpenAICompletionParam =
  OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
79
+
80
+ function extractGenericMessageCustomRole(message: ChatMessage) {
81
+ if (
82
+ message.role !== 'system' &&
83
+ message.role !== 'developer' &&
84
+ message.role !== 'assistant' &&
85
+ message.role !== 'user' &&
86
+ message.role !== 'function' &&
87
+ message.role !== 'tool'
88
+ ) {
89
+ console.warn(`Unknown message role: ${message.role}`);
90
+ }
91
+
92
+ return message.role as OpenAIRoleEnum;
93
+ }
94
+
95
+ export function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum {
96
+ const type = message._getType();
97
+ switch (type) {
98
+ case 'system':
99
+ return 'system';
100
+ case 'ai':
101
+ return 'assistant';
102
+ case 'human':
103
+ return 'user';
104
+ case 'function':
105
+ return 'function';
106
+ case 'tool':
107
+ return 'tool';
108
+ case 'generic': {
109
+ if (!ChatMessage.isInstance(message))
110
+ throw new Error('Invalid generic chat message');
111
+ return extractGenericMessageCustomRole(message);
112
+ }
113
+ default:
114
+ throw new Error(`Unknown message type: ${type}`);
115
+ }
116
+ }
117
+
118
+ const completionsApiContentBlockConverter: StandardContentBlockConverter<{
119
+ text: ChatCompletionContentPartText;
120
+ image: ChatCompletionContentPartImage;
121
+ audio: ChatCompletionContentPartInputAudio;
122
+ file: ChatCompletionContentPart.File;
123
+ }> = {
124
+ providerName: 'ChatOpenAI',
125
+
126
+ fromStandardTextBlock(block): ChatCompletionContentPartText {
127
+ return { type: 'text', text: block.text };
128
+ },
129
+
130
+ fromStandardImageBlock(block): ChatCompletionContentPartImage {
131
+ if (block.source_type === 'url') {
132
+ return {
133
+ type: 'image_url',
134
+ image_url: {
135
+ url: block.url,
136
+ ...(block.metadata?.detail
137
+ ? { detail: block.metadata.detail as 'auto' | 'low' | 'high' }
138
+ : {}),
139
+ },
140
+ };
141
+ }
142
+
143
+ if (block.source_type === 'base64') {
144
+ const url = `data:${block.mime_type ?? ''};base64,${block.data}`;
145
+ return {
146
+ type: 'image_url',
147
+ image_url: {
148
+ url,
149
+ ...(block.metadata?.detail
150
+ ? { detail: block.metadata.detail as 'auto' | 'low' | 'high' }
151
+ : {}),
152
+ },
153
+ };
154
+ }
155
+
156
+ throw new Error(
157
+ `Image content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`
158
+ );
159
+ },
160
+
161
+ fromStandardAudioBlock(block): ChatCompletionContentPartInputAudio {
162
+ if (block.source_type === 'url') {
163
+ const data = parseBase64DataUrl({ dataUrl: block.url });
164
+ if (!data) {
165
+ throw new Error(
166
+ `URL audio blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`
167
+ );
168
+ }
169
+
170
+ const rawMimeType = data.mime_type || block.mime_type || '';
171
+ let mimeType: { type: string; subtype: string };
172
+
173
+ try {
174
+ mimeType = parseMimeType(rawMimeType);
175
+ } catch {
176
+ throw new Error(
177
+ `Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`
178
+ );
179
+ }
180
+
181
+ if (
182
+ mimeType.type !== 'audio' ||
183
+ (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')
184
+ ) {
185
+ throw new Error(
186
+ `Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`
187
+ );
188
+ }
189
+
190
+ return {
191
+ type: 'input_audio',
192
+ input_audio: {
193
+ format: mimeType.subtype,
194
+ data: data.data,
195
+ },
196
+ };
197
+ }
198
+
199
+ if (block.source_type === 'base64') {
200
+ let mimeType: { type: string; subtype: string };
201
+
202
+ try {
203
+ mimeType = parseMimeType(block.mime_type ?? '');
204
+ } catch {
205
+ throw new Error(
206
+ `Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`
207
+ );
208
+ }
209
+
210
+ if (
211
+ mimeType.type !== 'audio' ||
212
+ (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')
213
+ ) {
214
+ throw new Error(
215
+ `Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`
216
+ );
217
+ }
218
+
219
+ return {
220
+ type: 'input_audio',
221
+ input_audio: {
222
+ format: mimeType.subtype,
223
+ data: block.data,
224
+ },
225
+ };
226
+ }
227
+
228
+ throw new Error(
229
+ `Audio content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`
230
+ );
231
+ },
232
+
233
+ fromStandardFileBlock(block): ChatCompletionContentPart.File {
234
+ if (block.source_type === 'url') {
235
+ const data = parseBase64DataUrl({ dataUrl: block.url });
236
+ if (!data) {
237
+ throw new Error(
238
+ `URL file blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`
239
+ );
240
+ }
241
+
242
+ return {
243
+ type: 'file',
244
+ file: {
245
+ file_data: block.url, // formatted as base64 data URL
246
+ ...(block.metadata?.filename || block.metadata?.name
247
+ ? {
248
+ filename: (block.metadata.filename ||
249
+ block.metadata.name) as string,
250
+ }
251
+ : {}),
252
+ },
253
+ };
254
+ }
255
+
256
+ if (block.source_type === 'base64') {
257
+ return {
258
+ type: 'file',
259
+ file: {
260
+ file_data: `data:${block.mime_type ?? ''};base64,${block.data}`,
261
+ ...(block.metadata?.filename ||
262
+ block.metadata?.name ||
263
+ block.metadata?.title
264
+ ? {
265
+ filename: (block.metadata.filename ||
266
+ block.metadata.name ||
267
+ block.metadata.title) as string,
268
+ }
269
+ : {}),
270
+ },
271
+ };
272
+ }
273
+
274
+ if (block.source_type === 'id') {
275
+ return {
276
+ type: 'file',
277
+ file: {
278
+ file_id: block.id,
279
+ },
280
+ };
281
+ }
282
+
283
+ throw new Error(
284
+ `File content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`
285
+ );
286
+ },
287
+ };
288
+
289
+ // Used in LangSmith, export is important here
290
+ export function _convertMessagesToOpenAIParams(
291
+ messages: BaseMessage[],
292
+ model?: string
293
+ ): OpenAICompletionParam[] {
294
+ // TODO: Function messages do not support array content, fix cast
295
+ return messages.flatMap((message) => {
296
+ let role = messageToOpenAIRole(message);
297
+ if (role === 'system' && isReasoningModel(model)) {
298
+ role = 'developer';
299
+ }
300
+
301
+ const content =
302
+ typeof message.content === 'string'
303
+ ? message.content
304
+ : message.content.map((m) => {
305
+ if (isDataContentBlock(m)) {
306
+ return convertToProviderContentBlock(
307
+ m,
308
+ completionsApiContentBlockConverter
309
+ );
310
+ }
311
+ return m;
312
+ });
313
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
314
+ const completionParam: Record<string, any> = {
315
+ role,
316
+ content,
317
+ };
318
+ if (message.name != null) {
319
+ completionParam.name = message.name;
320
+ }
321
+ if (message.additional_kwargs.function_call != null) {
322
+ completionParam.function_call = message.additional_kwargs.function_call;
323
+ completionParam.content = '';
324
+ }
325
+ if (isAIMessage(message) && !!message.tool_calls?.length) {
326
+ completionParam.tool_calls = message.tool_calls.map(
327
+ convertLangChainToolCallToOpenAI
328
+ );
329
+ completionParam.content = '';
330
+ } else {
331
+ if (message.additional_kwargs.tool_calls != null) {
332
+ completionParam.tool_calls = message.additional_kwargs.tool_calls;
333
+ }
334
+ if ((message as ToolMessage).tool_call_id != null) {
335
+ completionParam.tool_call_id = (message as ToolMessage).tool_call_id;
336
+ }
337
+ }
338
+
339
+ if (
340
+ message.additional_kwargs.audio &&
341
+ typeof message.additional_kwargs.audio === 'object' &&
342
+ 'id' in message.additional_kwargs.audio
343
+ ) {
344
+ const audioMessage = {
345
+ role: 'assistant',
346
+ audio: {
347
+ id: message.additional_kwargs.audio.id,
348
+ },
349
+ };
350
+ return [completionParam, audioMessage] as OpenAICompletionParam[];
351
+ }
352
+
353
+ return completionParam as OpenAICompletionParam;
354
+ });
355
+ }
356
+
357
+ const _FUNCTION_CALL_IDS_MAP_KEY = '__openai_function_call_ids__';
358
+
359
+ function _convertReasoningSummaryToOpenAIResponsesParams(
360
+ reasoning: ChatOpenAIReasoningSummary
361
+ ): OpenAIClient.Responses.ResponseReasoningItem {
362
+ // combine summary parts that have the the same index and then remove the indexes
363
+ const summary = (
364
+ reasoning.summary.length > 1
365
+ ? reasoning.summary.reduce(
366
+ (acc, curr) => {
367
+ const last = acc.at(-1);
368
+
369
+ if (last!.index === curr.index) {
370
+ last!.text += curr.text;
371
+ } else {
372
+ acc.push(curr);
373
+ }
374
+ return acc;
375
+ },
376
+ [{ ...reasoning.summary[0] }]
377
+ )
378
+ : reasoning.summary
379
+ ).map((s) =>
380
+ Object.fromEntries(Object.entries(s).filter(([k]) => k !== 'index'))
381
+ ) as OpenAIClient.Responses.ResponseReasoningItem.Summary[];
382
+
383
+ return {
384
+ ...reasoning,
385
+ summary,
386
+ } as OpenAIClient.Responses.ResponseReasoningItem;
387
+ }
388
+
389
/**
 * Converts LangChain messages into OpenAI Responses API input items.
 *
 * Dispatch is by role:
 * - 'function': rejected (not supported by the Responses API).
 * - 'tool': becomes a computer_call_output or function_call_output item.
 * - 'assistant': reuses the original response items from response_metadata
 *   when available (and ZDR is off); otherwise reconstructs reasoning,
 *   message content, function calls, and fallthrough tool calls.
 * - 'user'/'system'/'developer': converts content parts to input_* items.
 *
 * @param messages - LangChain messages to convert.
 * @param model - Model id; reasoning models get 'developer' instead of 'system'.
 * @param zdrEnabled - Zero-data-retention mode; when true, server-side ids
 *   and stored reasoning/output items are not reused.
 */
export function _convertMessagesToOpenAIResponsesParams(
  messages: BaseMessage[],
  model?: string,
  zdrEnabled?: boolean
): ResponsesInputItem[] {
  return messages.flatMap(
    (lcMsg): ResponsesInputItem | ResponsesInputItem[] => {
      const additional_kwargs =
        lcMsg.additional_kwargs as BaseMessageFields['additional_kwargs'] & {
          [_FUNCTION_CALL_IDS_MAP_KEY]?: Record<string, string>;
          reasoning?: OpenAIClient.Responses.ResponseReasoningItem;
          type?: string;
          refusal?: string;
        };

      let role = messageToOpenAIRole(lcMsg);
      if (role === 'system' && isReasoningModel(model)) role = 'developer';

      if (role === 'function') {
        throw new Error('Function messages are not supported in Responses API');
      }

      if (role === 'tool') {
        const toolMessage = lcMsg as ToolMessage;

        // Handle computer call output
        if (additional_kwargs.type === 'computer_call_output') {
          // Accept: a raw string (treated as the screenshot url), a
          // ready-made computer_screenshot part, or an image_url part.
          const output = (() => {
            if (typeof toolMessage.content === 'string') {
              return {
                type: 'computer_screenshot' as const,
                image_url: toolMessage.content,
              };
            }

            if (Array.isArray(toolMessage.content)) {
              const oaiScreenshot = toolMessage.content.find(
                (i) => i.type === 'computer_screenshot'
              ) as { type: 'computer_screenshot'; image_url: string };

              if (oaiScreenshot) return oaiScreenshot;

              const lcImage = toolMessage.content.find(
                (i) => i.type === 'image_url'
              ) as MessageContentImageUrl;

              if (lcImage) {
                return {
                  type: 'computer_screenshot' as const,
                  image_url:
                    typeof lcImage.image_url === 'string'
                      ? lcImage.image_url
                      : lcImage.image_url.url,
                };
              }
            }

            throw new Error('Invalid computer call output');
          })();

          return {
            type: 'computer_call_output',
            output,
            call_id: toolMessage.tool_call_id,
          };
        }

        // Plain tool result -> function_call_output; only ids that look like
        // Responses function-call ids ('fc_' prefix) are forwarded.
        return {
          type: 'function_call_output',
          call_id: toolMessage.tool_call_id,
          id: toolMessage.id?.startsWith('fc_') ? toolMessage.id : undefined,
          output:
            typeof toolMessage.content !== 'string'
              ? JSON.stringify(toolMessage.content)
              : toolMessage.content,
        };
      }

      if (role === 'assistant') {
        // if we have the original response items, just reuse them
        if (
          !zdrEnabled &&
          lcMsg.response_metadata.output != null &&
          Array.isArray(lcMsg.response_metadata.output) &&
          lcMsg.response_metadata.output.length > 0 &&
          lcMsg.response_metadata.output.every((item) => 'type' in item)
        ) {
          return lcMsg.response_metadata.output;
        }

        // otherwise, try to reconstruct the response from what we have

        const input: ResponsesInputItem[] = [];

        // reasoning items
        if (additional_kwargs.reasoning && !zdrEnabled) {
          const reasoningItem = _convertReasoningSummaryToOpenAIResponsesParams(
            additional_kwargs.reasoning
          );
          input.push(reasoningItem);
        }

        // ai content
        let { content } = lcMsg;
        if (additional_kwargs.refusal) {
          // A refusal is appended as an extra content part; string content is
          // first normalized to an output_text part.
          if (typeof content === 'string') {
            content = [{ type: 'output_text', text: content, annotations: [] }];
          }
          content = [
            ...content,
            { type: 'refusal', refusal: additional_kwargs.refusal },
          ];
        }

        input.push({
          type: 'message',
          role: 'assistant',
          // Stored message ids are only reusable when ZDR is off.
          ...(lcMsg.id && !zdrEnabled ? { id: lcMsg.id } : {}),
          content:
            typeof content === 'string'
              ? content
              : content.flatMap((item) => {
                  if (item.type === 'text') {
                    return {
                      type: 'output_text',
                      text: item.text,
                      // @ts-expect-error TODO: add types for `annotations`
                      annotations: item.annotations ?? [],
                    };
                  }

                  if (item.type === 'output_text' || item.type === 'refusal') {
                    return item;
                  }

                  // Other part types are dropped from assistant content.
                  return [];
                }),
        });

        const functionCallIds = additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY];

        if (isAIMessage(lcMsg) && !!lcMsg.tool_calls?.length) {
          input.push(
            ...lcMsg.tool_calls.map(
              (toolCall): ResponsesInputItem => ({
                type: 'function_call',
                name: toolCall.name,
                arguments: JSON.stringify(toolCall.args),
                call_id: toolCall.id!,
                ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id!] } : {}),
              })
            )
          );
        } else if (additional_kwargs.tool_calls) {
          input.push(
            ...additional_kwargs.tool_calls.map(
              (toolCall): ResponsesInputItem => ({
                type: 'function_call',
                name: toolCall.function.name,
                call_id: toolCall.id,
                arguments: toolCall.function.arguments,
                ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
              })
            )
          );
        }

        // Prefer stored response output over tool_outputs when present.
        const toolOutputs =
          ((
            lcMsg.response_metadata.output as
              | Array<ResponsesInputItem>
              | undefined
          )?.length ?? 0) > 0
            ? lcMsg.response_metadata.output
            : additional_kwargs.tool_outputs;

        // Built-in tool call types that are passed through verbatim.
        const fallthroughCallTypes: ResponsesInputItem['type'][] = [
          'computer_call',
          /** @ts-ignore */
          'mcp_call',
          /** @ts-ignore */
          'code_interpreter_call',
          /** @ts-ignore */
          'image_generation_call',
        ];

        if (toolOutputs != null) {
          const castToolOutputs = toolOutputs as Array<ResponsesInputItem>;
          const fallthroughCalls = castToolOutputs.filter((item) =>
            fallthroughCallTypes.includes(item.type)
          );
          if (fallthroughCalls.length > 0) input.push(...fallthroughCalls);
        }

        return input;
      }

      if (role === 'user' || role === 'system' || role === 'developer') {
        if (typeof lcMsg.content === 'string') {
          return { type: 'message', role, content: lcMsg.content };
        }

        const messages: ResponsesInputItem[] = [];
        const content = lcMsg.content.flatMap((item) => {
          // mcp_approval_response parts become standalone items, emitted
          // before the message item (note: no early return here, by design).
          if (item.type === 'mcp_approval_response') {
            messages.push({
              // @ts-ignore
              type: 'mcp_approval_response',
              approval_request_id: item.approval_request_id,
              approve: item.approve,
            });
          }
          if (isDataContentBlock(item)) {
            return convertToProviderContentBlock(
              item,
              completionsApiContentBlockConverter
            );
          }
          if (item.type === 'text') {
            return {
              type: 'input_text',
              text: item.text,
            };
          }
          if (item.type === 'image_url') {
            return {
              type: 'input_image',
              image_url:
                typeof item.image_url === 'string'
                  ? item.image_url
                  : item.image_url.url,
              detail:
                typeof item.image_url === 'string'
                  ? 'auto'
                  : item.image_url.detail,
            };
          }
          if (
            item.type === 'input_text' ||
            item.type === 'input_image' ||
            item.type === 'input_file'
          ) {
            return item;
          }
          // Unrecognized part types are dropped.
          return [];
        });

        if (content.length > 0) {
          messages.push({ type: 'message', role, content });
        }
        return messages;
      }

      console.warn(
        `Unsupported role found when converting to OpenAI Responses API: ${role}`
      );
      return [];
    }
  );
}
649
+
650
+ export function isReasoningModel(model?: string) {
651
+ return model != null && model !== '' && /\b(o\d|gpt-[5-9])\b/i.test(model);
652
+ }
653
+
654
+ function _convertOpenAIResponsesMessageToBaseMessage(
655
+ response: ResponsesCreateInvoke | ResponsesParseInvoke
656
+ ): BaseMessage {
657
+ if (response.error) {
658
+ // TODO: add support for `addLangChainErrorFields`
659
+ const error = new Error(response.error.message);
660
+ error.name = response.error.code;
661
+ throw error;
662
+ }
663
+
664
+ let messageId: string | undefined;
665
+ const content: MessageContent = [];
666
+ const tool_calls: ToolCall[] = [];
667
+ const invalid_tool_calls: InvalidToolCall[] = [];
668
+ const response_metadata: Record<string, unknown> = {
669
+ model: response.model,
670
+ created_at: response.created_at,
671
+ id: response.id,
672
+ incomplete_details: response.incomplete_details,
673
+ metadata: response.metadata,
674
+ object: response.object,
675
+ status: response.status,
676
+ user: response.user,
677
+ service_tier: response.service_tier,
678
+
679
+ // for compatibility with chat completion calls.
680
+ model_name: response.model,
681
+ };
682
+
683
+ const additional_kwargs: {
684
+ [key: string]: unknown;
685
+ refusal?: string;
686
+ reasoning?: OpenAIClient.Responses.ResponseReasoningItem;
687
+ tool_outputs?: unknown[];
688
+ parsed?: unknown;
689
+ [_FUNCTION_CALL_IDS_MAP_KEY]?: Record<string, string>;
690
+ } = {};
691
+
692
+ for (const item of response.output) {
693
+ if (item.type === 'message') {
694
+ messageId = item.id;
695
+ content.push(
696
+ ...item.content.flatMap((part) => {
697
+ if (part.type === 'output_text') {
698
+ if ('parsed' in part && part.parsed != null) {
699
+ additional_kwargs.parsed = part.parsed;
700
+ }
701
+ return {
702
+ type: 'text',
703
+ text: part.text,
704
+ annotations: part.annotations,
705
+ };
706
+ }
707
+
708
+ if (part.type === 'refusal') {
709
+ additional_kwargs.refusal = part.refusal;
710
+ return [];
711
+ }
712
+
713
+ return part;
714
+ })
715
+ );
716
+ } else if (item.type === 'function_call') {
717
+ const fnAdapter = {
718
+ function: { name: item.name, arguments: item.arguments },
719
+ id: item.call_id,
720
+ };
721
+
722
+ try {
723
+ tool_calls.push(parseToolCall(fnAdapter, { returnId: true }));
724
+ } catch (e: unknown) {
725
+ let errMessage: string | undefined;
726
+ if (
727
+ typeof e === 'object' &&
728
+ e != null &&
729
+ 'message' in e &&
730
+ typeof e.message === 'string'
731
+ ) {
732
+ errMessage = e.message;
733
+ }
734
+ invalid_tool_calls.push(makeInvalidToolCall(fnAdapter, errMessage));
735
+ }
736
+
737
+ additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] ??= {};
738
+ if (item.id) {
739
+ additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][item.call_id] = item.id;
740
+ }
741
+ } else if (item.type === 'reasoning') {
742
+ additional_kwargs.reasoning = item;
743
+ } else {
744
+ additional_kwargs.tool_outputs ??= [];
745
+ additional_kwargs.tool_outputs.push(item);
746
+ }
747
+ }
748
+
749
+ return new AIMessage({
750
+ id: messageId,
751
+ content,
752
+ tool_calls,
753
+ invalid_tool_calls,
754
+ usage_metadata: response.usage,
755
+ additional_kwargs,
756
+ response_metadata,
757
+ });
758
+ }
759
+
760
/**
 * Converts one OpenAI Responses API streaming event into a
 * `ChatGenerationChunk`, or returns `null` for events that carry nothing
 * to accumulate (unknown event types, and partial images — see below).
 *
 * Each recognized event type populates exactly one of: text content,
 * tool-call chunks, reasoning summary fragments, tool outputs, usage, or
 * response metadata. NOTE(review): branch order matters —
 * `response.output_item.added` is matched by three separate branches
 * (message / function_call / reasoning), so keep them in this order.
 */
export function _convertOpenAIResponsesDeltaToBaseMessageChunk(
  chunk: ResponseReturnStreamEvents
) {
  const content: Record<string, unknown>[] = [];
  let generationInfo: Record<string, unknown> = {};
  let usage_metadata: UsageMetadata | undefined;
  const tool_call_chunks: ToolCallChunk[] = [];
  const response_metadata: Record<string, unknown> = {};
  const additional_kwargs: {
    [key: string]: unknown;
    reasoning?: Partial<ChatOpenAIReasoningSummary>;
    tool_outputs?: unknown[];
  } = {};
  let id: string | undefined;
  if (chunk.type === 'response.output_text.delta') {
    // Plain text token delta.
    content.push({
      type: 'text',
      text: chunk.delta,
      index: chunk.content_index,
    });
    /** @ts-ignore */
  } else if (chunk.type === 'response.output_text_annotation.added') {
    // Annotation (e.g. a citation) for already-streamed text: emitted as an
    // empty-text content part carrying only the annotation at the same index.
    content.push({
      type: 'text',
      text: '',
      /** @ts-ignore */
      annotations: [chunk.annotation],
      /** @ts-ignore */
      index: chunk.content_index,
    });
  } else if (
    chunk.type === 'response.output_item.added' &&
    chunk.item.type === 'message'
  ) {
    // A new assistant message item: capture its id for the chunk.
    id = chunk.item.id;
  } else if (
    chunk.type === 'response.output_item.added' &&
    chunk.item.type === 'function_call'
  ) {
    // Start of a function tool call: seed the chunk with name/args/call_id.
    tool_call_chunks.push({
      type: 'tool_call_chunk',
      name: chunk.item.name,
      args: chunk.item.arguments,
      id: chunk.item.call_id,
      index: chunk.output_index,
    });

    // Map call_id -> Responses item id so it can be round-tripped later.
    additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
      [chunk.item.call_id]: chunk.item.id,
    };
  } else if (
    chunk.type === 'response.output_item.done' &&
    [
      'web_search_call',
      'file_search_call',
      'computer_call',
      'code_interpreter_call',
      'mcp_call',
      'mcp_list_tools',
      'mcp_approval_request',
      'image_generation_call',
    ].includes(chunk.item.type)
  ) {
    // Completed built-in tool invocation: pass the item through verbatim.
    additional_kwargs.tool_outputs = [chunk.item];
  } else if (chunk.type === 'response.created') {
    response_metadata.id = chunk.response.id;
    response_metadata.model_name = chunk.response.model;
    response_metadata.model = chunk.response.model;
  } else if (chunk.type === 'response.completed') {
    // Final event: reuse the non-streaming converter to derive text for
    // `parsed`, and copy the full response (minus id) into metadata.
    const msg = _convertOpenAIResponsesMessageToBaseMessage(chunk.response);

    usage_metadata = chunk.response.usage;
    if (chunk.response.text?.format?.type === 'json_schema') {
      additional_kwargs.parsed ??= JSON.parse(msg.text);
    }
    for (const [key, value] of Object.entries(chunk.response)) {
      if (key !== 'id') response_metadata[key] = value;
    }
  } else if (chunk.type === 'response.function_call_arguments.delta') {
    // Incremental tool-call arguments; merged by index downstream.
    tool_call_chunks.push({
      type: 'tool_call_chunk',
      args: chunk.delta,
      index: chunk.output_index,
    });
  } else if (
    chunk.type === 'response.web_search_call.completed' ||
    chunk.type === 'response.file_search_call.completed'
  ) {
    // e.g. "response.web_search_call.completed" -> type "web_search_call".
    generationInfo = {
      tool_outputs: {
        id: chunk.item_id,
        type: chunk.type.replace('response.', '').replace('.completed', ''),
        status: 'completed',
      },
    };
  } else if (chunk.type === 'response.refusal.done') {
    additional_kwargs.refusal = chunk.refusal;
  } else if (
    chunk.type === 'response.output_item.added' &&
    'item' in chunk &&
    chunk.item.type === 'reasoning'
  ) {
    const summary: ChatOpenAIReasoningSummary['summary'] | undefined = chunk
      .item.summary
      ? chunk.item.summary.map((s, index) => ({
          ...s,
          index,
        }))
      : undefined;

    additional_kwargs.reasoning = {
      // We only capture ID in the first chunk or else the concatenated result of all chunks will
      // have an ID field that is repeated once per chunk. There is special handling for the `type`
      // field that prevents this, however.
      id: chunk.item.id,
      type: chunk.item.type,
      ...(summary ? { summary } : {}),
    };
  } else if (chunk.type === 'response.reasoning_summary_part.added') {
    additional_kwargs.reasoning = {
      type: 'reasoning',
      summary: [{ ...chunk.part, index: chunk.summary_index }],
    };
  } else if (chunk.type === 'response.reasoning_summary_text.delta') {
    additional_kwargs.reasoning = {
      type: 'reasoning',
      summary: [
        { text: chunk.delta, type: 'summary_text', index: chunk.summary_index },
      ],
    };
    /** @ts-ignore */
  } else if (chunk.type === 'response.image_generation_call.partial_image') {
    // noop/fixme: retaining partial images in a message chunk means that _all_
    // partial images get kept in history, so we don't do anything here.
    return null;
  } else {
    // Unrecognized event type: nothing to accumulate.
    return null;
  }

  return new ChatGenerationChunk({
    // Legacy reasons: `onLLMNewToken` pulls this out.
    text: content.map((part) => part.text).join(''),
    message: new AIMessageChunk({
      id,
      content,
      tool_call_chunks,
      usage_metadata,
      additional_kwargs,
      response_metadata,
    }),
    generationInfo,
  });
}