@ai-sdk/openai 3.0.13 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +1 -1
  3. package/dist/index.d.ts +1 -1
  4. package/dist/index.js +1 -1
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1 -1
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +1 -1
  9. package/dist/internal/index.d.ts +1 -1
  10. package/dist/internal/index.js.map +1 -1
  11. package/dist/internal/index.mjs.map +1 -1
  12. package/package.json +5 -4
  13. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  14. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  15. package/src/chat/convert-openai-chat-usage.ts +57 -0
  16. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  17. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  18. package/src/chat/get-response-metadata.ts +15 -0
  19. package/src/chat/map-openai-finish-reason.ts +19 -0
  20. package/src/chat/openai-chat-api.ts +198 -0
  21. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  22. package/src/chat/openai-chat-language-model.ts +700 -0
  23. package/src/chat/openai-chat-options.ts +186 -0
  24. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  25. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  26. package/src/chat/openai-chat-prompt.ts +70 -0
  27. package/src/completion/convert-openai-completion-usage.ts +46 -0
  28. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  29. package/src/completion/get-response-metadata.ts +15 -0
  30. package/src/completion/map-openai-finish-reason.ts +19 -0
  31. package/src/completion/openai-completion-api.ts +81 -0
  32. package/src/completion/openai-completion-language-model.test.ts +752 -0
  33. package/src/completion/openai-completion-language-model.ts +336 -0
  34. package/src/completion/openai-completion-options.ts +58 -0
  35. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  36. package/src/embedding/openai-embedding-api.ts +13 -0
  37. package/src/embedding/openai-embedding-model.test.ts +146 -0
  38. package/src/embedding/openai-embedding-model.ts +95 -0
  39. package/src/embedding/openai-embedding-options.ts +30 -0
  40. package/src/image/openai-image-api.ts +35 -0
  41. package/src/image/openai-image-model.test.ts +722 -0
  42. package/src/image/openai-image-model.ts +305 -0
  43. package/src/image/openai-image-options.ts +28 -0
  44. package/src/index.ts +9 -0
  45. package/src/internal/index.ts +19 -0
  46. package/src/openai-config.ts +18 -0
  47. package/src/openai-error.test.ts +34 -0
  48. package/src/openai-error.ts +22 -0
  49. package/src/openai-language-model-capabilities.test.ts +93 -0
  50. package/src/openai-language-model-capabilities.ts +54 -0
  51. package/src/openai-provider.test.ts +98 -0
  52. package/src/openai-provider.ts +270 -0
  53. package/src/openai-tools.ts +114 -0
  54. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  55. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  56. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  57. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  58. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  59. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  60. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  61. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  62. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  63. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  64. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  65. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  66. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  67. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  68. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  71. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  72. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  73. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  74. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  75. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  76. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  77. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  78. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  79. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  80. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  81. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  82. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  83. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  84. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  85. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  86. package/src/responses/convert-openai-responses-usage.ts +53 -0
  87. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  88. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  89. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  90. package/src/responses/openai-responses-api.test.ts +89 -0
  91. package/src/responses/openai-responses-api.ts +1086 -0
  92. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  93. package/src/responses/openai-responses-language-model.ts +1932 -0
  94. package/src/responses/openai-responses-options.ts +312 -0
  95. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  96. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  97. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  98. package/src/speech/openai-speech-api.ts +38 -0
  99. package/src/speech/openai-speech-model.test.ts +202 -0
  100. package/src/speech/openai-speech-model.ts +137 -0
  101. package/src/speech/openai-speech-options.ts +22 -0
  102. package/src/tool/apply-patch.ts +141 -0
  103. package/src/tool/code-interpreter.ts +104 -0
  104. package/src/tool/file-search.ts +145 -0
  105. package/src/tool/image-generation.ts +126 -0
  106. package/src/tool/local-shell.test-d.ts +20 -0
  107. package/src/tool/local-shell.ts +72 -0
  108. package/src/tool/mcp.ts +125 -0
  109. package/src/tool/shell.ts +85 -0
  110. package/src/tool/web-search-preview.ts +139 -0
  111. package/src/tool/web-search.test-d.ts +13 -0
  112. package/src/tool/web-search.ts +179 -0
  113. package/src/transcription/openai-transcription-api.ts +37 -0
  114. package/src/transcription/openai-transcription-model.test.ts +507 -0
  115. package/src/transcription/openai-transcription-model.ts +232 -0
  116. package/src/transcription/openai-transcription-options.ts +50 -0
  117. package/src/transcription/transcription-test.mp3 +0 -0
  118. package/src/version.ts +6 -0
@@ -0,0 +1,1932 @@
1
+ import {
2
+ APICallError,
3
+ JSONValue,
4
+ LanguageModelV3,
5
+ LanguageModelV3Prompt,
6
+ LanguageModelV3CallOptions,
7
+ LanguageModelV3Content,
8
+ LanguageModelV3FinishReason,
9
+ LanguageModelV3GenerateResult,
10
+ LanguageModelV3ProviderTool,
11
+ LanguageModelV3StreamPart,
12
+ LanguageModelV3StreamResult,
13
+ LanguageModelV3ToolApprovalRequest,
14
+ SharedV3ProviderMetadata,
15
+ SharedV3Warning,
16
+ } from '@ai-sdk/provider';
17
+ import {
18
+ combineHeaders,
19
+ createEventSourceResponseHandler,
20
+ createJsonResponseHandler,
21
+ createToolNameMapping,
22
+ generateId,
23
+ InferSchema,
24
+ parseProviderOptions,
25
+ ParseResult,
26
+ postJsonToApi,
27
+ } from '@ai-sdk/provider-utils';
28
+ import { OpenAIConfig } from '../openai-config';
29
+ import { openaiFailedResponseHandler } from '../openai-error';
30
+ import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';
31
+ import { applyPatchInputSchema } from '../tool/apply-patch';
32
+ import {
33
+ codeInterpreterInputSchema,
34
+ codeInterpreterOutputSchema,
35
+ } from '../tool/code-interpreter';
36
+ import { fileSearchOutputSchema } from '../tool/file-search';
37
+ import { imageGenerationOutputSchema } from '../tool/image-generation';
38
+ import { localShellInputSchema } from '../tool/local-shell';
39
+ import { mcpOutputSchema } from '../tool/mcp';
40
+ import { shellInputSchema } from '../tool/shell';
41
+ import { webSearchOutputSchema } from '../tool/web-search';
42
+ import {
43
+ convertOpenAIResponsesUsage,
44
+ OpenAIResponsesUsage,
45
+ } from './convert-openai-responses-usage';
46
+ import { convertToOpenAIResponsesInput } from './convert-to-openai-responses-input';
47
+ import { mapOpenAIResponseFinishReason } from './map-openai-responses-finish-reason';
48
+ import {
49
+ OpenAIResponsesChunk,
50
+ openaiResponsesChunkSchema,
51
+ OpenAIResponsesIncludeOptions,
52
+ OpenAIResponsesIncludeValue,
53
+ OpenAIResponsesLogprobs,
54
+ openaiResponsesResponseSchema,
55
+ OpenAIResponsesWebSearchAction,
56
+ OpenAIResponsesApplyPatchOperationDiffDeltaChunk,
57
+ OpenAIResponsesApplyPatchOperationDiffDoneChunk,
58
+ } from './openai-responses-api';
59
+ import {
60
+ OpenAIResponsesModelId,
61
+ openaiResponsesProviderOptionsSchema,
62
+ TOP_LOGPROBS_MAX,
63
+ } from './openai-responses-options';
64
+ import { prepareResponsesTools } from './openai-responses-prepare-tools';
65
+ import {
66
+ ResponsesSourceDocumentProviderMetadata,
67
+ ResponsesTextProviderMetadata,
68
+ } from './openai-responses-provider-metadata';
69
+
70
+ /**
71
+ * Extracts a mapping from MCP approval request IDs to their corresponding tool call IDs
72
+ * from the prompt. When an MCP tool requires approval, we generate a tool call ID to track
73
+ * the pending approval in our system. When the user responds to the approval (and we
74
+ * continue the conversation), we need to map the approval request ID back to our tool call ID
75
+ * so that tool results reference the correct tool call.
76
+ */
77
+ function extractApprovalRequestIdToToolCallIdMapping(
78
+ prompt: LanguageModelV3Prompt,
79
+ ): Record<string, string> {
80
+ const mapping: Record<string, string> = {};
81
+ for (const message of prompt) {
82
+ if (message.role !== 'assistant') continue;
83
+ for (const part of message.content) {
84
+ if (part.type !== 'tool-call') continue;
85
+ const approvalRequestId = part.providerOptions?.openai
86
+ ?.approvalRequestId as string | undefined;
87
+ if (approvalRequestId != null) {
88
+ mapping[approvalRequestId] = part.toolCallId;
89
+ }
90
+ }
91
+ }
92
+ return mapping;
93
+ }
94
+
95
+ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
96
  // Version of the LanguageModel specification this class implements.
  readonly specificationVersion = 'v3';

  // The Responses API model identifier this instance targets.
  readonly modelId: OpenAIResponsesModelId;

  // Shared provider configuration (URL builder, headers, fetch, optional
  // generateId and fileIdPrefixes — see usages below).
  private readonly config: OpenAIConfig;

  /**
   * @param modelId - Responses API model identifier.
   * @param config - provider configuration used for request construction.
   */
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) {
    this.modelId = modelId;
    this.config = config;
  }
106
+
107
  // URL patterns supported natively per media type: any http(s) URL for
  // images and PDFs (presumably fetched by OpenAI itself rather than
  // downloaded by the SDK — confirm against provider-spec docs).
  readonly supportedUrls: Record<string, RegExp[]> = {
    'image/*': [/^https?:\/\/.*$/],
    'application/pdf': [/^https?:\/\/.*$/],
  };
111
+
112
  /** Provider name, taken from the shared provider configuration. */
  get provider(): string {
    return this.config.provider;
  }
115
+
116
  /**
   * Translates standardized AI SDK call options into the JSON request body
   * for the OpenAI Responses API.
   *
   * Returns the request `args` plus bookkeeping consumed by the generate/
   * stream paths: accumulated `warnings`, the resolved `store` flag, the
   * custom↔provider `toolNameMapping`, the detected `webSearchToolName`,
   * and `providerOptionsName` — the provider-options namespace ('azure' or
   * 'openai') under which options are read and metadata is reported.
   */
  private async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat,
  }: LanguageModelV3CallOptions) {
    const warnings: SharedV3Warning[] = [];
    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);

    // options that have no Responses API mapping are dropped and surfaced
    // as 'unsupported' warnings instead of failing the call:
    if (topK != null) {
      warnings.push({ type: 'unsupported', feature: 'topK' });
    }

    if (seed != null) {
      warnings.push({ type: 'unsupported', feature: 'seed' });
    }

    if (presencePenalty != null) {
      warnings.push({ type: 'unsupported', feature: 'presencePenalty' });
    }

    if (frequencyPenalty != null) {
      warnings.push({ type: 'unsupported', feature: 'frequencyPenalty' });
    }

    if (stopSequences != null) {
      warnings.push({ type: 'unsupported', feature: 'stopSequences' });
    }

    // Azure-based providers read options from the 'azure' namespace first:
    const providerOptionsName = this.config.provider.includes('azure')
      ? 'azure'
      : 'openai';
    let openaiOptions = await parseProviderOptions({
      provider: providerOptionsName,
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema,
    });

    // fall back to the 'openai' namespace when no 'azure' options were given:
    if (openaiOptions == null && providerOptionsName !== 'openai') {
      openaiOptions = await parseProviderOptions({
        provider: 'openai',
        providerOptions,
        schema: openaiResponsesProviderOptionsSchema,
      });
    }

    // an explicit forceReasoning override wins over the capability lookup:
    const isReasoningModel =
      openaiOptions?.forceReasoning ?? modelCapabilities.isReasoningModel;

    if (openaiOptions?.conversation && openaiOptions?.previousResponseId) {
      warnings.push({
        type: 'unsupported',
        feature: 'conversation',
        details: 'conversation and previousResponseId cannot be used together',
      });
    }

    // maps custom tool names chosen by the user to the provider-side tool
    // type names used in the request and in response items:
    const toolNameMapping = createToolNameMapping({
      tools,
      providerToolNames: {
        'openai.code_interpreter': 'code_interpreter',
        'openai.file_search': 'file_search',
        'openai.image_generation': 'image_generation',
        'openai.local_shell': 'local_shell',
        'openai.shell': 'shell',
        'openai.web_search': 'web_search',
        'openai.web_search_preview': 'web_search_preview',
        'openai.mcp': 'mcp',
        'openai.apply_patch': 'apply_patch',
      },
    });

    // NOTE: hasOpenAITool is a function declaration defined further down in
    // this method body; function declarations are hoisted, so these calls
    // are safe.
    const { input, warnings: inputWarnings } =
      await convertToOpenAIResponsesInput({
        prompt,
        toolNameMapping,
        systemMessageMode:
          openaiOptions?.systemMessageMode ??
          (isReasoningModel
            ? 'developer'
            : modelCapabilities.systemMessageMode),
        providerOptionsName,
        fileIdPrefixes: this.config.fileIdPrefixes,
        store: openaiOptions?.store ?? true,
        hasConversation: openaiOptions?.conversation != null,
        hasLocalShellTool: hasOpenAITool('openai.local_shell'),
        hasShellTool: hasOpenAITool('openai.shell'),
        hasApplyPatchTool: hasOpenAITool('openai.apply_patch'),
      });

    warnings.push(...inputWarnings);

    const strictJsonSchema = openaiOptions?.strictJsonSchema ?? true;

    let include: OpenAIResponsesIncludeOptions = openaiOptions?.include;

    // adds an include key without mutating the user-supplied array and
    // without duplicating keys:
    function addInclude(key: OpenAIResponsesIncludeValue) {
      if (include == null) {
        include = [key];
      } else if (!include.includes(key)) {
        include = [...include, key];
      }
    }

    // whether a provider-defined tool with the given id was passed in:
    function hasOpenAITool(id: string) {
      return (
        tools?.find(tool => tool.type === 'provider' && tool.id === id) != null
      );
    }

    // when logprobs are requested, automatically include them:
    // (logprobs: true means "use the maximum"; a number is used as-is)
    const topLogprobs =
      typeof openaiOptions?.logprobs === 'number'
        ? openaiOptions?.logprobs
        : openaiOptions?.logprobs === true
          ? TOP_LOGPROBS_MAX
          : undefined;

    if (topLogprobs) {
      addInclude('message.output_text.logprobs');
    }

    // when a web search tool is present, automatically include the sources:
    const webSearchToolName = (
      tools?.find(
        tool =>
          tool.type === 'provider' &&
          (tool.id === 'openai.web_search' ||
            tool.id === 'openai.web_search_preview'),
      ) as LanguageModelV3ProviderTool | undefined
    )?.name;

    if (webSearchToolName) {
      addInclude('web_search_call.action.sources');
    }

    // when a code interpreter tool is present, automatically include the outputs:
    if (hasOpenAITool('openai.code_interpreter')) {
      addInclude('code_interpreter_call.outputs');
    }

    const store = openaiOptions?.store;

    // store defaults to true in the OpenAI responses API, so check for false exactly:
    // without server-side storage, encrypted reasoning content must be
    // requested so it can be round-tripped through the prompt.
    if (store === false && isReasoningModel) {
      addInclude('reasoning.encrypted_content');
    }

    const baseArgs = {
      model: this.modelId,
      input,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,

      // the `text` object is only sent when there is something to put in it
      // (a JSON response format and/or a verbosity setting):
      ...((responseFormat?.type === 'json' || openaiOptions?.textVerbosity) && {
        text: {
          ...(responseFormat?.type === 'json' && {
            format:
              responseFormat.schema != null
                ? {
                    type: 'json_schema',
                    strict: strictJsonSchema,
                    name: responseFormat.name ?? 'response',
                    description: responseFormat.description,
                    schema: responseFormat.schema,
                  }
                : { type: 'json_object' },
          }),
          ...(openaiOptions?.textVerbosity && {
            verbosity: openaiOptions.textVerbosity,
          }),
        },
      }),

      // provider options:
      conversation: openaiOptions?.conversation,
      max_tool_calls: openaiOptions?.maxToolCalls,
      metadata: openaiOptions?.metadata,
      parallel_tool_calls: openaiOptions?.parallelToolCalls,
      previous_response_id: openaiOptions?.previousResponseId,
      store,
      user: openaiOptions?.user,
      instructions: openaiOptions?.instructions,
      service_tier: openaiOptions?.serviceTier,
      include,
      prompt_cache_key: openaiOptions?.promptCacheKey,
      prompt_cache_retention: openaiOptions?.promptCacheRetention,
      safety_identifier: openaiOptions?.safetyIdentifier,
      top_logprobs: topLogprobs,
      truncation: openaiOptions?.truncation,

      // model-specific settings:
      ...(isReasoningModel &&
        (openaiOptions?.reasoningEffort != null ||
          openaiOptions?.reasoningSummary != null) && {
          reasoning: {
            ...(openaiOptions?.reasoningEffort != null && {
              effort: openaiOptions.reasoningEffort,
            }),
            ...(openaiOptions?.reasoningSummary != null && {
              summary: openaiOptions.reasoningSummary,
            }),
          },
        }),
    };

    // remove unsupported settings for reasoning models
    // see https://platform.openai.com/docs/guides/reasoning#limitations
    if (isReasoningModel) {
      // when reasoning effort is none, gpt-5.1 models allow temperature, topP, logprobs
      // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
      if (
        !(
          openaiOptions?.reasoningEffort === 'none' &&
          modelCapabilities.supportsNonReasoningParameters
        )
      ) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = undefined;
          warnings.push({
            type: 'unsupported',
            feature: 'temperature',
            details: 'temperature is not supported for reasoning models',
          });
        }

        if (baseArgs.top_p != null) {
          baseArgs.top_p = undefined;
          warnings.push({
            type: 'unsupported',
            feature: 'topP',
            details: 'topP is not supported for reasoning models',
          });
        }
      }
    } else {
      // reasoning options on non-reasoning models are warned about but the
      // request itself omits them (see the isReasoningModel spread above):
      if (openaiOptions?.reasoningEffort != null) {
        warnings.push({
          type: 'unsupported',
          feature: 'reasoningEffort',
          details: 'reasoningEffort is not supported for non-reasoning models',
        });
      }

      if (openaiOptions?.reasoningSummary != null) {
        warnings.push({
          type: 'unsupported',
          feature: 'reasoningSummary',
          details: 'reasoningSummary is not supported for non-reasoning models',
        });
      }
    }

    // Validate flex processing support
    if (
      openaiOptions?.serviceTier === 'flex' &&
      !modelCapabilities.supportsFlexProcessing
    ) {
      warnings.push({
        type: 'unsupported',
        feature: 'serviceTier',
        details:
          'flex processing is only available for o3, o4-mini, and gpt-5 models',
      });
      // Remove from args if not supported
      delete (baseArgs as any).service_tier;
    }

    // Validate priority processing support
    if (
      openaiOptions?.serviceTier === 'priority' &&
      !modelCapabilities.supportsPriorityProcessing
    ) {
      warnings.push({
        type: 'unsupported',
        feature: 'serviceTier',
        details:
          'priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported',
      });
      // Remove from args if not supported
      delete (baseArgs as any).service_tier;
    }

    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings,
    } = await prepareResponsesTools({
      tools,
      toolChoice,
    });

    return {
      webSearchToolName,
      args: {
        ...baseArgs,
        tools: openaiTools,
        tool_choice: openaiToolChoice,
      },
      warnings: [...warnings, ...toolWarnings],
      store,
      toolNameMapping,
      providerOptionsName,
    };
  }
431
+
432
+ async doGenerate(
433
+ options: LanguageModelV3CallOptions,
434
+ ): Promise<LanguageModelV3GenerateResult> {
435
+ const {
436
+ args: body,
437
+ warnings,
438
+ webSearchToolName,
439
+ toolNameMapping,
440
+ providerOptionsName,
441
+ } = await this.getArgs(options);
442
+ const url = this.config.url({
443
+ path: '/responses',
444
+ modelId: this.modelId,
445
+ });
446
+
447
+ const approvalRequestIdToDummyToolCallIdFromPrompt =
448
+ extractApprovalRequestIdToToolCallIdMapping(options.prompt);
449
+
450
+ const {
451
+ responseHeaders,
452
+ value: response,
453
+ rawValue: rawResponse,
454
+ } = await postJsonToApi({
455
+ url,
456
+ headers: combineHeaders(this.config.headers(), options.headers),
457
+ body,
458
+ failedResponseHandler: openaiFailedResponseHandler,
459
+ successfulResponseHandler: createJsonResponseHandler(
460
+ openaiResponsesResponseSchema,
461
+ ),
462
+ abortSignal: options.abortSignal,
463
+ fetch: this.config.fetch,
464
+ });
465
+
466
+ if (response.error) {
467
+ throw new APICallError({
468
+ message: response.error.message,
469
+ url,
470
+ requestBodyValues: body,
471
+ statusCode: 400,
472
+ responseHeaders,
473
+ responseBody: rawResponse as string,
474
+ isRetryable: false,
475
+ });
476
+ }
477
+
478
+ const content: Array<LanguageModelV3Content> = [];
479
+ const logprobs: Array<OpenAIResponsesLogprobs> = [];
480
+
481
+ // flag that checks if there have been client-side tool calls (not executed by openai)
482
+ let hasFunctionCall = false;
483
+
484
+ // map response content to content array (defined when there is no error)
485
+ for (const part of response.output!) {
486
+ switch (part.type) {
487
+ case 'reasoning': {
488
+ // when there are no summary parts, we need to add an empty reasoning part:
489
+ if (part.summary.length === 0) {
490
+ part.summary.push({ type: 'summary_text', text: '' });
491
+ }
492
+
493
+ for (const summary of part.summary) {
494
+ content.push({
495
+ type: 'reasoning' as const,
496
+ text: summary.text,
497
+ providerMetadata: {
498
+ [providerOptionsName]: {
499
+ itemId: part.id,
500
+ reasoningEncryptedContent: part.encrypted_content ?? null,
501
+ },
502
+ },
503
+ });
504
+ }
505
+ break;
506
+ }
507
+
508
+ case 'image_generation_call': {
509
+ content.push({
510
+ type: 'tool-call',
511
+ toolCallId: part.id,
512
+ toolName: toolNameMapping.toCustomToolName('image_generation'),
513
+ input: '{}',
514
+ providerExecuted: true,
515
+ });
516
+
517
+ content.push({
518
+ type: 'tool-result',
519
+ toolCallId: part.id,
520
+ toolName: toolNameMapping.toCustomToolName('image_generation'),
521
+ result: {
522
+ result: part.result,
523
+ } satisfies InferSchema<typeof imageGenerationOutputSchema>,
524
+ });
525
+
526
+ break;
527
+ }
528
+
529
+ case 'local_shell_call': {
530
+ content.push({
531
+ type: 'tool-call',
532
+ toolCallId: part.call_id,
533
+ toolName: toolNameMapping.toCustomToolName('local_shell'),
534
+ input: JSON.stringify({
535
+ action: part.action,
536
+ } satisfies InferSchema<typeof localShellInputSchema>),
537
+ providerMetadata: {
538
+ [providerOptionsName]: {
539
+ itemId: part.id,
540
+ },
541
+ },
542
+ });
543
+
544
+ break;
545
+ }
546
+
547
+ case 'shell_call': {
548
+ content.push({
549
+ type: 'tool-call',
550
+ toolCallId: part.call_id,
551
+ toolName: toolNameMapping.toCustomToolName('shell'),
552
+ input: JSON.stringify({
553
+ action: {
554
+ commands: part.action.commands,
555
+ },
556
+ } satisfies InferSchema<typeof shellInputSchema>),
557
+ providerMetadata: {
558
+ [providerOptionsName]: {
559
+ itemId: part.id,
560
+ },
561
+ },
562
+ });
563
+
564
+ break;
565
+ }
566
+
567
+ case 'message': {
568
+ for (const contentPart of part.content) {
569
+ if (
570
+ options.providerOptions?.[providerOptionsName]?.logprobs &&
571
+ contentPart.logprobs
572
+ ) {
573
+ logprobs.push(contentPart.logprobs);
574
+ }
575
+
576
+ const providerMetadata: SharedV3ProviderMetadata[string] = {
577
+ itemId: part.id,
578
+ ...(contentPart.annotations.length > 0 && {
579
+ annotations: contentPart.annotations,
580
+ }),
581
+ } satisfies ResponsesTextProviderMetadata;
582
+
583
+ content.push({
584
+ type: 'text',
585
+ text: contentPart.text,
586
+ providerMetadata: {
587
+ [providerOptionsName]: providerMetadata,
588
+ },
589
+ });
590
+
591
+ for (const annotation of contentPart.annotations) {
592
+ if (annotation.type === 'url_citation') {
593
+ content.push({
594
+ type: 'source',
595
+ sourceType: 'url',
596
+ id: this.config.generateId?.() ?? generateId(),
597
+ url: annotation.url,
598
+ title: annotation.title,
599
+ });
600
+ } else if (annotation.type === 'file_citation') {
601
+ content.push({
602
+ type: 'source',
603
+ sourceType: 'document',
604
+ id: this.config.generateId?.() ?? generateId(),
605
+ mediaType: 'text/plain',
606
+ title: annotation.filename,
607
+ filename: annotation.filename,
608
+ providerMetadata: {
609
+ [providerOptionsName]: {
610
+ type: annotation.type,
611
+ fileId: annotation.file_id,
612
+ index: annotation.index,
613
+ } satisfies Extract<
614
+ ResponsesSourceDocumentProviderMetadata,
615
+ { type: 'file_citation' }
616
+ >,
617
+ },
618
+ });
619
+ } else if (annotation.type === 'container_file_citation') {
620
+ content.push({
621
+ type: 'source',
622
+ sourceType: 'document',
623
+ id: this.config.generateId?.() ?? generateId(),
624
+ mediaType: 'text/plain',
625
+ title: annotation.filename,
626
+ filename: annotation.filename,
627
+ providerMetadata: {
628
+ [providerOptionsName]: {
629
+ type: annotation.type,
630
+ fileId: annotation.file_id,
631
+ containerId: annotation.container_id,
632
+ } satisfies Extract<
633
+ ResponsesSourceDocumentProviderMetadata,
634
+ { type: 'container_file_citation' }
635
+ >,
636
+ },
637
+ });
638
+ } else if (annotation.type === 'file_path') {
639
+ content.push({
640
+ type: 'source',
641
+ sourceType: 'document',
642
+ id: this.config.generateId?.() ?? generateId(),
643
+ mediaType: 'application/octet-stream',
644
+ title: annotation.file_id,
645
+ filename: annotation.file_id,
646
+ providerMetadata: {
647
+ [providerOptionsName]: {
648
+ type: annotation.type,
649
+ fileId: annotation.file_id,
650
+ index: annotation.index,
651
+ } satisfies Extract<
652
+ ResponsesSourceDocumentProviderMetadata,
653
+ { type: 'file_path' }
654
+ >,
655
+ },
656
+ });
657
+ }
658
+ }
659
+ }
660
+
661
+ break;
662
+ }
663
+
664
+ case 'function_call': {
665
+ hasFunctionCall = true;
666
+
667
+ content.push({
668
+ type: 'tool-call',
669
+ toolCallId: part.call_id,
670
+ toolName: part.name,
671
+ input: part.arguments,
672
+ providerMetadata: {
673
+ [providerOptionsName]: {
674
+ itemId: part.id,
675
+ },
676
+ },
677
+ });
678
+ break;
679
+ }
680
+
681
+ case 'web_search_call': {
682
+ content.push({
683
+ type: 'tool-call',
684
+ toolCallId: part.id,
685
+ toolName: toolNameMapping.toCustomToolName(
686
+ webSearchToolName ?? 'web_search',
687
+ ),
688
+ input: JSON.stringify({}),
689
+ providerExecuted: true,
690
+ });
691
+
692
+ content.push({
693
+ type: 'tool-result',
694
+ toolCallId: part.id,
695
+ toolName: toolNameMapping.toCustomToolName(
696
+ webSearchToolName ?? 'web_search',
697
+ ),
698
+ result: mapWebSearchOutput(part.action),
699
+ });
700
+
701
+ break;
702
+ }
703
+
704
+ case 'mcp_call': {
705
+ const toolCallId =
706
+ part.approval_request_id != null
707
+ ? (approvalRequestIdToDummyToolCallIdFromPrompt[
708
+ part.approval_request_id
709
+ ] ?? part.id)
710
+ : part.id;
711
+
712
+ const toolName = `mcp.${part.name}`;
713
+
714
+ content.push({
715
+ type: 'tool-call',
716
+ toolCallId,
717
+ toolName,
718
+ input: part.arguments,
719
+ providerExecuted: true,
720
+ dynamic: true,
721
+ });
722
+
723
+ content.push({
724
+ type: 'tool-result',
725
+ toolCallId,
726
+ toolName,
727
+ result: {
728
+ type: 'call',
729
+ serverLabel: part.server_label,
730
+ name: part.name,
731
+ arguments: part.arguments,
732
+ ...(part.output != null ? { output: part.output } : {}),
733
+ ...(part.error != null
734
+ ? { error: part.error as unknown as JSONValue }
735
+ : {}),
736
+ } satisfies InferSchema<typeof mcpOutputSchema>,
737
+ providerMetadata: {
738
+ [providerOptionsName]: {
739
+ itemId: part.id,
740
+ },
741
+ },
742
+ });
743
+ break;
744
+ }
745
+
746
+ case 'mcp_list_tools': {
747
+ // skip
748
+ break;
749
+ }
750
+
751
+ case 'mcp_approval_request': {
752
+ const approvalRequestId = part.approval_request_id ?? part.id;
753
+ const dummyToolCallId = this.config.generateId?.() ?? generateId();
754
+ const toolName = `mcp.${part.name}`;
755
+
756
+ content.push({
757
+ type: 'tool-call',
758
+ toolCallId: dummyToolCallId,
759
+ toolName,
760
+ input: part.arguments,
761
+ providerExecuted: true,
762
+ dynamic: true,
763
+ });
764
+
765
+ content.push({
766
+ type: 'tool-approval-request',
767
+ approvalId: approvalRequestId,
768
+ toolCallId: dummyToolCallId,
769
+ } satisfies LanguageModelV3ToolApprovalRequest);
770
+ break;
771
+ }
772
+
773
+ case 'computer_call': {
774
+ content.push({
775
+ type: 'tool-call',
776
+ toolCallId: part.id,
777
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
778
+ input: '',
779
+ providerExecuted: true,
780
+ });
781
+
782
+ content.push({
783
+ type: 'tool-result',
784
+ toolCallId: part.id,
785
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
786
+ result: {
787
+ type: 'computer_use_tool_result',
788
+ status: part.status || 'completed',
789
+ },
790
+ });
791
+ break;
792
+ }
793
+
794
+ case 'file_search_call': {
795
+ content.push({
796
+ type: 'tool-call',
797
+ toolCallId: part.id,
798
+ toolName: toolNameMapping.toCustomToolName('file_search'),
799
+ input: '{}',
800
+ providerExecuted: true,
801
+ });
802
+
803
+ content.push({
804
+ type: 'tool-result',
805
+ toolCallId: part.id,
806
+ toolName: toolNameMapping.toCustomToolName('file_search'),
807
+ result: {
808
+ queries: part.queries,
809
+ results:
810
+ part.results?.map(result => ({
811
+ attributes: result.attributes,
812
+ fileId: result.file_id,
813
+ filename: result.filename,
814
+ score: result.score,
815
+ text: result.text,
816
+ })) ?? null,
817
+ } satisfies InferSchema<typeof fileSearchOutputSchema>,
818
+ });
819
+ break;
820
+ }
821
+
822
+ case 'code_interpreter_call': {
823
+ content.push({
824
+ type: 'tool-call',
825
+ toolCallId: part.id,
826
+ toolName: toolNameMapping.toCustomToolName('code_interpreter'),
827
+ input: JSON.stringify({
828
+ code: part.code,
829
+ containerId: part.container_id,
830
+ } satisfies InferSchema<typeof codeInterpreterInputSchema>),
831
+ providerExecuted: true,
832
+ });
833
+
834
+ content.push({
835
+ type: 'tool-result',
836
+ toolCallId: part.id,
837
+ toolName: toolNameMapping.toCustomToolName('code_interpreter'),
838
+ result: {
839
+ outputs: part.outputs,
840
+ } satisfies InferSchema<typeof codeInterpreterOutputSchema>,
841
+ });
842
+ break;
843
+ }
844
+
845
+ case 'apply_patch_call': {
846
+ content.push({
847
+ type: 'tool-call',
848
+ toolCallId: part.call_id,
849
+ toolName: toolNameMapping.toCustomToolName('apply_patch'),
850
+ input: JSON.stringify({
851
+ callId: part.call_id,
852
+ operation: part.operation,
853
+ } satisfies InferSchema<typeof applyPatchInputSchema>),
854
+ providerMetadata: {
855
+ [providerOptionsName]: {
856
+ itemId: part.id,
857
+ },
858
+ },
859
+ });
860
+
861
+ break;
862
+ }
863
+ }
864
+ }
865
+
866
+ const providerMetadata: SharedV3ProviderMetadata = {
867
+ [providerOptionsName]: { responseId: response.id },
868
+ };
869
+
870
+ if (logprobs.length > 0) {
871
+ providerMetadata[providerOptionsName].logprobs = logprobs;
872
+ }
873
+
874
+ if (typeof response.service_tier === 'string') {
875
+ providerMetadata[providerOptionsName].serviceTier = response.service_tier;
876
+ }
877
+
878
+ const usage = response.usage!; // defined when there is no error
879
+
880
+ return {
881
+ content,
882
+ finishReason: {
883
+ unified: mapOpenAIResponseFinishReason({
884
+ finishReason: response.incomplete_details?.reason,
885
+ hasFunctionCall,
886
+ }),
887
+ raw: response.incomplete_details?.reason ?? undefined,
888
+ },
889
+ usage: convertOpenAIResponsesUsage(usage),
890
+ request: { body },
891
+ response: {
892
+ id: response.id,
893
+ timestamp: new Date(response.created_at! * 1000),
894
+ modelId: response.model,
895
+ headers: responseHeaders,
896
+ body: rawResponse,
897
+ },
898
+ providerMetadata,
899
+ warnings,
900
+ };
901
+ }
902
+
903
+ async doStream(
904
+ options: LanguageModelV3CallOptions,
905
+ ): Promise<LanguageModelV3StreamResult> {
906
+ const {
907
+ args: body,
908
+ warnings,
909
+ webSearchToolName,
910
+ toolNameMapping,
911
+ store,
912
+ providerOptionsName,
913
+ } = await this.getArgs(options);
914
+
915
+ const { responseHeaders, value: response } = await postJsonToApi({
916
+ url: this.config.url({
917
+ path: '/responses',
918
+ modelId: this.modelId,
919
+ }),
920
+ headers: combineHeaders(this.config.headers(), options.headers),
921
+ body: {
922
+ ...body,
923
+ stream: true,
924
+ },
925
+ failedResponseHandler: openaiFailedResponseHandler,
926
+ successfulResponseHandler: createEventSourceResponseHandler(
927
+ openaiResponsesChunkSchema,
928
+ ),
929
+ abortSignal: options.abortSignal,
930
+ fetch: this.config.fetch,
931
+ });
932
+
933
+ const self = this;
934
+
935
+ const approvalRequestIdToDummyToolCallIdFromPrompt =
936
+ extractApprovalRequestIdToToolCallIdMapping(options.prompt);
937
+
938
+ const approvalRequestIdToDummyToolCallIdFromStream = new Map<
939
+ string,
940
+ string
941
+ >();
942
+
943
+ let finishReason: LanguageModelV3FinishReason = {
944
+ unified: 'other',
945
+ raw: undefined,
946
+ };
947
+ let usage: OpenAIResponsesUsage | undefined = undefined;
948
+ const logprobs: Array<OpenAIResponsesLogprobs> = [];
949
+ let responseId: string | null = null;
950
+
951
+ const ongoingToolCalls: Record<
952
+ number,
953
+ | {
954
+ toolName: string;
955
+ toolCallId: string;
956
+ codeInterpreter?: {
957
+ containerId: string;
958
+ };
959
+ applyPatch?: {
960
+ hasDiff: boolean;
961
+ endEmitted: boolean;
962
+ };
963
+ }
964
+ | undefined
965
+ > = {};
966
+
967
+ // set annotations in 'text-end' part providerMetadata.
968
+ const ongoingAnnotations: Array<
969
+ Extract<
970
+ OpenAIResponsesChunk,
971
+ { type: 'response.output_text.annotation.added' }
972
+ >['annotation']
973
+ > = [];
974
+
975
+ // flag that checks if there have been client-side tool calls (not executed by openai)
976
+ let hasFunctionCall = false;
977
+
978
+ const activeReasoning: Record<
979
+ string,
980
+ {
981
+ encryptedContent?: string | null;
982
+ // summary index as string to reasoning part state:
983
+ summaryParts: Record<string, 'active' | 'can-conclude' | 'concluded'>;
984
+ }
985
+ > = {};
986
+
987
+ let serviceTier: string | undefined;
988
+
989
+ return {
990
+ stream: response.pipeThrough(
991
+ new TransformStream<
992
+ ParseResult<OpenAIResponsesChunk>,
993
+ LanguageModelV3StreamPart
994
+ >({
995
+ start(controller) {
996
+ controller.enqueue({ type: 'stream-start', warnings });
997
+ },
998
+
999
+ transform(chunk, controller) {
1000
+ if (options.includeRawChunks) {
1001
+ controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
1002
+ }
1003
+
1004
+ // handle failed chunk parsing / validation:
1005
+ if (!chunk.success) {
1006
+ finishReason = { unified: 'error', raw: undefined };
1007
+ controller.enqueue({ type: 'error', error: chunk.error });
1008
+ return;
1009
+ }
1010
+
1011
+ const value = chunk.value;
1012
+
1013
+ if (isResponseOutputItemAddedChunk(value)) {
1014
+ if (value.item.type === 'function_call') {
1015
+ ongoingToolCalls[value.output_index] = {
1016
+ toolName: value.item.name,
1017
+ toolCallId: value.item.call_id,
1018
+ };
1019
+
1020
+ controller.enqueue({
1021
+ type: 'tool-input-start',
1022
+ id: value.item.call_id,
1023
+ toolName: value.item.name,
1024
+ });
1025
+ } else if (value.item.type === 'web_search_call') {
1026
+ ongoingToolCalls[value.output_index] = {
1027
+ toolName: toolNameMapping.toCustomToolName(
1028
+ webSearchToolName ?? 'web_search',
1029
+ ),
1030
+ toolCallId: value.item.id,
1031
+ };
1032
+
1033
+ controller.enqueue({
1034
+ type: 'tool-input-start',
1035
+ id: value.item.id,
1036
+ toolName: toolNameMapping.toCustomToolName(
1037
+ webSearchToolName ?? 'web_search',
1038
+ ),
1039
+ providerExecuted: true,
1040
+ });
1041
+
1042
+ controller.enqueue({
1043
+ type: 'tool-input-end',
1044
+ id: value.item.id,
1045
+ });
1046
+
1047
+ controller.enqueue({
1048
+ type: 'tool-call',
1049
+ toolCallId: value.item.id,
1050
+ toolName: toolNameMapping.toCustomToolName(
1051
+ webSearchToolName ?? 'web_search',
1052
+ ),
1053
+ input: JSON.stringify({}),
1054
+ providerExecuted: true,
1055
+ });
1056
+ } else if (value.item.type === 'computer_call') {
1057
+ ongoingToolCalls[value.output_index] = {
1058
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
1059
+ toolCallId: value.item.id,
1060
+ };
1061
+
1062
+ controller.enqueue({
1063
+ type: 'tool-input-start',
1064
+ id: value.item.id,
1065
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
1066
+ providerExecuted: true,
1067
+ });
1068
+ } else if (value.item.type === 'code_interpreter_call') {
1069
+ ongoingToolCalls[value.output_index] = {
1070
+ toolName:
1071
+ toolNameMapping.toCustomToolName('code_interpreter'),
1072
+ toolCallId: value.item.id,
1073
+ codeInterpreter: {
1074
+ containerId: value.item.container_id,
1075
+ },
1076
+ };
1077
+
1078
+ controller.enqueue({
1079
+ type: 'tool-input-start',
1080
+ id: value.item.id,
1081
+ toolName:
1082
+ toolNameMapping.toCustomToolName('code_interpreter'),
1083
+ providerExecuted: true,
1084
+ });
1085
+
1086
+ controller.enqueue({
1087
+ type: 'tool-input-delta',
1088
+ id: value.item.id,
1089
+ delta: `{"containerId":"${value.item.container_id}","code":"`,
1090
+ });
1091
+ } else if (value.item.type === 'file_search_call') {
1092
+ controller.enqueue({
1093
+ type: 'tool-call',
1094
+ toolCallId: value.item.id,
1095
+ toolName: toolNameMapping.toCustomToolName('file_search'),
1096
+ input: '{}',
1097
+ providerExecuted: true,
1098
+ });
1099
+ } else if (value.item.type === 'image_generation_call') {
1100
+ controller.enqueue({
1101
+ type: 'tool-call',
1102
+ toolCallId: value.item.id,
1103
+ toolName:
1104
+ toolNameMapping.toCustomToolName('image_generation'),
1105
+ input: '{}',
1106
+ providerExecuted: true,
1107
+ });
1108
+ } else if (
1109
+ value.item.type === 'mcp_call' ||
1110
+ value.item.type === 'mcp_list_tools' ||
1111
+ value.item.type === 'mcp_approval_request'
1112
+ ) {
1113
+ // Emit MCP tool-call/approval parts on output_item.done instead, so we can:
1114
+ // - alias mcp_call IDs when an approval_request_id is present
1115
+ // - emit a proper tool-approval-request part for MCP approvals
1116
+ } else if (value.item.type === 'apply_patch_call') {
1117
+ const { call_id: callId, operation } = value.item;
1118
+
1119
+ ongoingToolCalls[value.output_index] = {
1120
+ toolName: toolNameMapping.toCustomToolName('apply_patch'),
1121
+ toolCallId: callId,
1122
+ applyPatch: {
1123
+ // delete_file doesn't have diff
1124
+ hasDiff: operation.type === 'delete_file',
1125
+ endEmitted: operation.type === 'delete_file',
1126
+ },
1127
+ };
1128
+
1129
+ controller.enqueue({
1130
+ type: 'tool-input-start',
1131
+ id: callId,
1132
+ toolName: toolNameMapping.toCustomToolName('apply_patch'),
1133
+ });
1134
+
1135
+ if (operation.type === 'delete_file') {
1136
+ const inputString = JSON.stringify({
1137
+ callId,
1138
+ operation,
1139
+ } satisfies InferSchema<typeof applyPatchInputSchema>);
1140
+
1141
+ controller.enqueue({
1142
+ type: 'tool-input-delta',
1143
+ id: callId,
1144
+ delta: inputString,
1145
+ });
1146
+
1147
+ controller.enqueue({
1148
+ type: 'tool-input-end',
1149
+ id: callId,
1150
+ });
1151
+ } else {
1152
+ controller.enqueue({
1153
+ type: 'tool-input-delta',
1154
+ id: callId,
1155
+ delta: `{"callId":"${escapeJSONDelta(callId)}","operation":{"type":"${escapeJSONDelta(operation.type)}","path":"${escapeJSONDelta(operation.path)}","diff":"`,
1156
+ });
1157
+ }
1158
+ } else if (value.item.type === 'shell_call') {
1159
+ ongoingToolCalls[value.output_index] = {
1160
+ toolName: toolNameMapping.toCustomToolName('shell'),
1161
+ toolCallId: value.item.call_id,
1162
+ };
1163
+ } else if (value.item.type === 'message') {
1164
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
1165
+ controller.enqueue({
1166
+ type: 'text-start',
1167
+ id: value.item.id,
1168
+ providerMetadata: {
1169
+ [providerOptionsName]: {
1170
+ itemId: value.item.id,
1171
+ },
1172
+ },
1173
+ });
1174
+ } else if (
1175
+ isResponseOutputItemAddedChunk(value) &&
1176
+ value.item.type === 'reasoning'
1177
+ ) {
1178
+ activeReasoning[value.item.id] = {
1179
+ encryptedContent: value.item.encrypted_content,
1180
+ summaryParts: { 0: 'active' },
1181
+ };
1182
+
1183
+ controller.enqueue({
1184
+ type: 'reasoning-start',
1185
+ id: `${value.item.id}:0`,
1186
+ providerMetadata: {
1187
+ [providerOptionsName]: {
1188
+ itemId: value.item.id,
1189
+ reasoningEncryptedContent:
1190
+ value.item.encrypted_content ?? null,
1191
+ },
1192
+ },
1193
+ });
1194
+ }
1195
+ } else if (isResponseOutputItemDoneChunk(value)) {
1196
+ if (value.item.type === 'message') {
1197
+ controller.enqueue({
1198
+ type: 'text-end',
1199
+ id: value.item.id,
1200
+ providerMetadata: {
1201
+ [providerOptionsName]: {
1202
+ itemId: value.item.id,
1203
+ ...(ongoingAnnotations.length > 0 && {
1204
+ annotations: ongoingAnnotations,
1205
+ }),
1206
+ } satisfies ResponsesTextProviderMetadata,
1207
+ },
1208
+ });
1209
+ } else if (value.item.type === 'function_call') {
1210
+ ongoingToolCalls[value.output_index] = undefined;
1211
+ hasFunctionCall = true;
1212
+
1213
+ controller.enqueue({
1214
+ type: 'tool-input-end',
1215
+ id: value.item.call_id,
1216
+ });
1217
+
1218
+ controller.enqueue({
1219
+ type: 'tool-call',
1220
+ toolCallId: value.item.call_id,
1221
+ toolName: value.item.name,
1222
+ input: value.item.arguments,
1223
+ providerMetadata: {
1224
+ [providerOptionsName]: {
1225
+ itemId: value.item.id,
1226
+ },
1227
+ },
1228
+ });
1229
+ } else if (value.item.type === 'web_search_call') {
1230
+ ongoingToolCalls[value.output_index] = undefined;
1231
+
1232
+ controller.enqueue({
1233
+ type: 'tool-result',
1234
+ toolCallId: value.item.id,
1235
+ toolName: toolNameMapping.toCustomToolName(
1236
+ webSearchToolName ?? 'web_search',
1237
+ ),
1238
+ result: mapWebSearchOutput(value.item.action),
1239
+ });
1240
+ } else if (value.item.type === 'computer_call') {
1241
+ ongoingToolCalls[value.output_index] = undefined;
1242
+
1243
+ controller.enqueue({
1244
+ type: 'tool-input-end',
1245
+ id: value.item.id,
1246
+ });
1247
+
1248
+ controller.enqueue({
1249
+ type: 'tool-call',
1250
+ toolCallId: value.item.id,
1251
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
1252
+ input: '',
1253
+ providerExecuted: true,
1254
+ });
1255
+
1256
+ controller.enqueue({
1257
+ type: 'tool-result',
1258
+ toolCallId: value.item.id,
1259
+ toolName: toolNameMapping.toCustomToolName('computer_use'),
1260
+ result: {
1261
+ type: 'computer_use_tool_result',
1262
+ status: value.item.status || 'completed',
1263
+ },
1264
+ });
1265
+ } else if (value.item.type === 'file_search_call') {
1266
+ ongoingToolCalls[value.output_index] = undefined;
1267
+
1268
+ controller.enqueue({
1269
+ type: 'tool-result',
1270
+ toolCallId: value.item.id,
1271
+ toolName: toolNameMapping.toCustomToolName('file_search'),
1272
+ result: {
1273
+ queries: value.item.queries,
1274
+ results:
1275
+ value.item.results?.map(result => ({
1276
+ attributes: result.attributes,
1277
+ fileId: result.file_id,
1278
+ filename: result.filename,
1279
+ score: result.score,
1280
+ text: result.text,
1281
+ })) ?? null,
1282
+ } satisfies InferSchema<typeof fileSearchOutputSchema>,
1283
+ });
1284
+ } else if (value.item.type === 'code_interpreter_call') {
1285
+ ongoingToolCalls[value.output_index] = undefined;
1286
+
1287
+ controller.enqueue({
1288
+ type: 'tool-result',
1289
+ toolCallId: value.item.id,
1290
+ toolName:
1291
+ toolNameMapping.toCustomToolName('code_interpreter'),
1292
+ result: {
1293
+ outputs: value.item.outputs,
1294
+ } satisfies InferSchema<typeof codeInterpreterOutputSchema>,
1295
+ });
1296
+ } else if (value.item.type === 'image_generation_call') {
1297
+ controller.enqueue({
1298
+ type: 'tool-result',
1299
+ toolCallId: value.item.id,
1300
+ toolName:
1301
+ toolNameMapping.toCustomToolName('image_generation'),
1302
+ result: {
1303
+ result: value.item.result,
1304
+ } satisfies InferSchema<typeof imageGenerationOutputSchema>,
1305
+ });
1306
+ } else if (value.item.type === 'mcp_call') {
1307
+ ongoingToolCalls[value.output_index] = undefined;
1308
+
1309
+ const approvalRequestId =
1310
+ value.item.approval_request_id ?? undefined;
1311
+
1312
+ // when MCP tools require approval, we track them with our own
1313
+ // tool call IDs and then map OpenAI's approval_request_id back to our ID so results match.
1314
+ const aliasedToolCallId =
1315
+ approvalRequestId != null
1316
+ ? (approvalRequestIdToDummyToolCallIdFromStream.get(
1317
+ approvalRequestId,
1318
+ ) ??
1319
+ approvalRequestIdToDummyToolCallIdFromPrompt[
1320
+ approvalRequestId
1321
+ ] ??
1322
+ value.item.id)
1323
+ : value.item.id;
1324
+
1325
+ const toolName = `mcp.${value.item.name}`;
1326
+
1327
+ controller.enqueue({
1328
+ type: 'tool-call',
1329
+ toolCallId: aliasedToolCallId,
1330
+ toolName,
1331
+ input: value.item.arguments,
1332
+ providerExecuted: true,
1333
+ dynamic: true,
1334
+ });
1335
+
1336
+ controller.enqueue({
1337
+ type: 'tool-result',
1338
+ toolCallId: aliasedToolCallId,
1339
+ toolName,
1340
+ result: {
1341
+ type: 'call',
1342
+ serverLabel: value.item.server_label,
1343
+ name: value.item.name,
1344
+ arguments: value.item.arguments,
1345
+ ...(value.item.output != null
1346
+ ? { output: value.item.output }
1347
+ : {}),
1348
+ ...(value.item.error != null
1349
+ ? { error: value.item.error as unknown as JSONValue }
1350
+ : {}),
1351
+ } satisfies InferSchema<typeof mcpOutputSchema>,
1352
+ providerMetadata: {
1353
+ [providerOptionsName]: {
1354
+ itemId: value.item.id,
1355
+ },
1356
+ },
1357
+ });
1358
+ } else if (value.item.type === 'mcp_list_tools') {
1359
+ // Skip listTools - we don't expose this to the UI or send it back
1360
+ ongoingToolCalls[value.output_index] = undefined;
1361
+
1362
+ // skip
1363
+ } else if (value.item.type === 'apply_patch_call') {
1364
+ const toolCall = ongoingToolCalls[value.output_index];
1365
+ if (
1366
+ toolCall?.applyPatch &&
1367
+ !toolCall.applyPatch.endEmitted &&
1368
+ value.item.operation.type !== 'delete_file'
1369
+ ) {
1370
+ if (!toolCall.applyPatch.hasDiff) {
1371
+ controller.enqueue({
1372
+ type: 'tool-input-delta',
1373
+ id: toolCall.toolCallId,
1374
+ delta: escapeJSONDelta(value.item.operation.diff),
1375
+ });
1376
+ }
1377
+
1378
+ controller.enqueue({
1379
+ type: 'tool-input-delta',
1380
+ id: toolCall.toolCallId,
1381
+ delta: '"}}',
1382
+ });
1383
+
1384
+ controller.enqueue({
1385
+ type: 'tool-input-end',
1386
+ id: toolCall.toolCallId,
1387
+ });
1388
+
1389
+ toolCall.applyPatch.endEmitted = true;
1390
+ }
1391
+
1392
+ // Emit the final tool-call with complete diff when status is 'completed'
1393
+ if (toolCall && value.item.status === 'completed') {
1394
+ controller.enqueue({
1395
+ type: 'tool-call',
1396
+ toolCallId: toolCall.toolCallId,
1397
+ toolName: toolNameMapping.toCustomToolName('apply_patch'),
1398
+ input: JSON.stringify({
1399
+ callId: value.item.call_id,
1400
+ operation: value.item.operation,
1401
+ } satisfies InferSchema<typeof applyPatchInputSchema>),
1402
+ providerMetadata: {
1403
+ [providerOptionsName]: {
1404
+ itemId: value.item.id,
1405
+ },
1406
+ },
1407
+ });
1408
+ }
1409
+
1410
+ ongoingToolCalls[value.output_index] = undefined;
1411
+ } else if (value.item.type === 'mcp_approval_request') {
1412
+ ongoingToolCalls[value.output_index] = undefined;
1413
+
1414
+ const dummyToolCallId =
1415
+ self.config.generateId?.() ?? generateId();
1416
+ const approvalRequestId =
1417
+ value.item.approval_request_id ?? value.item.id;
1418
+ approvalRequestIdToDummyToolCallIdFromStream.set(
1419
+ approvalRequestId,
1420
+ dummyToolCallId,
1421
+ );
1422
+
1423
+ const toolName = `mcp.${value.item.name}`;
1424
+
1425
+ controller.enqueue({
1426
+ type: 'tool-call',
1427
+ toolCallId: dummyToolCallId,
1428
+ toolName,
1429
+ input: value.item.arguments,
1430
+ providerExecuted: true,
1431
+ dynamic: true,
1432
+ });
1433
+
1434
+ controller.enqueue({
1435
+ type: 'tool-approval-request',
1436
+ approvalId: approvalRequestId,
1437
+ toolCallId: dummyToolCallId,
1438
+ });
1439
+ } else if (value.item.type === 'local_shell_call') {
1440
+ ongoingToolCalls[value.output_index] = undefined;
1441
+
1442
+ controller.enqueue({
1443
+ type: 'tool-call',
1444
+ toolCallId: value.item.call_id,
1445
+ toolName: toolNameMapping.toCustomToolName('local_shell'),
1446
+ input: JSON.stringify({
1447
+ action: {
1448
+ type: 'exec',
1449
+ command: value.item.action.command,
1450
+ timeoutMs: value.item.action.timeout_ms,
1451
+ user: value.item.action.user,
1452
+ workingDirectory: value.item.action.working_directory,
1453
+ env: value.item.action.env,
1454
+ },
1455
+ } satisfies InferSchema<typeof localShellInputSchema>),
1456
+ providerMetadata: {
1457
+ [providerOptionsName]: { itemId: value.item.id },
1458
+ },
1459
+ });
1460
+ } else if (value.item.type === 'shell_call') {
1461
+ ongoingToolCalls[value.output_index] = undefined;
1462
+
1463
+ controller.enqueue({
1464
+ type: 'tool-call',
1465
+ toolCallId: value.item.call_id,
1466
+ toolName: toolNameMapping.toCustomToolName('shell'),
1467
+ input: JSON.stringify({
1468
+ action: {
1469
+ commands: value.item.action.commands,
1470
+ },
1471
+ } satisfies InferSchema<typeof shellInputSchema>),
1472
+ providerMetadata: {
1473
+ [providerOptionsName]: { itemId: value.item.id },
1474
+ },
1475
+ });
1476
+ } else if (value.item.type === 'reasoning') {
1477
+ const activeReasoningPart = activeReasoning[value.item.id];
1478
+
1479
+ // get all active or can-conclude summary parts' ids
1480
+ // to conclude ongoing reasoning parts:
1481
+ const summaryPartIndices = Object.entries(
1482
+ activeReasoningPart.summaryParts,
1483
+ )
1484
+ .filter(
1485
+ ([_, status]) =>
1486
+ status === 'active' || status === 'can-conclude',
1487
+ )
1488
+ .map(([summaryIndex]) => summaryIndex);
1489
+
1490
+ for (const summaryIndex of summaryPartIndices) {
1491
+ controller.enqueue({
1492
+ type: 'reasoning-end',
1493
+ id: `${value.item.id}:${summaryIndex}`,
1494
+ providerMetadata: {
1495
+ [providerOptionsName]: {
1496
+ itemId: value.item.id,
1497
+ reasoningEncryptedContent:
1498
+ value.item.encrypted_content ?? null,
1499
+ },
1500
+ },
1501
+ });
1502
+ }
1503
+
1504
+ delete activeReasoning[value.item.id];
1505
+ }
1506
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1507
+ const toolCall = ongoingToolCalls[value.output_index];
1508
+
1509
+ if (toolCall != null) {
1510
+ controller.enqueue({
1511
+ type: 'tool-input-delta',
1512
+ id: toolCall.toolCallId,
1513
+ delta: value.delta,
1514
+ });
1515
+ }
1516
+ } else if (isResponseApplyPatchCallOperationDiffDeltaChunk(value)) {
1517
+ const toolCall = ongoingToolCalls[value.output_index];
1518
+
1519
+ if (toolCall?.applyPatch) {
1520
+ controller.enqueue({
1521
+ type: 'tool-input-delta',
1522
+ id: toolCall.toolCallId,
1523
+ delta: escapeJSONDelta(value.delta),
1524
+ });
1525
+
1526
+ toolCall.applyPatch.hasDiff = true;
1527
+ }
1528
+ } else if (isResponseApplyPatchCallOperationDiffDoneChunk(value)) {
1529
+ const toolCall = ongoingToolCalls[value.output_index];
1530
+
1531
+ if (toolCall?.applyPatch && !toolCall.applyPatch.endEmitted) {
1532
+ if (!toolCall.applyPatch.hasDiff) {
1533
+ controller.enqueue({
1534
+ type: 'tool-input-delta',
1535
+ id: toolCall.toolCallId,
1536
+ delta: escapeJSONDelta(value.diff),
1537
+ });
1538
+
1539
+ toolCall.applyPatch.hasDiff = true;
1540
+ }
1541
+
1542
+ controller.enqueue({
1543
+ type: 'tool-input-delta',
1544
+ id: toolCall.toolCallId,
1545
+ delta: '"}}',
1546
+ });
1547
+
1548
+ controller.enqueue({
1549
+ type: 'tool-input-end',
1550
+ id: toolCall.toolCallId,
1551
+ });
1552
+
1553
+ toolCall.applyPatch.endEmitted = true;
1554
+ }
1555
+ } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
1556
+ controller.enqueue({
1557
+ type: 'tool-result',
1558
+ toolCallId: value.item_id,
1559
+ toolName: toolNameMapping.toCustomToolName('image_generation'),
1560
+ result: {
1561
+ result: value.partial_image_b64,
1562
+ } satisfies InferSchema<typeof imageGenerationOutputSchema>,
1563
+ preliminary: true,
1564
+ });
1565
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
1566
+ const toolCall = ongoingToolCalls[value.output_index];
1567
+
1568
+ if (toolCall != null) {
1569
+ controller.enqueue({
1570
+ type: 'tool-input-delta',
1571
+ id: toolCall.toolCallId,
1572
+ delta: escapeJSONDelta(value.delta),
1573
+ });
1574
+ }
1575
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
1576
+ const toolCall = ongoingToolCalls[value.output_index];
1577
+
1578
+ if (toolCall != null) {
1579
+ controller.enqueue({
1580
+ type: 'tool-input-delta',
1581
+ id: toolCall.toolCallId,
1582
+ delta: '"}',
1583
+ });
1584
+
1585
+ controller.enqueue({
1586
+ type: 'tool-input-end',
1587
+ id: toolCall.toolCallId,
1588
+ });
1589
+
1590
+ // immediately send the tool call after the input end:
1591
+ controller.enqueue({
1592
+ type: 'tool-call',
1593
+ toolCallId: toolCall.toolCallId,
1594
+ toolName:
1595
+ toolNameMapping.toCustomToolName('code_interpreter'),
1596
+ input: JSON.stringify({
1597
+ code: value.code,
1598
+ containerId: toolCall.codeInterpreter!.containerId,
1599
+ } satisfies InferSchema<typeof codeInterpreterInputSchema>),
1600
+ providerExecuted: true,
1601
+ });
1602
+ }
1603
+ } else if (isResponseCreatedChunk(value)) {
1604
+ responseId = value.response.id;
1605
+ controller.enqueue({
1606
+ type: 'response-metadata',
1607
+ id: value.response.id,
1608
+ timestamp: new Date(value.response.created_at * 1000),
1609
+ modelId: value.response.model,
1610
+ });
1611
+ } else if (isTextDeltaChunk(value)) {
1612
+ controller.enqueue({
1613
+ type: 'text-delta',
1614
+ id: value.item_id,
1615
+ delta: value.delta,
1616
+ });
1617
+
1618
+ if (
1619
+ options.providerOptions?.[providerOptionsName]?.logprobs &&
1620
+ value.logprobs
1621
+ ) {
1622
+ logprobs.push(value.logprobs);
1623
+ }
1624
+ } else if (value.type === 'response.reasoning_summary_part.added') {
1625
+ // the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk
1626
+ if (value.summary_index > 0) {
1627
+ const activeReasoningPart = activeReasoning[value.item_id]!;
1628
+
1629
+ activeReasoningPart.summaryParts[value.summary_index] =
1630
+ 'active';
1631
+
1632
+ // since there is a new active summary part, we can conclude all can-conclude summary parts
1633
+ for (const summaryIndex of Object.keys(
1634
+ activeReasoningPart.summaryParts,
1635
+ )) {
1636
+ if (
1637
+ activeReasoningPart.summaryParts[summaryIndex] ===
1638
+ 'can-conclude'
1639
+ ) {
1640
+ controller.enqueue({
1641
+ type: 'reasoning-end',
1642
+ id: `${value.item_id}:${summaryIndex}`,
1643
+ providerMetadata: {
1644
+ [providerOptionsName]: { itemId: value.item_id },
1645
+ },
1646
+ });
1647
+ activeReasoningPart.summaryParts[summaryIndex] =
1648
+ 'concluded';
1649
+ }
1650
+ }
1651
+
1652
+ controller.enqueue({
1653
+ type: 'reasoning-start',
1654
+ id: `${value.item_id}:${value.summary_index}`,
1655
+ providerMetadata: {
1656
+ [providerOptionsName]: {
1657
+ itemId: value.item_id,
1658
+ reasoningEncryptedContent:
1659
+ activeReasoning[value.item_id]?.encryptedContent ??
1660
+ null,
1661
+ },
1662
+ },
1663
+ });
1664
+ }
1665
+ } else if (value.type === 'response.reasoning_summary_text.delta') {
1666
+ controller.enqueue({
1667
+ type: 'reasoning-delta',
1668
+ id: `${value.item_id}:${value.summary_index}`,
1669
+ delta: value.delta,
1670
+ providerMetadata: {
1671
+ [providerOptionsName]: {
1672
+ itemId: value.item_id,
1673
+ },
1674
+ },
1675
+ });
1676
+ } else if (value.type === 'response.reasoning_summary_part.done') {
1677
+ // when OpenAI stores the message data, we can immediately conclude the reasoning part
1678
+ // since we do not need to send the encrypted content.
1679
+ if (store) {
1680
+ controller.enqueue({
1681
+ type: 'reasoning-end',
1682
+ id: `${value.item_id}:${value.summary_index}`,
1683
+ providerMetadata: {
1684
+ [providerOptionsName]: { itemId: value.item_id },
1685
+ },
1686
+ });
1687
+
1688
+ // mark the summary part as concluded
1689
+ activeReasoning[value.item_id]!.summaryParts[
1690
+ value.summary_index
1691
+ ] = 'concluded';
1692
+ } else {
1693
+ // mark the summary part as can-conclude only
1694
+ // because we need to have a final summary part with the encrypted content
1695
+ activeReasoning[value.item_id]!.summaryParts[
1696
+ value.summary_index
1697
+ ] = 'can-conclude';
1698
+ }
1699
+ } else if (isResponseFinishedChunk(value)) {
1700
+ finishReason = {
1701
+ unified: mapOpenAIResponseFinishReason({
1702
+ finishReason: value.response.incomplete_details?.reason,
1703
+ hasFunctionCall,
1704
+ }),
1705
+ raw: value.response.incomplete_details?.reason ?? undefined,
1706
+ };
1707
+ usage = value.response.usage;
1708
+ if (typeof value.response.service_tier === 'string') {
1709
+ serviceTier = value.response.service_tier;
1710
+ }
1711
+ } else if (isResponseAnnotationAddedChunk(value)) {
1712
+ ongoingAnnotations.push(value.annotation);
1713
+ if (value.annotation.type === 'url_citation') {
1714
+ controller.enqueue({
1715
+ type: 'source',
1716
+ sourceType: 'url',
1717
+ id: self.config.generateId?.() ?? generateId(),
1718
+ url: value.annotation.url,
1719
+ title: value.annotation.title,
1720
+ });
1721
+ } else if (value.annotation.type === 'file_citation') {
1722
+ controller.enqueue({
1723
+ type: 'source',
1724
+ sourceType: 'document',
1725
+ id: self.config.generateId?.() ?? generateId(),
1726
+ mediaType: 'text/plain',
1727
+ title: value.annotation.filename,
1728
+ filename: value.annotation.filename,
1729
+ providerMetadata: {
1730
+ [providerOptionsName]: {
1731
+ type: value.annotation.type,
1732
+ fileId: value.annotation.file_id,
1733
+ index: value.annotation.index,
1734
+ } satisfies Extract<
1735
+ ResponsesSourceDocumentProviderMetadata,
1736
+ { type: 'file_citation' }
1737
+ >,
1738
+ },
1739
+ });
1740
+ } else if (value.annotation.type === 'container_file_citation') {
1741
+ controller.enqueue({
1742
+ type: 'source',
1743
+ sourceType: 'document',
1744
+ id: self.config.generateId?.() ?? generateId(),
1745
+ mediaType: 'text/plain',
1746
+ title: value.annotation.filename,
1747
+ filename: value.annotation.filename,
1748
+ providerMetadata: {
1749
+ [providerOptionsName]: {
1750
+ type: value.annotation.type,
1751
+ fileId: value.annotation.file_id,
1752
+ containerId: value.annotation.container_id,
1753
+ } satisfies Extract<
1754
+ ResponsesSourceDocumentProviderMetadata,
1755
+ { type: 'container_file_citation' }
1756
+ >,
1757
+ },
1758
+ });
1759
+ } else if (value.annotation.type === 'file_path') {
1760
+ controller.enqueue({
1761
+ type: 'source',
1762
+ sourceType: 'document',
1763
+ id: self.config.generateId?.() ?? generateId(),
1764
+ mediaType: 'application/octet-stream',
1765
+ title: value.annotation.file_id,
1766
+ filename: value.annotation.file_id,
1767
+ providerMetadata: {
1768
+ [providerOptionsName]: {
1769
+ type: value.annotation.type,
1770
+ fileId: value.annotation.file_id,
1771
+ index: value.annotation.index,
1772
+ } satisfies Extract<
1773
+ ResponsesSourceDocumentProviderMetadata,
1774
+ { type: 'file_path' }
1775
+ >,
1776
+ },
1777
+ });
1778
+ }
1779
+ } else if (isErrorChunk(value)) {
1780
+ controller.enqueue({ type: 'error', error: value });
1781
+ }
1782
+ },
1783
+
1784
+ flush(controller) {
1785
+ const providerMetadata: SharedV3ProviderMetadata = {
1786
+ [providerOptionsName]: {
1787
+ responseId,
1788
+ },
1789
+ };
1790
+
1791
+ if (logprobs.length > 0) {
1792
+ providerMetadata[providerOptionsName].logprobs = logprobs;
1793
+ }
1794
+
1795
+ if (serviceTier !== undefined) {
1796
+ providerMetadata[providerOptionsName].serviceTier = serviceTier;
1797
+ }
1798
+
1799
+ controller.enqueue({
1800
+ type: 'finish',
1801
+ finishReason,
1802
+ usage: convertOpenAIResponsesUsage(usage),
1803
+ providerMetadata,
1804
+ });
1805
+ },
1806
+ }),
1807
+ ),
1808
+ request: { body },
1809
+ response: { headers: responseHeaders },
1810
+ };
1811
+ }
1812
+ }
1813
+
1814
+ function isTextDeltaChunk(
1815
+ chunk: OpenAIResponsesChunk,
1816
+ ): chunk is OpenAIResponsesChunk & { type: 'response.output_text.delta' } {
1817
+ return chunk.type === 'response.output_text.delta';
1818
+ }
1819
+
1820
+ function isResponseOutputItemDoneChunk(
1821
+ chunk: OpenAIResponsesChunk,
1822
+ ): chunk is OpenAIResponsesChunk & { type: 'response.output_item.done' } {
1823
+ return chunk.type === 'response.output_item.done';
1824
+ }
1825
+
1826
+ function isResponseFinishedChunk(
1827
+ chunk: OpenAIResponsesChunk,
1828
+ ): chunk is OpenAIResponsesChunk & {
1829
+ type: 'response.completed' | 'response.incomplete';
1830
+ } {
1831
+ return (
1832
+ chunk.type === 'response.completed' || chunk.type === 'response.incomplete'
1833
+ );
1834
+ }
1835
+
1836
+ function isResponseCreatedChunk(
1837
+ chunk: OpenAIResponsesChunk,
1838
+ ): chunk is OpenAIResponsesChunk & { type: 'response.created' } {
1839
+ return chunk.type === 'response.created';
1840
+ }
1841
+
1842
+ function isResponseFunctionCallArgumentsDeltaChunk(
1843
+ chunk: OpenAIResponsesChunk,
1844
+ ): chunk is OpenAIResponsesChunk & {
1845
+ type: 'response.function_call_arguments.delta';
1846
+ } {
1847
+ return chunk.type === 'response.function_call_arguments.delta';
1848
+ }
1849
+ function isResponseImageGenerationCallPartialImageChunk(
1850
+ chunk: OpenAIResponsesChunk,
1851
+ ): chunk is OpenAIResponsesChunk & {
1852
+ type: 'response.image_generation_call.partial_image';
1853
+ } {
1854
+ return chunk.type === 'response.image_generation_call.partial_image';
1855
+ }
1856
+
1857
+ function isResponseCodeInterpreterCallCodeDeltaChunk(
1858
+ chunk: OpenAIResponsesChunk,
1859
+ ): chunk is OpenAIResponsesChunk & {
1860
+ type: 'response.code_interpreter_call_code.delta';
1861
+ } {
1862
+ return chunk.type === 'response.code_interpreter_call_code.delta';
1863
+ }
1864
+
1865
+ function isResponseCodeInterpreterCallCodeDoneChunk(
1866
+ chunk: OpenAIResponsesChunk,
1867
+ ): chunk is OpenAIResponsesChunk & {
1868
+ type: 'response.code_interpreter_call_code.done';
1869
+ } {
1870
+ return chunk.type === 'response.code_interpreter_call_code.done';
1871
+ }
1872
+
1873
+ function isResponseApplyPatchCallOperationDiffDeltaChunk(
1874
+ chunk: OpenAIResponsesChunk,
1875
+ ): chunk is OpenAIResponsesApplyPatchOperationDiffDeltaChunk {
1876
+ return chunk.type === 'response.apply_patch_call_operation_diff.delta';
1877
+ }
1878
+
1879
+ function isResponseApplyPatchCallOperationDiffDoneChunk(
1880
+ chunk: OpenAIResponsesChunk,
1881
+ ): chunk is OpenAIResponsesApplyPatchOperationDiffDoneChunk {
1882
+ return chunk.type === 'response.apply_patch_call_operation_diff.done';
1883
+ }
1884
+
1885
+ function isResponseOutputItemAddedChunk(
1886
+ chunk: OpenAIResponsesChunk,
1887
+ ): chunk is OpenAIResponsesChunk & { type: 'response.output_item.added' } {
1888
+ return chunk.type === 'response.output_item.added';
1889
+ }
1890
+
1891
+ function isResponseAnnotationAddedChunk(
1892
+ chunk: OpenAIResponsesChunk,
1893
+ ): chunk is OpenAIResponsesChunk & {
1894
+ type: 'response.output_text.annotation.added';
1895
+ } {
1896
+ return chunk.type === 'response.output_text.annotation.added';
1897
+ }
1898
+
1899
+ function isErrorChunk(
1900
+ chunk: OpenAIResponsesChunk,
1901
+ ): chunk is OpenAIResponsesChunk & { type: 'error' } {
1902
+ return chunk.type === 'error';
1903
+ }
1904
+
1905
+ function mapWebSearchOutput(
1906
+ action: OpenAIResponsesWebSearchAction,
1907
+ ): InferSchema<typeof webSearchOutputSchema> {
1908
+ switch (action.type) {
1909
+ case 'search':
1910
+ return {
1911
+ action: { type: 'search', query: action.query ?? undefined },
1912
+ // include sources when provided by the Responses API (behind include flag)
1913
+ ...(action.sources != null && { sources: action.sources }),
1914
+ };
1915
+ case 'open_page':
1916
+ return { action: { type: 'openPage', url: action.url } };
1917
+ case 'find_in_page':
1918
+ return {
1919
+ action: {
1920
+ type: 'findInPage',
1921
+ url: action.url,
1922
+ pattern: action.pattern,
1923
+ },
1924
+ };
1925
+ }
1926
+ }
1927
+
1928
+ // The delta is embedded in a JSON string.
1929
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
1930
+ function escapeJSONDelta(delta: string) {
1931
+ return JSON.stringify(delta).slice(1, -1);
1932
+ }