@ai-sdk/openai 3.0.14 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/package.json +6 -5
  5. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  6. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  7. package/src/chat/convert-openai-chat-usage.ts +57 -0
  8. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  9. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  10. package/src/chat/get-response-metadata.ts +15 -0
  11. package/src/chat/map-openai-finish-reason.ts +19 -0
  12. package/src/chat/openai-chat-api.ts +198 -0
  13. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  14. package/src/chat/openai-chat-language-model.ts +700 -0
  15. package/src/chat/openai-chat-options.ts +186 -0
  16. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  17. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  18. package/src/chat/openai-chat-prompt.ts +70 -0
  19. package/src/completion/convert-openai-completion-usage.ts +46 -0
  20. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  21. package/src/completion/get-response-metadata.ts +15 -0
  22. package/src/completion/map-openai-finish-reason.ts +19 -0
  23. package/src/completion/openai-completion-api.ts +81 -0
  24. package/src/completion/openai-completion-language-model.test.ts +752 -0
  25. package/src/completion/openai-completion-language-model.ts +336 -0
  26. package/src/completion/openai-completion-options.ts +58 -0
  27. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  28. package/src/embedding/openai-embedding-api.ts +13 -0
  29. package/src/embedding/openai-embedding-model.test.ts +146 -0
  30. package/src/embedding/openai-embedding-model.ts +95 -0
  31. package/src/embedding/openai-embedding-options.ts +30 -0
  32. package/src/image/openai-image-api.ts +35 -0
  33. package/src/image/openai-image-model.test.ts +722 -0
  34. package/src/image/openai-image-model.ts +305 -0
  35. package/src/image/openai-image-options.ts +28 -0
  36. package/src/index.ts +9 -0
  37. package/src/internal/index.ts +19 -0
  38. package/src/openai-config.ts +18 -0
  39. package/src/openai-error.test.ts +34 -0
  40. package/src/openai-error.ts +22 -0
  41. package/src/openai-language-model-capabilities.test.ts +93 -0
  42. package/src/openai-language-model-capabilities.ts +54 -0
  43. package/src/openai-provider.test.ts +98 -0
  44. package/src/openai-provider.ts +270 -0
  45. package/src/openai-tools.ts +114 -0
  46. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  47. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  48. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  49. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  50. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  51. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  52. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  53. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  54. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  55. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  56. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  57. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  58. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  59. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  60. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  61. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  62. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  63. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  64. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  65. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  66. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  67. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  68. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  71. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  72. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  73. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  74. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  75. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  76. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  77. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  78. package/src/responses/convert-openai-responses-usage.ts +53 -0
  79. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  80. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  81. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  82. package/src/responses/openai-responses-api.test.ts +89 -0
  83. package/src/responses/openai-responses-api.ts +1086 -0
  84. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  85. package/src/responses/openai-responses-language-model.ts +1932 -0
  86. package/src/responses/openai-responses-options.ts +312 -0
  87. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  88. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  89. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  90. package/src/speech/openai-speech-api.ts +38 -0
  91. package/src/speech/openai-speech-model.test.ts +202 -0
  92. package/src/speech/openai-speech-model.ts +137 -0
  93. package/src/speech/openai-speech-options.ts +22 -0
  94. package/src/tool/apply-patch.ts +141 -0
  95. package/src/tool/code-interpreter.ts +104 -0
  96. package/src/tool/file-search.ts +145 -0
  97. package/src/tool/image-generation.ts +126 -0
  98. package/src/tool/local-shell.test-d.ts +20 -0
  99. package/src/tool/local-shell.ts +72 -0
  100. package/src/tool/mcp.ts +125 -0
  101. package/src/tool/shell.ts +85 -0
  102. package/src/tool/web-search-preview.ts +139 -0
  103. package/src/tool/web-search.test-d.ts +13 -0
  104. package/src/tool/web-search.ts +179 -0
  105. package/src/transcription/openai-transcription-api.ts +37 -0
  106. package/src/transcription/openai-transcription-model.test.ts +507 -0
  107. package/src/transcription/openai-transcription-model.ts +232 -0
  108. package/src/transcription/openai-transcription-options.ts +50 -0
  109. package/src/transcription/transcription-test.mp3 +0 -0
  110. package/src/version.ts +6 -0
package/src/responses/convert-to-openai-responses-input.ts
@@ -0,0 +1,578 @@
+import {
+  LanguageModelV3Prompt,
+  LanguageModelV3ToolApprovalResponsePart,
+  SharedV3Warning,
+  UnsupportedFunctionalityError,
+} from '@ai-sdk/provider';
+import {
+  convertToBase64,
+  isNonNullable,
+  parseProviderOptions,
+  ToolNameMapping,
+  validateTypes,
+} from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+import { applyPatchOutputSchema } from '../tool/apply-patch';
+import {
+  localShellInputSchema,
+  localShellOutputSchema,
+} from '../tool/local-shell';
+import { shellInputSchema, shellOutputSchema } from '../tool/shell';
+import {
+  OpenAIResponsesFunctionCallOutput,
+  OpenAIResponsesInput,
+  OpenAIResponsesReasoning,
+} from './openai-responses-api';
+
+/**
+ * Check if a string is a file ID based on the given prefixes
+ * Returns false if prefixes is undefined (disables file ID detection)
+ */
+function isFileId(data: string, prefixes?: readonly string[]): boolean {
+  if (!prefixes) return false;
+  return prefixes.some(prefix => data.startsWith(prefix));
+}
+
+export async function convertToOpenAIResponsesInput({
+  prompt,
+  toolNameMapping,
+  systemMessageMode,
+  providerOptionsName,
+  fileIdPrefixes,
+  store,
+  hasConversation = false,
+  hasLocalShellTool = false,
+  hasShellTool = false,
+  hasApplyPatchTool = false,
+}: {
+  prompt: LanguageModelV3Prompt;
+  toolNameMapping: ToolNameMapping;
+  systemMessageMode: 'system' | 'developer' | 'remove';
+  providerOptionsName: string;
+  fileIdPrefixes?: readonly string[];
+  store: boolean;
+  hasConversation?: boolean; // when true, skip assistant messages that already have item IDs
+  hasLocalShellTool?: boolean;
+  hasShellTool?: boolean;
+  hasApplyPatchTool?: boolean;
+}): Promise<{
+  input: OpenAIResponsesInput;
+  warnings: Array<SharedV3Warning>;
+}> {
+  const input: OpenAIResponsesInput = [];
+  const warnings: Array<SharedV3Warning> = [];
+  const processedApprovalIds = new Set<string>();
+
+  for (const { role, content } of prompt) {
+    switch (role) {
+      case 'system': {
+        switch (systemMessageMode) {
+          case 'system': {
+            input.push({ role: 'system', content });
+            break;
+          }
+          case 'developer': {
+            input.push({ role: 'developer', content });
+            break;
+          }
+          case 'remove': {
+            warnings.push({
+              type: 'other',
+              message: 'system messages are removed for this model',
+            });
+            break;
+          }
+          default: {
+            const _exhaustiveCheck: never = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`,
+            );
+          }
+        }
+        break;
+      }
+
+      case 'user': {
+        input.push({
+          role: 'user',
+          content: content.map((part, index) => {
+            switch (part.type) {
+              case 'text': {
+                return { type: 'input_text', text: part.text };
+              }
+              case 'file': {
+                if (part.mediaType.startsWith('image/')) {
+                  const mediaType =
+                    part.mediaType === 'image/*'
+                      ? 'image/jpeg'
+                      : part.mediaType;
+
+                  return {
+                    type: 'input_image',
+                    ...(part.data instanceof URL
+                      ? { image_url: part.data.toString() }
+                      : typeof part.data === 'string' &&
+                          isFileId(part.data, fileIdPrefixes)
+                        ? { file_id: part.data }
+                        : {
+                            image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                          }),
+                    detail:
+                      part.providerOptions?.[providerOptionsName]?.imageDetail,
+                  };
+                } else if (part.mediaType === 'application/pdf') {
+                  if (part.data instanceof URL) {
+                    return {
+                      type: 'input_file',
+                      file_url: part.data.toString(),
+                    };
+                  }
+                  return {
+                    type: 'input_file',
+                    ...(typeof part.data === 'string' &&
+                    isFileId(part.data, fileIdPrefixes)
+                      ? { file_id: part.data }
+                      : {
+                          filename: part.filename ?? `part-${index}.pdf`,
+                          file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
+                        }),
+                  };
+                } else {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`,
+                  });
+                }
+              }
+            }
+          }),
+        });
+
+        break;
+      }
+
+      case 'assistant': {
+        const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {};
+
+        for (const part of content) {
+          switch (part.type) {
+            case 'text': {
+              const id = part.providerOptions?.[providerOptionsName]?.itemId as
+                | string
+                | undefined;
+
+              // when using conversation, skip items that already exist in the conversation context to avoid "Duplicate item found" errors
+              if (hasConversation && id != null) {
+                break;
+              }
+
+              // item references reduce the payload size
+              if (store && id != null) {
+                input.push({ type: 'item_reference', id });
+                break;
+              }
+
+              input.push({
+                role: 'assistant',
+                content: [{ type: 'output_text', text: part.text }],
+                id,
+              });
+
+              break;
+            }
+            case 'tool-call': {
+              const id = (part.providerOptions?.[providerOptionsName]?.itemId ??
+                (
+                  part as {
+                    providerMetadata?: {
+                      [providerOptionsName]?: { itemId?: string };
+                    };
+                  }
+                ).providerMetadata?.[providerOptionsName]?.itemId) as
+                | string
+                | undefined;
+
+              if (hasConversation && id != null) {
+                break;
+              }
+
+              if (part.providerExecuted) {
+                if (store && id != null) {
+                  input.push({ type: 'item_reference', id });
+                }
+                break;
+              }
+
+              if (store && id != null) {
+                input.push({ type: 'item_reference', id });
+                break;
+              }
+
+              const resolvedToolName = toolNameMapping.toProviderToolName(
+                part.toolName,
+              );
+
+              if (hasLocalShellTool && resolvedToolName === 'local_shell') {
+                const parsedInput = await validateTypes({
+                  value: part.input,
+                  schema: localShellInputSchema,
+                });
+                input.push({
+                  type: 'local_shell_call',
+                  call_id: part.toolCallId,
+                  id: id!,
+                  action: {
+                    type: 'exec',
+                    command: parsedInput.action.command,
+                    timeout_ms: parsedInput.action.timeoutMs,
+                    user: parsedInput.action.user,
+                    working_directory: parsedInput.action.workingDirectory,
+                    env: parsedInput.action.env,
+                  },
+                });
+
+                break;
+              }
+
+              if (hasShellTool && resolvedToolName === 'shell') {
+                const parsedInput = await validateTypes({
+                  value: part.input,
+                  schema: shellInputSchema,
+                });
+                input.push({
+                  type: 'shell_call',
+                  call_id: part.toolCallId,
+                  id: id!,
+                  status: 'completed',
+                  action: {
+                    commands: parsedInput.action.commands,
+                    timeout_ms: parsedInput.action.timeoutMs,
+                    max_output_length: parsedInput.action.maxOutputLength,
+                  },
+                });
+
+                break;
+              }
+
+              input.push({
+                type: 'function_call',
+                call_id: part.toolCallId,
+                name: resolvedToolName,
+                arguments: JSON.stringify(part.input),
+                id,
+              });
+              break;
+            }
+
+            // assistant tool result parts are from provider-executed tools:
+            case 'tool-result': {
+              // Skip execution-denied results - these are synthetic results from denied
+              // approvals and have no corresponding item in OpenAI's store.
+              // Check both the direct type and if it was transformed to json with execution-denied inside
+              if (
+                part.output.type === 'execution-denied' ||
+                (part.output.type === 'json' &&
+                  typeof part.output.value === 'object' &&
+                  part.output.value != null &&
+                  'type' in part.output.value &&
+                  part.output.value.type === 'execution-denied')
+              ) {
+                break;
+              }
+
+              if (hasConversation) {
+                break;
+              }
+
+              if (store) {
+                const itemId =
+                  (
+                    part as {
+                      providerMetadata?: {
+                        [providerOptionsName]?: { itemId?: string };
+                      };
+                    }
+                  ).providerMetadata?.[providerOptionsName]?.itemId ??
+                  part.toolCallId;
+                input.push({ type: 'item_reference', id: itemId });
+              } else {
+                warnings.push({
+                  type: 'other',
+                  message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`,
+                });
+              }
+
+              break;
+            }
+
+            case 'reasoning': {
+              const providerOptions = await parseProviderOptions({
+                provider: providerOptionsName,
+                providerOptions: part.providerOptions,
+                schema: openaiResponsesReasoningProviderOptionsSchema,
+              });
+
+              const reasoningId = providerOptions?.itemId;
+
+              if (hasConversation && reasoningId != null) {
+                break;
+              }
+
+              if (reasoningId != null) {
+                const reasoningMessage = reasoningMessages[reasoningId];
+
+                if (store) {
+                  // use item references to refer to reasoning (single reference)
+                  // when the first part is encountered
+                  if (reasoningMessage === undefined) {
+                    input.push({ type: 'item_reference', id: reasoningId });
+
+                    // store unused reasoning message to mark id as used
+                    reasoningMessages[reasoningId] = {
+                      type: 'reasoning',
+                      id: reasoningId,
+                      summary: [],
+                    };
+                  }
+                } else {
+                  const summaryParts: Array<{
+                    type: 'summary_text';
+                    text: string;
+                  }> = [];
+
+                  if (part.text.length > 0) {
+                    summaryParts.push({
+                      type: 'summary_text',
+                      text: part.text,
+                    });
+                  } else if (reasoningMessage !== undefined) {
+                    warnings.push({
+                      type: 'other',
+                      message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`,
+                    });
+                  }
+
+                  if (reasoningMessage === undefined) {
+                    reasoningMessages[reasoningId] = {
+                      type: 'reasoning',
+                      id: reasoningId,
+                      encrypted_content:
+                        providerOptions?.reasoningEncryptedContent,
+                      summary: summaryParts,
+                    };
+                    input.push(reasoningMessages[reasoningId]);
+                  } else {
+                    reasoningMessage.summary.push(...summaryParts);
+
+                    // updated encrypted content to enable setting it in the last summary part:
+                    if (providerOptions?.reasoningEncryptedContent != null) {
+                      reasoningMessage.encrypted_content =
+                        providerOptions.reasoningEncryptedContent;
+                    }
+                  }
+                }
+              } else {
+                warnings.push({
+                  type: 'other',
+                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`,
+                });
+              }
+              break;
+            }
+          }
+        }
+
+        break;
+      }
+
+      case 'tool': {
+        for (const part of content) {
+          if (part.type === 'tool-approval-response') {
+            const approvalResponse =
+              part as LanguageModelV3ToolApprovalResponsePart;
+
+            if (processedApprovalIds.has(approvalResponse.approvalId)) {
+              continue;
+            }
+            processedApprovalIds.add(approvalResponse.approvalId);
+
+            if (store) {
+              input.push({
+                type: 'item_reference',
+                id: approvalResponse.approvalId,
+              });
+            }
+
+            input.push({
+              type: 'mcp_approval_response',
+              approval_request_id: approvalResponse.approvalId,
+              approve: approvalResponse.approved,
+            });
+            continue;
+          }
+
+          const output = part.output;
+
+          // Skip execution-denied with approvalId - already handled via tool-approval-response
+          if (output.type === 'execution-denied') {
+            const approvalId = (
+              output.providerOptions?.openai as { approvalId?: string }
+            )?.approvalId;
+
+            if (approvalId) {
+              continue;
+            }
+          }
+
+          const resolvedToolName = toolNameMapping.toProviderToolName(
+            part.toolName,
+          );
+
+          if (
+            hasLocalShellTool &&
+            resolvedToolName === 'local_shell' &&
+            output.type === 'json'
+          ) {
+            const parsedOutput = await validateTypes({
+              value: output.value,
+              schema: localShellOutputSchema,
+            });
+
+            input.push({
+              type: 'local_shell_call_output',
+              call_id: part.toolCallId,
+              output: parsedOutput.output,
+            });
+            continue;
+          }
+
+          if (
+            hasShellTool &&
+            resolvedToolName === 'shell' &&
+            output.type === 'json'
+          ) {
+            const parsedOutput = await validateTypes({
+              value: output.value,
+              schema: shellOutputSchema,
+            });
+
+            input.push({
+              type: 'shell_call_output',
+              call_id: part.toolCallId,
+              output: parsedOutput.output.map(item => ({
+                stdout: item.stdout,
+                stderr: item.stderr,
+                outcome:
+                  item.outcome.type === 'timeout'
+                    ? { type: 'timeout' as const }
+                    : {
+                        type: 'exit' as const,
+                        exit_code: item.outcome.exitCode,
+                      },
+              })),
+            });
+            continue;
+          }
+
+          if (
+            hasApplyPatchTool &&
+            part.toolName === 'apply_patch' &&
+            output.type === 'json'
+          ) {
+            const parsedOutput = await validateTypes({
+              value: output.value,
+              schema: applyPatchOutputSchema,
+            });
+
+            input.push({
+              type: 'apply_patch_call_output',
+              call_id: part.toolCallId,
+              status: parsedOutput.status,
+              output: parsedOutput.output,
+            });
+            continue;
+          }
+
+          let contentValue: OpenAIResponsesFunctionCallOutput['output'];
+          switch (output.type) {
+            case 'text':
+            case 'error-text':
+              contentValue = output.value;
+              break;
+            case 'execution-denied':
+              contentValue = output.reason ?? 'Tool execution denied.';
+              break;
+            case 'json':
+            case 'error-json':
+              contentValue = JSON.stringify(output.value);
+              break;
+            case 'content':
+              contentValue = output.value
+                .map(item => {
+                  switch (item.type) {
+                    case 'text': {
+                      return { type: 'input_text' as const, text: item.text };
+                    }
+
+                    case 'image-data': {
+                      return {
+                        type: 'input_image' as const,
+                        image_url: `data:${item.mediaType};base64,${item.data}`,
+                      };
+                    }
+
+                    case 'image-url': {
+                      return {
+                        type: 'input_image' as const,
+                        image_url: item.url,
+                      };
+                    }
+
+                    case 'file-data': {
+                      return {
+                        type: 'input_file' as const,
+                        filename: item.filename ?? 'data',
+                        file_data: `data:${item.mediaType};base64,${item.data}`,
+                      };
+                    }
+
+                    default: {
+                      warnings.push({
+                        type: 'other',
+                        message: `unsupported tool content part type: ${item.type}`,
+                      });
+                      return undefined;
+                    }
+                  }
+                })
+                .filter(isNonNullable);
+              break;
+          }
+
+          input.push({
+            type: 'function_call_output',
+            call_id: part.toolCallId,
+            output: contentValue,
+          });
+        }
+
+        break;
+      }
+
+      default: {
+        const _exhaustiveCheck: never = role;
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+
+  return { input, warnings };
+}
+
+const openaiResponsesReasoningProviderOptionsSchema = z.object({
+  itemId: z.string().nullish(),
+  reasoningEncryptedContent: z.string().nullish(),
+});
+
+export type OpenAIResponsesReasoningProviderOptions = z.infer<
+  typeof openaiResponsesReasoningProviderOptionsSchema
+>;
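For orientation, a minimal sketch of how the converter above might be invoked. This wrapper is not part of the package; the 'openai' provider options key and the 'file-' prefix are illustrative assumptions based on the signature shown in the hunk.

import type { LanguageModelV3Prompt } from '@ai-sdk/provider';
import type { ToolNameMapping } from '@ai-sdk/provider-utils';
import { convertToOpenAIResponsesInput } from './convert-to-openai-responses-input';

// Hypothetical call site: build the Responses API `input` array for a prompt
// and return it together with any conversion warnings.
async function buildResponsesInput(
  prompt: LanguageModelV3Prompt,
  toolNameMapping: ToolNameMapping,
) {
  return convertToOpenAIResponsesInput({
    prompt,
    toolNameMapping,
    systemMessageMode: 'system', // 'developer' or 'remove' for models without system messages
    providerOptionsName: 'openai', // assumed provider options key
    fileIdPrefixes: ['file-'], // assumed prefix; omit to disable file ID detection
    store: true, // allows item_reference entries instead of resending full items
  });
}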
package/src/responses/map-openai-responses-finish-reason.ts
@@ -0,0 +1,22 @@
+import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+
+export function mapOpenAIResponseFinishReason({
+  finishReason,
+  hasFunctionCall,
+}: {
+  finishReason: string | null | undefined;
+  // flag that checks if there have been client-side tool calls (not executed by openai)
+  hasFunctionCall: boolean;
+}): LanguageModelV3FinishReason['unified'] {
+  switch (finishReason) {
+    case undefined:
+    case null:
+      return hasFunctionCall ? 'tool-calls' : 'stop';
+    case 'max_output_tokens':
+      return 'length';
+    case 'content_filter':
+      return 'content-filter';
+    default:
+      return hasFunctionCall ? 'tool-calls' : 'other';
+  }
+}
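As a quick reference, the mapping above resolves as follows; the calls are illustrative and not taken from the package, and the 'incomplete' value is just an arbitrary string hitting the default branch.

import { mapOpenAIResponseFinishReason } from './map-openai-responses-finish-reason';

// A missing finish reason with pending client-side tool calls maps to 'tool-calls'.
mapOpenAIResponseFinishReason({ finishReason: undefined, hasFunctionCall: true }); // 'tool-calls'
// 'max_output_tokens' and 'content_filter' have dedicated mappings.
mapOpenAIResponseFinishReason({ finishReason: 'max_output_tokens', hasFunctionCall: false }); // 'length'
mapOpenAIResponseFinishReason({ finishReason: 'content_filter', hasFunctionCall: false }); // 'content-filter'
// Any other provider value falls through to 'tool-calls' or 'other'.
mapOpenAIResponseFinishReason({ finishReason: 'incomplete', hasFunctionCall: false }); // 'other'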
package/src/responses/openai-responses-api.test.ts
@@ -0,0 +1,89 @@
+import { InferSchema } from '@ai-sdk/provider-utils';
+import { describe, expectTypeOf, it } from 'vitest';
+import {
+  openaiResponsesChunkSchema,
+  openaiResponsesResponseSchema,
+} from './openai-responses-api';
+
+/**
+ * expectTypeOf is utilized to ensure that the required sections of openaiResponsesChunkSchema
+ * and openaiResponsesResponseSchema are of the same type.
+ */
+
+describe('openaiResponses schema alignment', () => {
+  type Chunk = InferSchema<typeof openaiResponsesChunkSchema>;
+  type Response = InferSchema<typeof openaiResponsesResponseSchema>;
+
+  it('matches annotation shape between chunk and response schemas', () => {
+    type ChunkAnnotation = Extract<
+      Chunk,
+      { type: 'response.output_text.annotation.added' }
+    >['annotation'];
+
+    type ResponseAnnotation = Extract<
+      NonNullable<Response['output']>[number],
+      { type: 'message' }
+    >['content'][number]['annotations'][number];
+
+    expectTypeOf<ChunkAnnotation>().toEqualTypeOf<ResponseAnnotation>();
+  });
+
+  it('aligns web_search_call actions', () => {
+    type ChunkWebSearchAction = Extract<
+      Extract<Chunk, { type: 'response.output_item.done' }>['item'],
+      { type: 'web_search_call' }
+    >['action'];
+
+    type ResponseWebSearchAction = Extract<
+      NonNullable<Response['output']>[number],
+      { type: 'web_search_call' }
+    >['action'];
+
+    expectTypeOf<ChunkWebSearchAction>().toEqualTypeOf<ResponseWebSearchAction>();
+  });
+
+  it('aligns code_interpreter outputs', () => {
+    type ChunkCodeInterpreterOutputs = Extract<
+      Extract<Chunk, { type: 'response.output_item.done' }>['item'],
+      { type: 'code_interpreter_call' }
+    >['outputs'];
+
+    type ResponseCodeInterpreterOutputs = Extract<
+      NonNullable<Response['output']>[number],
+      { type: 'code_interpreter_call' }
+    >['outputs'];
+
+    expectTypeOf<ChunkCodeInterpreterOutputs>().toEqualTypeOf<ResponseCodeInterpreterOutputs>();
+  });
+
+  it('aligns file_search_call results', () => {
+    type ChunkFileSearchResults = Extract<
+      Extract<Chunk, { type: 'response.output_item.done' }>['item'],
+      { type: 'file_search_call' }
+    >['results'];
+
+    type ResponseFileSearchResults = Extract<
+      NonNullable<Response['output']>[number],
+      { type: 'file_search_call' }
+    >['results'];
+
+    expectTypeOf<ChunkFileSearchResults>().toEqualTypeOf<ResponseFileSearchResults>();
+  });
+
+  it('aligns output_text logprobs', () => {
+    type ChunkLogprobs = Extract<
+      Chunk,
+      { type: 'response.output_text.delta' }
+    >['logprobs'];
+
+    type ResponseLogprobs = Extract<
+      Extract<
+        NonNullable<Response['output']>[number],
+        { type: 'message' }
+      >['content'][number],
+      { type: 'output_text' }
+    >['logprobs'];
+
+    expectTypeOf<ChunkLogprobs>().toEqualTypeOf<ResponseLogprobs>();
+  });
+});