@ai-sdk/openai 3.0.13 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +1 -1
  3. package/dist/index.d.ts +1 -1
  4. package/dist/index.js +1 -1
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1 -1
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +1 -1
  9. package/dist/internal/index.d.ts +1 -1
  10. package/dist/internal/index.js.map +1 -1
  11. package/dist/internal/index.mjs.map +1 -1
  12. package/package.json +5 -4
  13. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  14. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  15. package/src/chat/convert-openai-chat-usage.ts +57 -0
  16. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  17. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  18. package/src/chat/get-response-metadata.ts +15 -0
  19. package/src/chat/map-openai-finish-reason.ts +19 -0
  20. package/src/chat/openai-chat-api.ts +198 -0
  21. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  22. package/src/chat/openai-chat-language-model.ts +700 -0
  23. package/src/chat/openai-chat-options.ts +186 -0
  24. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  25. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  26. package/src/chat/openai-chat-prompt.ts +70 -0
  27. package/src/completion/convert-openai-completion-usage.ts +46 -0
  28. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  29. package/src/completion/get-response-metadata.ts +15 -0
  30. package/src/completion/map-openai-finish-reason.ts +19 -0
  31. package/src/completion/openai-completion-api.ts +81 -0
  32. package/src/completion/openai-completion-language-model.test.ts +752 -0
  33. package/src/completion/openai-completion-language-model.ts +336 -0
  34. package/src/completion/openai-completion-options.ts +58 -0
  35. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  36. package/src/embedding/openai-embedding-api.ts +13 -0
  37. package/src/embedding/openai-embedding-model.test.ts +146 -0
  38. package/src/embedding/openai-embedding-model.ts +95 -0
  39. package/src/embedding/openai-embedding-options.ts +30 -0
  40. package/src/image/openai-image-api.ts +35 -0
  41. package/src/image/openai-image-model.test.ts +722 -0
  42. package/src/image/openai-image-model.ts +305 -0
  43. package/src/image/openai-image-options.ts +28 -0
  44. package/src/index.ts +9 -0
  45. package/src/internal/index.ts +19 -0
  46. package/src/openai-config.ts +18 -0
  47. package/src/openai-error.test.ts +34 -0
  48. package/src/openai-error.ts +22 -0
  49. package/src/openai-language-model-capabilities.test.ts +93 -0
  50. package/src/openai-language-model-capabilities.ts +54 -0
  51. package/src/openai-provider.test.ts +98 -0
  52. package/src/openai-provider.ts +270 -0
  53. package/src/openai-tools.ts +114 -0
  54. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  55. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  56. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  57. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  58. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  59. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  60. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  61. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  62. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  63. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  64. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  65. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  66. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  67. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  68. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  71. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  72. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  73. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  74. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  75. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  76. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  77. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  78. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  79. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  80. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  81. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  82. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  83. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  84. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  85. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  86. package/src/responses/convert-openai-responses-usage.ts +53 -0
  87. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  88. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  89. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  90. package/src/responses/openai-responses-api.test.ts +89 -0
  91. package/src/responses/openai-responses-api.ts +1086 -0
  92. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  93. package/src/responses/openai-responses-language-model.ts +1932 -0
  94. package/src/responses/openai-responses-options.ts +312 -0
  95. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  96. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  97. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  98. package/src/speech/openai-speech-api.ts +38 -0
  99. package/src/speech/openai-speech-model.test.ts +202 -0
  100. package/src/speech/openai-speech-model.ts +137 -0
  101. package/src/speech/openai-speech-options.ts +22 -0
  102. package/src/tool/apply-patch.ts +141 -0
  103. package/src/tool/code-interpreter.ts +104 -0
  104. package/src/tool/file-search.ts +145 -0
  105. package/src/tool/image-generation.ts +126 -0
  106. package/src/tool/local-shell.test-d.ts +20 -0
  107. package/src/tool/local-shell.ts +72 -0
  108. package/src/tool/mcp.ts +125 -0
  109. package/src/tool/shell.ts +85 -0
  110. package/src/tool/web-search-preview.ts +139 -0
  111. package/src/tool/web-search.test-d.ts +13 -0
  112. package/src/tool/web-search.ts +179 -0
  113. package/src/transcription/openai-transcription-api.ts +37 -0
  114. package/src/transcription/openai-transcription-model.test.ts +507 -0
  115. package/src/transcription/openai-transcription-model.ts +232 -0
  116. package/src/transcription/openai-transcription-options.ts +50 -0
  117. package/src/transcription/transcription-test.mp3 +0 -0
  118. package/src/version.ts +6 -0
package/src/chat/convert-to-openai-chat-messages.ts
@@ -0,0 +1,225 @@
+ import {
+   SharedV3Warning,
+   LanguageModelV3Prompt,
+   UnsupportedFunctionalityError,
+ } from '@ai-sdk/provider';
+ import { OpenAIChatPrompt } from './openai-chat-prompt';
+ import { convertToBase64 } from '@ai-sdk/provider-utils';
+
+ export function convertToOpenAIChatMessages({
+   prompt,
+   systemMessageMode = 'system',
+ }: {
+   prompt: LanguageModelV3Prompt;
+   systemMessageMode?: 'system' | 'developer' | 'remove';
+ }): {
+   messages: OpenAIChatPrompt;
+   warnings: Array<SharedV3Warning>;
+ } {
+   const messages: OpenAIChatPrompt = [];
+   const warnings: Array<SharedV3Warning> = [];
+
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case 'system': {
+         switch (systemMessageMode) {
+           case 'system': {
+             messages.push({ role: 'system', content });
+             break;
+           }
+           case 'developer': {
+             messages.push({ role: 'developer', content });
+             break;
+           }
+           case 'remove': {
+             warnings.push({
+               type: 'other',
+               message: 'system messages are removed for this model',
+             });
+             break;
+           }
+           default: {
+             const _exhaustiveCheck: never = systemMessageMode;
+             throw new Error(
+               `Unsupported system message mode: ${_exhaustiveCheck}`,
+             );
+           }
+         }
+         break;
+       }
+
+       case 'user': {
+         if (content.length === 1 && content[0].type === 'text') {
+           messages.push({ role: 'user', content: content[0].text });
+           break;
+         }
+
+         messages.push({
+           role: 'user',
+           content: content.map((part, index) => {
+             switch (part.type) {
+               case 'text': {
+                 return { type: 'text', text: part.text };
+               }
+               case 'file': {
+                 if (part.mediaType.startsWith('image/')) {
+                   const mediaType =
+                     part.mediaType === 'image/*'
+                       ? 'image/jpeg'
+                       : part.mediaType;
+
+                   return {
+                     type: 'image_url',
+                     image_url: {
+                       url:
+                         part.data instanceof URL
+                           ? part.data.toString()
+                           : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+
+                       // OpenAI specific extension: image detail
+                       detail: part.providerOptions?.openai?.imageDetail,
+                     },
+                   };
+                 } else if (part.mediaType.startsWith('audio/')) {
+                   if (part.data instanceof URL) {
+                     throw new UnsupportedFunctionalityError({
+                       functionality: 'audio file parts with URLs',
+                     });
+                   }
+
+                   switch (part.mediaType) {
+                     case 'audio/wav': {
+                       return {
+                         type: 'input_audio',
+                         input_audio: {
+                           data: convertToBase64(part.data),
+                           format: 'wav',
+                         },
+                       };
+                     }
+                     case 'audio/mp3':
+                     case 'audio/mpeg': {
+                       return {
+                         type: 'input_audio',
+                         input_audio: {
+                           data: convertToBase64(part.data),
+                           format: 'mp3',
+                         },
+                       };
+                     }
+
+                     default: {
+                       throw new UnsupportedFunctionalityError({
+                         functionality: `audio content parts with media type ${part.mediaType}`,
+                       });
+                     }
+                   }
+                 } else if (part.mediaType === 'application/pdf') {
+                   if (part.data instanceof URL) {
+                     throw new UnsupportedFunctionalityError({
+                       functionality: 'PDF file parts with URLs',
+                     });
+                   }
+
+                   return {
+                     type: 'file',
+                     file:
+                       typeof part.data === 'string' &&
+                       part.data.startsWith('file-')
+                         ? { file_id: part.data }
+                         : {
+                             filename: part.filename ?? `part-${index}.pdf`,
+                             file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
+                           },
+                   };
+                 } else {
+                   throw new UnsupportedFunctionalityError({
+                     functionality: `file part media type ${part.mediaType}`,
+                   });
+                 }
+               }
+             }
+           }),
+         });
+
+         break;
+       }
+
+       case 'assistant': {
+         let text = '';
+         const toolCalls: Array<{
+           id: string;
+           type: 'function';
+           function: { name: string; arguments: string };
+         }> = [];
+
+         for (const part of content) {
+           switch (part.type) {
+             case 'text': {
+               text += part.text;
+               break;
+             }
+             case 'tool-call': {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: 'function',
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.input),
+                 },
+               });
+               break;
+             }
+           }
+         }
+
+         messages.push({
+           role: 'assistant',
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
+         });
+
+         break;
+       }
+
+       case 'tool': {
+         for (const toolResponse of content) {
+           if (toolResponse.type === 'tool-approval-response') {
+             continue;
+           }
+           const output = toolResponse.output;
+
+           let contentValue: string;
+           switch (output.type) {
+             case 'text':
+             case 'error-text':
+               contentValue = output.value;
+               break;
+             case 'execution-denied':
+               contentValue = output.reason ?? 'Tool execution denied.';
+               break;
+             case 'content':
+             case 'json':
+             case 'error-json':
+               contentValue = JSON.stringify(output.value);
+               break;
+           }
+
+           messages.push({
+             role: 'tool',
+             tool_call_id: toolResponse.toolCallId,
+             content: contentValue,
+           });
+         }
+         break;
+       }
+
+       default: {
+         const _exhaustiveCheck: never = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+
+   return { messages, warnings };
+ }
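
Note: a minimal usage sketch of the converter above (not part of the diff; the prompt shape follows what the code reads — a system message with string content and a user message with an array of text parts):

    // Hypothetical usage sketch, assuming local imports inside the package.
    import { convertToOpenAIChatMessages } from './convert-to-openai-chat-messages';

    const { messages, warnings } = convertToOpenAIChatMessages({
      prompt: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: [{ type: 'text', text: 'Hello!' }] },
      ],
      // 'developer' re-targets system messages; 'remove' drops them with a warning.
      systemMessageMode: 'system',
    });
    // messages: [{ role: 'system', ... }, { role: 'user', content: 'Hello!' }]
    // (a single text part is collapsed to plain string content); warnings: []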
package/src/chat/get-response-metadata.ts
@@ -0,0 +1,15 @@
+ export function getResponseMetadata({
+   id,
+   model,
+   created,
+ }: {
+   id?: string | undefined | null;
+   created?: number | undefined | null;
+   model?: string | undefined | null;
+ }) {
+   return {
+     id: id ?? undefined,
+     modelId: model ?? undefined,
+     timestamp: created ? new Date(created * 1000) : undefined,
+   };
+ }
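
Note: a small illustrative call of the helper above; `created` is a Unix timestamp in seconds, so it is scaled to milliseconds before constructing the `Date`, and a missing or zero `created` yields an undefined timestamp (values below are made up):

    // Hypothetical values, assuming a local import inside the package.
    import { getResponseMetadata } from './get-response-metadata';

    const metadata = getResponseMetadata({
      id: 'chatcmpl-123',
      model: 'gpt-4o-mini',
      created: 1_700_000_000, // seconds since epoch
    });
    // => { id: 'chatcmpl-123', modelId: 'gpt-4o-mini',
    //      timestamp: new Date(1_700_000_000 * 1000) }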
package/src/chat/map-openai-finish-reason.ts
@@ -0,0 +1,19 @@
+ import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+
+ export function mapOpenAIFinishReason(
+   finishReason: string | null | undefined,
+ ): LanguageModelV3FinishReason['unified'] {
+   switch (finishReason) {
+     case 'stop':
+       return 'stop';
+     case 'length':
+       return 'length';
+     case 'content_filter':
+       return 'content-filter';
+     case 'function_call':
+     case 'tool_calls':
+       return 'tool-calls';
+     default:
+       return 'other';
+   }
+ }
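
Note: the mapping collapses OpenAI finish reasons into the SDK's unified set; unknown or missing values fall through to 'other'. A few illustrative calls:

    // Assuming a local import inside the package.
    import { mapOpenAIFinishReason } from './map-openai-finish-reason';

    mapOpenAIFinishReason('tool_calls');     // 'tool-calls'
    mapOpenAIFinishReason('content_filter'); // 'content-filter'
    mapOpenAIFinishReason(undefined);        // 'other'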
package/src/chat/openai-chat-api.ts
@@ -0,0 +1,198 @@
+ import { JSONSchema7 } from '@ai-sdk/provider';
+ import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+ import { z } from 'zod/v4';
+ import { openaiErrorDataSchema } from '../openai-error';
+
+ export interface OpenAIChatFunctionTool {
+   type: 'function';
+   function: {
+     name: string;
+     description: string | undefined;
+     parameters: JSONSchema7;
+     strict?: boolean;
+   };
+ }
+
+ export type OpenAIChatToolChoice =
+   | 'auto'
+   | 'none'
+   | 'required'
+   | { type: 'function'; function: { name: string } };
+
+ // limited version of the schema, focussed on what is needed for the implementation
+ // this approach limits breakages when the API changes and increases efficiency
+ export const openaiChatResponseSchema = lazySchema(() =>
+   zodSchema(
+     z.object({
+       id: z.string().nullish(),
+       created: z.number().nullish(),
+       model: z.string().nullish(),
+       choices: z.array(
+         z.object({
+           message: z.object({
+             role: z.literal('assistant').nullish(),
+             content: z.string().nullish(),
+             tool_calls: z
+               .array(
+                 z.object({
+                   id: z.string().nullish(),
+                   type: z.literal('function'),
+                   function: z.object({
+                     name: z.string(),
+                     arguments: z.string(),
+                   }),
+                 }),
+               )
+               .nullish(),
+             annotations: z
+               .array(
+                 z.object({
+                   type: z.literal('url_citation'),
+                   url_citation: z.object({
+                     start_index: z.number(),
+                     end_index: z.number(),
+                     url: z.string(),
+                     title: z.string(),
+                   }),
+                 }),
+               )
+               .nullish(),
+           }),
+           index: z.number(),
+           logprobs: z
+             .object({
+               content: z
+                 .array(
+                   z.object({
+                     token: z.string(),
+                     logprob: z.number(),
+                     top_logprobs: z.array(
+                       z.object({
+                         token: z.string(),
+                         logprob: z.number(),
+                       }),
+                     ),
+                   }),
+                 )
+                 .nullish(),
+             })
+             .nullish(),
+           finish_reason: z.string().nullish(),
+         }),
+       ),
+       usage: z
+         .object({
+           prompt_tokens: z.number().nullish(),
+           completion_tokens: z.number().nullish(),
+           total_tokens: z.number().nullish(),
+           prompt_tokens_details: z
+             .object({
+               cached_tokens: z.number().nullish(),
+             })
+             .nullish(),
+           completion_tokens_details: z
+             .object({
+               reasoning_tokens: z.number().nullish(),
+               accepted_prediction_tokens: z.number().nullish(),
+               rejected_prediction_tokens: z.number().nullish(),
+             })
+             .nullish(),
+         })
+         .nullish(),
+     }),
+   ),
+ );
+
+ // limited version of the schema, focussed on what is needed for the implementation
+ // this approach limits breakages when the API changes and increases efficiency
+ export const openaiChatChunkSchema = lazySchema(() =>
+   zodSchema(
+     z.union([
+       z.object({
+         id: z.string().nullish(),
+         created: z.number().nullish(),
+         model: z.string().nullish(),
+         choices: z.array(
+           z.object({
+             delta: z
+               .object({
+                 role: z.enum(['assistant']).nullish(),
+                 content: z.string().nullish(),
+                 tool_calls: z
+                   .array(
+                     z.object({
+                       index: z.number(),
+                       id: z.string().nullish(),
+                       type: z.literal('function').nullish(),
+                       function: z.object({
+                         name: z.string().nullish(),
+                         arguments: z.string().nullish(),
+                       }),
+                     }),
+                   )
+                   .nullish(),
+                 annotations: z
+                   .array(
+                     z.object({
+                       type: z.literal('url_citation'),
+                       url_citation: z.object({
+                         start_index: z.number(),
+                         end_index: z.number(),
+                         url: z.string(),
+                         title: z.string(),
+                       }),
+                     }),
+                   )
+                   .nullish(),
+               })
+               .nullish(),
+             logprobs: z
+               .object({
+                 content: z
+                   .array(
+                     z.object({
+                       token: z.string(),
+                       logprob: z.number(),
+                       top_logprobs: z.array(
+                         z.object({
+                           token: z.string(),
+                           logprob: z.number(),
+                         }),
+                       ),
+                     }),
+                   )
+                   .nullish(),
+               })
+               .nullish(),
+             finish_reason: z.string().nullish(),
+             index: z.number(),
+           }),
+         ),
+         usage: z
+           .object({
+             prompt_tokens: z.number().nullish(),
+             completion_tokens: z.number().nullish(),
+             total_tokens: z.number().nullish(),
+             prompt_tokens_details: z
+               .object({
+                 cached_tokens: z.number().nullish(),
+               })
+               .nullish(),
+             completion_tokens_details: z
+               .object({
+                 reasoning_tokens: z.number().nullish(),
+                 accepted_prediction_tokens: z.number().nullish(),
+                 rejected_prediction_tokens: z.number().nullish(),
+               })
+               .nullish(),
+           })
+           .nullish(),
+       }),
+       openaiErrorDataSchema,
+     ]),
+   ),
+ );
+
+ export type OpenAIChatResponse = InferSchema<typeof openaiChatResponseSchema>;
+
+ export type OpenAIChatChunk = InferSchema<typeof openaiChatChunkSchema>;
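
Note: a hedged sketch of how the exported tool types above might be instantiated when assembling a request body; the `get_weather` tool and its JSON schema are made-up example values, only the types come from this file:

    // Hypothetical example values; assumes a local import inside the package.
    import { OpenAIChatFunctionTool, OpenAIChatToolChoice } from './openai-chat-api';

    const weatherTool: OpenAIChatFunctionTool = {
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Look up the current weather for a city.',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
        strict: true,
      },
    };

    // Force the model to call the tool by name.
    const toolChoice: OpenAIChatToolChoice = {
      type: 'function',
      function: { name: 'get_weather' },
    };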