@ai-sdk/openai 3.0.13 → 3.0.15

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (118)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +1 -1
  3. package/dist/index.d.ts +1 -1
  4. package/dist/index.js +1 -1
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1 -1
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +1 -1
  9. package/dist/internal/index.d.ts +1 -1
  10. package/dist/internal/index.js.map +1 -1
  11. package/dist/internal/index.mjs.map +1 -1
  12. package/package.json +5 -4
  13. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  14. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  15. package/src/chat/convert-openai-chat-usage.ts +57 -0
  16. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  17. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  18. package/src/chat/get-response-metadata.ts +15 -0
  19. package/src/chat/map-openai-finish-reason.ts +19 -0
  20. package/src/chat/openai-chat-api.ts +198 -0
  21. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  22. package/src/chat/openai-chat-language-model.ts +700 -0
  23. package/src/chat/openai-chat-options.ts +186 -0
  24. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  25. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  26. package/src/chat/openai-chat-prompt.ts +70 -0
  27. package/src/completion/convert-openai-completion-usage.ts +46 -0
  28. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  29. package/src/completion/get-response-metadata.ts +15 -0
  30. package/src/completion/map-openai-finish-reason.ts +19 -0
  31. package/src/completion/openai-completion-api.ts +81 -0
  32. package/src/completion/openai-completion-language-model.test.ts +752 -0
  33. package/src/completion/openai-completion-language-model.ts +336 -0
  34. package/src/completion/openai-completion-options.ts +58 -0
  35. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  36. package/src/embedding/openai-embedding-api.ts +13 -0
  37. package/src/embedding/openai-embedding-model.test.ts +146 -0
  38. package/src/embedding/openai-embedding-model.ts +95 -0
  39. package/src/embedding/openai-embedding-options.ts +30 -0
  40. package/src/image/openai-image-api.ts +35 -0
  41. package/src/image/openai-image-model.test.ts +722 -0
  42. package/src/image/openai-image-model.ts +305 -0
  43. package/src/image/openai-image-options.ts +28 -0
  44. package/src/index.ts +9 -0
  45. package/src/internal/index.ts +19 -0
  46. package/src/openai-config.ts +18 -0
  47. package/src/openai-error.test.ts +34 -0
  48. package/src/openai-error.ts +22 -0
  49. package/src/openai-language-model-capabilities.test.ts +93 -0
  50. package/src/openai-language-model-capabilities.ts +54 -0
  51. package/src/openai-provider.test.ts +98 -0
  52. package/src/openai-provider.ts +270 -0
  53. package/src/openai-tools.ts +114 -0
  54. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  55. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  56. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  57. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  58. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  59. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  60. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  61. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  62. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  63. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  64. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  65. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  66. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  67. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  68. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  71. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  72. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  73. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  74. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  75. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  76. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  77. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  78. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  79. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  80. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  81. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  82. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  83. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  84. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  85. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  86. package/src/responses/convert-openai-responses-usage.ts +53 -0
  87. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  88. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  89. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  90. package/src/responses/openai-responses-api.test.ts +89 -0
  91. package/src/responses/openai-responses-api.ts +1086 -0
  92. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  93. package/src/responses/openai-responses-language-model.ts +1932 -0
  94. package/src/responses/openai-responses-options.ts +312 -0
  95. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  96. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  97. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  98. package/src/speech/openai-speech-api.ts +38 -0
  99. package/src/speech/openai-speech-model.test.ts +202 -0
  100. package/src/speech/openai-speech-model.ts +137 -0
  101. package/src/speech/openai-speech-options.ts +22 -0
  102. package/src/tool/apply-patch.ts +141 -0
  103. package/src/tool/code-interpreter.ts +104 -0
  104. package/src/tool/file-search.ts +145 -0
  105. package/src/tool/image-generation.ts +126 -0
  106. package/src/tool/local-shell.test-d.ts +20 -0
  107. package/src/tool/local-shell.ts +72 -0
  108. package/src/tool/mcp.ts +125 -0
  109. package/src/tool/shell.ts +85 -0
  110. package/src/tool/web-search-preview.ts +139 -0
  111. package/src/tool/web-search.test-d.ts +13 -0
  112. package/src/tool/web-search.ts +179 -0
  113. package/src/transcription/openai-transcription-api.ts +37 -0
  114. package/src/transcription/openai-transcription-model.test.ts +507 -0
  115. package/src/transcription/openai-transcription-model.ts +232 -0
  116. package/src/transcription/openai-transcription-options.ts +50 -0
  117. package/src/transcription/transcription-test.mp3 +0 -0
  118. package/src/version.ts +6 -0
package/src/image/openai-image-model.ts ADDED
@@ -0,0 +1,305 @@
+ import {
+   ImageModelV3,
+   ImageModelV3File,
+   SharedV3Warning,
+ } from '@ai-sdk/provider';
+ import {
+   combineHeaders,
+   convertBase64ToUint8Array,
+   convertToFormData,
+   createJsonResponseHandler,
+   downloadBlob,
+   postFormDataToApi,
+   postJsonToApi,
+ } from '@ai-sdk/provider-utils';
+ import { OpenAIConfig } from '../openai-config';
+ import { openaiFailedResponseHandler } from '../openai-error';
+ import { openaiImageResponseSchema } from './openai-image-api';
+ import {
+   OpenAIImageModelId,
+   hasDefaultResponseFormat,
+   modelMaxImagesPerCall,
+ } from './openai-image-options';
+
+ interface OpenAIImageModelConfig extends OpenAIConfig {
+   _internal?: {
+     currentDate?: () => Date;
+   };
+ }
+
+ export class OpenAIImageModel implements ImageModelV3 {
+   readonly specificationVersion = 'v3';
+
+   get maxImagesPerCall(): number {
+     return modelMaxImagesPerCall[this.modelId] ?? 1;
+   }
+
+   get provider(): string {
+     return this.config.provider;
+   }
+
+   constructor(
+     readonly modelId: OpenAIImageModelId,
+     private readonly config: OpenAIImageModelConfig,
+   ) {}
+
+   async doGenerate({
+     prompt,
+     files,
+     mask,
+     n,
+     size,
+     aspectRatio,
+     seed,
+     providerOptions,
+     headers,
+     abortSignal,
+   }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
+     Awaited<ReturnType<ImageModelV3['doGenerate']>>
+   > {
+     const warnings: Array<SharedV3Warning> = [];
+
+     if (aspectRatio != null) {
+       warnings.push({
+         type: 'unsupported',
+         feature: 'aspectRatio',
+         details:
+           'This model does not support aspect ratio. Use `size` instead.',
+       });
+     }
+
+     if (seed != null) {
+       warnings.push({ type: 'unsupported', feature: 'seed' });
+     }
+
+     const currentDate = this.config._internal?.currentDate?.() ?? new Date();
+
+     if (files != null) {
+       const { value: response, responseHeaders } = await postFormDataToApi({
+         url: this.config.url({
+           path: '/images/edits',
+           modelId: this.modelId,
+         }),
+         headers: combineHeaders(this.config.headers(), headers),
+         formData: convertToFormData<OpenAIImageEditInput>({
+           model: this.modelId,
+           prompt,
+           image: await Promise.all(
+             files.map(file =>
+               file.type === 'file'
+                 ? new Blob(
+                     [
+                       file.data instanceof Uint8Array
+                         ? new Blob([file.data as BlobPart], {
+                             type: file.mediaType,
+                           })
+                         : new Blob([convertBase64ToUint8Array(file.data)], {
+                             type: file.mediaType,
+                           }),
+                     ],
+                     { type: file.mediaType },
+                   )
+                 : downloadBlob(file.url),
+             ),
+           ),
+           mask: mask != null ? await fileToBlob(mask) : undefined,
+           n,
+           size,
+           ...(providerOptions.openai ?? {}),
+         }),
+         failedResponseHandler: openaiFailedResponseHandler,
+         successfulResponseHandler: createJsonResponseHandler(
+           openaiImageResponseSchema,
+         ),
+         abortSignal,
+         fetch: this.config.fetch,
+       });
+
+       return {
+         images: response.data.map(item => item.b64_json),
+         warnings,
+         usage:
+           response.usage != null
+             ? {
+                 inputTokens: response.usage.input_tokens ?? undefined,
+                 outputTokens: response.usage.output_tokens ?? undefined,
+                 totalTokens: response.usage.total_tokens ?? undefined,
+               }
+             : undefined,
+         response: {
+           timestamp: currentDate,
+           modelId: this.modelId,
+           headers: responseHeaders,
+         },
+         providerMetadata: {
+           openai: {
+             images: response.data.map(item => ({
+               ...(item.revised_prompt
+                 ? { revisedPrompt: item.revised_prompt }
+                 : {}),
+               created: response.created ?? undefined,
+               size: response.size ?? undefined,
+               quality: response.quality ?? undefined,
+               background: response.background ?? undefined,
+               outputFormat: response.output_format ?? undefined,
+             })),
+           },
+         },
+       };
+     }
+
+     const { value: response, responseHeaders } = await postJsonToApi({
+       url: this.config.url({
+         path: '/images/generations',
+         modelId: this.modelId,
+       }),
+       headers: combineHeaders(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         prompt,
+         n,
+         size,
+         ...(providerOptions.openai ?? {}),
+         ...(!hasDefaultResponseFormat(this.modelId)
+           ? { response_format: 'b64_json' }
+           : {}),
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         openaiImageResponseSchema,
+       ),
+       abortSignal,
+       fetch: this.config.fetch,
+     });
+
+     return {
+       images: response.data.map(item => item.b64_json),
+       warnings,
+       usage:
+         response.usage != null
+           ? {
+               inputTokens: response.usage.input_tokens ?? undefined,
+               outputTokens: response.usage.output_tokens ?? undefined,
+               totalTokens: response.usage.total_tokens ?? undefined,
+             }
+           : undefined,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+       },
+       providerMetadata: {
+         openai: {
+           images: response.data.map(item => ({
+             ...(item.revised_prompt
+               ? { revisedPrompt: item.revised_prompt }
+               : {}),
+             created: response.created ?? undefined,
+             size: response.size ?? undefined,
+             quality: response.quality ?? undefined,
+             background: response.background ?? undefined,
+             outputFormat: response.output_format ?? undefined,
+           })),
+         },
+       },
+     };
+   }
+ }
+
+ type OpenAIImageEditInput = {
+   /**
+    * Allows setting transparency for the background of the generated image(s).
+    * This parameter is only supported for `gpt-image-1`. Must be one of
+    * `transparent`, `opaque` or `auto` (default value). When `auto` is used,
+    * the model will automatically determine the best background for the image.
+    *
+    * If `transparent`, the output format needs to support transparency, so it
+    * should be set to either `png` (default value) or `webp`.
+    */
+   background?: 'transparent' | 'opaque' | 'auto';
+   /**
+    * The image(s) to edit. Must be a supported image file or an array of images.
+    *
+    * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less
+    * than 50MB. You can provide up to 16 images.
+    *
+    * For `dall-e-2`, you can only provide one image, and it should be a square
+    * `png` file less than 4MB.
+    */
+   image: Blob | Blob[];
+   /**
+    * Controls how much effort the model spends matching the style and features
+    * (especially facial features) of the input images. Only supported for
+    * `gpt-image-1`.
+    */
+   input_fidelity?: ('high' | 'low') | null;
+   /**
+    * An additional image whose fully transparent areas (e.g. where alpha is
+    * zero) indicate where `image` should be edited. If there are multiple
+    * images provided, the mask will be applied on the first image. Must be a
+    * valid PNG file, less than 4MB, and have the same dimensions as `image`.
+    */
+   mask?: Blob;
+   /**
+    * The model to use for image generation. Only `dall-e-2` and `gpt-image-1`
+    * are supported. Defaults to `dall-e-2` unless a parameter specific to
+    * `gpt-image-1` is used.
+    */
+   model?: 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
+   /**
+    * The number of images to generate. Must be between 1 and 10.
+    */
+   n?: number;
+   /**
+    * The compression level (0-100%) for the generated images. This parameter
+    * is only supported for `gpt-image-1` with the `webp` or `jpeg` output
+    * formats, and defaults to 100.
+    */
+   output_compression?: number;
+   /**
+    * The format in which the generated images are returned. This parameter is
+    * only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+    * The default value is `png`.
+    */
+   output_format?: 'png' | 'jpeg' | 'webp';
+   /**
+    * The number of partial images to emit while streaming. Only relevant when
+    * `stream` is `true`.
+    */
+   partial_images?: number | null;
+   /**
+    * A text description of the desired image(s). The maximum length is 1000
+    * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+    */
+   prompt?: string;
+   /**
+    * The quality of the image that will be generated. `high`, `medium` and
+    * `low` are only supported for `gpt-image-1`. `dall-e-2` only supports
+    * `standard` quality. Defaults to `auto`.
+    */
+   quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto';
+   /**
+    * The format in which the generated images are returned. Must be one of
+    * `url` or `b64_json`. URLs are only valid for 60 minutes after the image
+    * has been generated. This parameter is only supported for `dall-e-2`, as
+    * `gpt-image-1` will always return base64-encoded images.
+    */
+   response_format?: 'url' | 'b64_json';
+   /**
+    * The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+    * (landscape), `1024x1536` (portrait), or `auto` (default value) for
+    * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for
+    * `dall-e-2`.
+    */
+   size?: `${number}x${number}`;
+   /**
+    * Edit the image in streaming mode. Defaults to `false`. See the
+    * [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+    * for more information.
+    */
+   stream?: boolean;
+   /**
+    * A unique identifier representing your end-user, which can help OpenAI to
+    * monitor and detect abuse.
+    * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    */
+   user?: string;
+ };
+
+ async function fileToBlob(
+   file: ImageModelV3File | undefined,
+ ): Promise<Blob | undefined> {
+   if (!file) return undefined;
+
+   if (file.type === 'url') {
+     return downloadBlob(file.url);
+   }
+
+   const data =
+     file.data instanceof Uint8Array
+       ? file.data
+       : convertBase64ToUint8Array(file.data);
+
+   return new Blob([data as BlobPart], { type: file.mediaType });
+ }
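Note: when `files` is present, `doGenerate` posts multipart form data to `/images/edits`; otherwise it posts JSON to `/images/generations`. A minimal sketch of driving the edit path through the provider (the model id, file name, and prompt are placeholders, and the option bag simply mirrors the `doGenerate` parameters above):

  import { readFile } from 'node:fs/promises';
  import { createOpenAI } from '@ai-sdk/openai';

  const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const model = openai.image('gpt-image-1');

  const result = await model.doGenerate({
    prompt: 'Add a red hat to the subject',
    // A Uint8Array (readFile returns one) takes the Blob branch of the
    // files.map above; a base64 string would be decoded first.
    files: [
      { type: 'file', mediaType: 'image/png', data: await readFile('input.png') },
    ],
    mask: undefined,
    n: 1,
    size: '1024x1024',
    aspectRatio: undefined,
    seed: undefined,
    providerOptions: {},
    headers: undefined,
    abortSignal: undefined,
  });
  // result.images holds base64-encoded images; result.providerMetadata.openai
  // echoes revised prompt, size, quality, background, and output format.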
package/src/image/openai-image-options.ts ADDED
@@ -0,0 +1,28 @@
+ export type OpenAIImageModelId =
+   | 'dall-e-3'
+   | 'dall-e-2'
+   | 'gpt-image-1'
+   | 'gpt-image-1-mini'
+   | 'gpt-image-1.5'
+   | (string & {});
+
+ // https://platform.openai.com/docs/guides/images
+ export const modelMaxImagesPerCall: Record<OpenAIImageModelId, number> = {
+   'dall-e-3': 1,
+   'dall-e-2': 10,
+   'gpt-image-1': 10,
+   'gpt-image-1-mini': 10,
+   'gpt-image-1.5': 10,
+ };
+
+ const defaultResponseFormatPrefixes = [
+   'gpt-image-1-mini',
+   'gpt-image-1.5',
+   'gpt-image-1',
+ ];
+
+ export function hasDefaultResponseFormat(modelId: string): boolean {
+   return defaultResponseFormatPrefixes.some(prefix =>
+     modelId.startsWith(prefix),
+   );
+ }
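Note: every `gpt-image-1*` id (including `gpt-image-1-mini` and `gpt-image-1.5`) matches a prefix above and therefore returns base64 by default, so the model only injects `response_format: 'b64_json'` for the `dall-e-*` models. For example:

  hasDefaultResponseFormat('gpt-image-1.5'); // true -> no response_format sent
  hasDefaultResponseFormat('dall-e-3');      // false -> response_format: 'b64_json' is added
  modelMaxImagesPerCall['dall-e-3'];         // 1
  modelMaxImagesPerCall['my-custom-model'];  // undefined -> maxImagesPerCall falls back to 1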
package/src/index.ts ADDED
@@ -0,0 +1,9 @@
+ export { createOpenAI, openai } from './openai-provider';
+ export type { OpenAIProvider, OpenAIProviderSettings } from './openai-provider';
+ export type { OpenAIResponsesProviderOptions } from './responses/openai-responses-options';
+ export type { OpenAIChatLanguageModelOptions } from './chat/openai-chat-options';
+ export type {
+   OpenaiResponsesTextProviderMetadata,
+   OpenaiResponsesSourceDocumentProviderMetadata,
+ } from './responses/openai-responses-provider-metadata';
+ export { VERSION } from './version';
package/src/internal/index.ts ADDED
@@ -0,0 +1,19 @@
+ export * from '../chat/openai-chat-language-model';
+ export * from '../chat/openai-chat-options';
+ export * from '../completion/openai-completion-language-model';
+ export * from '../completion/openai-completion-options';
+ export * from '../embedding/openai-embedding-model';
+ export * from '../embedding/openai-embedding-options';
+ export * from '../image/openai-image-model';
+ export * from '../image/openai-image-options';
+ export * from '../transcription/openai-transcription-model';
+ export * from '../transcription/openai-transcription-options';
+ export * from '../speech/openai-speech-model';
+ export * from '../speech/openai-speech-options';
+ export * from '../responses/openai-responses-language-model';
+ export * from '../responses/openai-responses-provider-metadata';
+ export * from '../tool/apply-patch';
+ export * from '../tool/code-interpreter';
+ export * from '../tool/file-search';
+ export * from '../tool/image-generation';
+ export * from '../tool/web-search-preview';
package/src/openai-config.ts ADDED
@@ -0,0 +1,18 @@
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+
+ export type OpenAIConfig = {
+   provider: string;
+   url: (options: { modelId: string; path: string }) => string;
+   headers: () => Record<string, string | undefined>;
+   fetch?: FetchFunction;
+   generateId?: () => string;
+   /**
+    * File ID prefixes used to identify file IDs in the Responses API.
+    * When undefined, all file data is treated as base64 content.
+    *
+    * Examples:
+    * - OpenAI: ['file-'] for IDs like 'file-abc123'
+    * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
+    */
+   fileIdPrefixes?: readonly string[];
+ };
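Note: a config object satisfying this type might look as follows (a sketch; the provider name and URL builder are illustrative, not values taken from this package):

  const config: OpenAIConfig = {
    provider: 'openai.chat',
    url: ({ path }) => `https://api.openai.com/v1${path}`,
    headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    // 'file-abc123' is then treated as a file ID; anything else as base64 data.
    fileIdPrefixes: ['file-'],
  };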
package/src/openai-error.test.ts ADDED
@@ -0,0 +1,34 @@
+ import { safeParseJSON } from '@ai-sdk/provider-utils';
+ import { openaiErrorDataSchema } from './openai-error';
+ import { describe, it, expect } from 'vitest';
+
+ describe('openaiErrorDataSchema', () => {
+   it('should parse OpenRouter resource exhausted error', async () => {
+     const error = `
+ {"error":{"message":"{\\n \\"error\\": {\\n \\"code\\": 429,\\n \\"message\\": \\"Resource has been exhausted (e.g. check quota).\\",\\n \\"status\\": \\"RESOURCE_EXHAUSTED\\"\\n }\\n}\\n","code":429}}
+ `;
+
+     const result = await safeParseJSON({
+       text: error,
+       schema: openaiErrorDataSchema,
+     });
+
+     expect(result).toStrictEqual({
+       success: true,
+       value: {
+         error: {
+           message:
+             '{\n "error": {\n "code": 429,\n "message": "Resource has been exhausted (e.g. check quota).",\n "status": "RESOURCE_EXHAUSTED"\n }\n}\n',
+           code: 429,
+         },
+       },
+       rawValue: {
+         error: {
+           message:
+             '{\n "error": {\n "code": 429,\n "message": "Resource has been exhausted (e.g. check quota).",\n "status": "RESOURCE_EXHAUSTED"\n }\n}\n',
+           code: 429,
+         },
+       },
+     });
+   });
+ });
package/src/openai-error.ts ADDED
@@ -0,0 +1,22 @@
+ import { z } from 'zod/v4';
+ import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';
+
+ export const openaiErrorDataSchema = z.object({
+   error: z.object({
+     message: z.string(),
+
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: z.string().nullish(),
+     param: z.any().nullish(),
+     code: z.union([z.string(), z.number()]).nullish(),
+   }),
+ });
+
+ export type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema>;
+
+ export const openaiFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: openaiErrorDataSchema,
+   errorToMessage: data => data.error.message,
+ });
package/src/openai-language-model-capabilities.test.ts ADDED
@@ -0,0 +1,93 @@
+ import { describe, it, expect } from 'vitest';
+ import { getOpenAILanguageModelCapabilities } from './openai-language-model-capabilities';
+
+ describe('getOpenAILanguageModelCapabilities', () => {
+   describe('isReasoningModel', () => {
+     it.each([
+       ['gpt-4.1', false],
+       ['gpt-4.1-2025-04-14', false],
+       ['gpt-4.1-mini', false],
+       ['gpt-4.1-mini-2025-04-14', false],
+       ['gpt-4.1-nano', false],
+       ['gpt-4.1-nano-2025-04-14', false],
+       ['gpt-4o', false],
+       ['gpt-4o-2024-05-13', false],
+       ['gpt-4o-2024-08-06', false],
+       ['gpt-4o-2024-11-20', false],
+       ['gpt-4o-audio-preview', false],
+       ['gpt-4o-audio-preview-2024-10-01', false],
+       ['gpt-4o-audio-preview-2024-12-17', false],
+       ['gpt-4o-search-preview', false],
+       ['gpt-4o-search-preview-2025-03-11', false],
+       ['gpt-4o-mini-search-preview', false],
+       ['gpt-4o-mini-search-preview-2025-03-11', false],
+       ['gpt-4o-mini', false],
+       ['gpt-4o-mini-2024-07-18', false],
+       ['gpt-4-turbo', false],
+       ['gpt-4-turbo-2024-04-09', false],
+       ['gpt-4-turbo-preview', false],
+       ['gpt-4-0125-preview', false],
+       ['gpt-4-1106-preview', false],
+       ['gpt-4', false],
+       ['gpt-4-0613', false],
+       ['gpt-4.5-preview', false],
+       ['gpt-4.5-preview-2025-02-27', false],
+       ['gpt-3.5-turbo-0125', false],
+       ['gpt-3.5-turbo', false],
+       ['gpt-3.5-turbo-1106', false],
+       ['chatgpt-4o-latest', false],
+       ['gpt-5-chat-latest', false],
+       ['o1', true],
+       ['o1-2024-12-17', true],
+       ['o3-mini', true],
+       ['o3-mini-2025-01-31', true],
+       ['o3', true],
+       ['o3-2025-04-16', true],
+       ['o4-mini', true],
+       ['o4-mini-2025-04-16', true],
+       ['codex-mini-latest', true],
+       ['computer-use-preview', true],
+       ['gpt-5', true],
+       ['gpt-5-2025-08-07', true],
+       ['gpt-5-codex', true],
+       ['gpt-5-mini', true],
+       ['gpt-5-mini-2025-08-07', true],
+       ['gpt-5-nano', true],
+       ['gpt-5-nano-2025-08-07', true],
+       ['gpt-5-pro', true],
+       ['gpt-5-pro-2025-10-06', true],
+       ['new-unknown-model', false],
+       ['ft:gpt-4o-2024-08-06:org:custom:abc123', false],
+       ['custom-model', false],
+     ])('%s reasoning model: %s', (modelId, expectedCapabilities) => {
+       expect(
+         getOpenAILanguageModelCapabilities(modelId).isReasoningModel,
+       ).toEqual(expectedCapabilities);
+     });
+   });
+
+   describe('supportsNonReasoningParameters', () => {
+     it.each([
+       ['gpt-5.1', true],
+       ['gpt-5.1-chat-latest', true],
+       ['gpt-5.1-codex-mini', true],
+       ['gpt-5.1-codex', true],
+       ['gpt-5.2', true],
+       ['gpt-5.2-pro', true],
+       ['gpt-5.2-chat-latest', true],
+       ['gpt-5', false],
+       ['gpt-5-mini', false],
+       ['gpt-5-nano', false],
+       ['gpt-5-pro', false],
+       ['gpt-5-chat-latest', false],
+     ])(
+       '%s supports non-reasoning parameters: %s',
+       (modelId, expectedCapabilities) => {
+         expect(
+           getOpenAILanguageModelCapabilities(modelId)
+             .supportsNonReasoningParameters,
+         ).toEqual(expectedCapabilities);
+       },
+     );
+   });
+ });
package/src/openai-language-model-capabilities.ts ADDED
@@ -0,0 +1,54 @@
+ export type OpenAILanguageModelCapabilities = {
+   isReasoningModel: boolean;
+   systemMessageMode: 'remove' | 'system' | 'developer';
+   supportsFlexProcessing: boolean;
+   supportsPriorityProcessing: boolean;
+
+   /**
+    * Allow temperature, topP, logProbs when reasoningEffort is none.
+    */
+   supportsNonReasoningParameters: boolean;
+ };
+
+ export function getOpenAILanguageModelCapabilities(
+   modelId: string,
+ ): OpenAILanguageModelCapabilities {
+   const supportsFlexProcessing =
+     modelId.startsWith('o3') ||
+     modelId.startsWith('o4-mini') ||
+     (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));
+
+   const supportsPriorityProcessing =
+     modelId.startsWith('gpt-4') ||
+     modelId.startsWith('gpt-5-mini') ||
+     (modelId.startsWith('gpt-5') &&
+       !modelId.startsWith('gpt-5-nano') &&
+       !modelId.startsWith('gpt-5-chat')) ||
+     modelId.startsWith('o3') ||
+     modelId.startsWith('o4-mini');
+
+   // Use allowlist approach: only known reasoning models should use 'developer' role.
+   // This prevents issues with fine-tuned models, third-party models, and custom models.
+   const isReasoningModel =
+     modelId.startsWith('o1') ||
+     modelId.startsWith('o3') ||
+     modelId.startsWith('o4-mini') ||
+     modelId.startsWith('codex-mini') ||
+     modelId.startsWith('computer-use-preview') ||
+     (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));
+
+   // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
+   // GPT-5.1 and GPT-5.2 support temperature, topP, logProbs when reasoningEffort is none.
+   const supportsNonReasoningParameters =
+     modelId.startsWith('gpt-5.1') || modelId.startsWith('gpt-5.2');
+
+   const systemMessageMode = isReasoningModel ? 'developer' : 'system';
+
+   return {
+     supportsFlexProcessing,
+     supportsPriorityProcessing,
+     isReasoningModel,
+     systemMessageMode,
+     supportsNonReasoningParameters,
+   };
+ }
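Note: a sketch of how the prefix rules combine for two representative ids (results follow directly from the checks above):

  const gpt51 = getOpenAILanguageModelCapabilities('gpt-5.1');
  // isReasoningModel: true               ('gpt-5' prefix, not 'gpt-5-chat')
  // systemMessageMode: 'developer'
  // supportsNonReasoningParameters: true ('gpt-5.1' prefix)

  const gpt4o = getOpenAILanguageModelCapabilities('gpt-4o');
  // isReasoningModel: false -> systemMessageMode: 'system'
  // supportsPriorityProcessing: true     ('gpt-4' prefix)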
package/src/openai-provider.test.ts ADDED
@@ -0,0 +1,98 @@
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+ import { createOpenAI } from './openai-provider';
+
+ vi.mock('./version', () => ({
+   VERSION: '0.0.0-test',
+ }));
+
+ const createSuccessfulResponse = () =>
+   new Response(
+     JSON.stringify({
+       object: 'list',
+       data: [
+         {
+           object: 'embedding',
+           index: 0,
+           embedding: [0.1, 0.2],
+         },
+       ],
+       model: 'text-embedding-3-small',
+       usage: { prompt_tokens: 1, total_tokens: 1 },
+     }),
+     {
+       status: 200,
+       headers: { 'Content-Type': 'application/json' },
+     },
+   );
+
+ const createFetchMock = () =>
+   vi.fn().mockResolvedValue(createSuccessfulResponse());
+
+ describe('createOpenAI', () => {
+   describe('baseURL configuration', () => {
+     const originalBaseUrl = process.env.OPENAI_BASE_URL;
+
+     beforeEach(() => {
+       vi.restoreAllMocks();
+     });
+
+     afterEach(() => {
+       process.env.OPENAI_BASE_URL = originalBaseUrl;
+     });
+
+     it('uses the default OpenAI base URL when not provided', async () => {
+       delete process.env.OPENAI_BASE_URL;
+
+       const fetchMock = createFetchMock();
+       const provider = createOpenAI({
+         apiKey: 'test-api-key',
+         fetch: fetchMock,
+       });
+
+       await provider.embedding('text-embedding-3-small').doEmbed({
+         values: ['hello'],
+       });
+
+       expect(fetchMock).toHaveBeenCalledTimes(1);
+       const call = fetchMock.mock.calls[0]!;
+       expect(call[0]).toBe('https://api.openai.com/v1/embeddings');
+     });
+
+     it('uses OPENAI_BASE_URL when set', async () => {
+       process.env.OPENAI_BASE_URL = 'https://proxy.openai.example/v1/';
+
+       const fetchMock = createFetchMock();
+       const provider = createOpenAI({
+         apiKey: 'test-api-key',
+         fetch: fetchMock,
+       });
+
+       await provider.embedding('text-embedding-3-small').doEmbed({
+         values: ['hello'],
+       });
+
+       expect(fetchMock).toHaveBeenCalledTimes(1);
+       const call = fetchMock.mock.calls[0]!;
+       expect(call[0]).toBe('https://proxy.openai.example/v1/embeddings');
+     });
+
+     it('prefers the baseURL option over OPENAI_BASE_URL', async () => {
+       process.env.OPENAI_BASE_URL = 'https://env.openai.example/v1';
+
+       const fetchMock = createFetchMock();
+       const provider = createOpenAI({
+         apiKey: 'test-api-key',
+         baseURL: 'https://option.openai.example/v1/',
+         fetch: fetchMock,
+       });
+
+       await provider.embedding('text-embedding-3-small').doEmbed({
+         values: ['hello'],
+       });
+
+       expect(fetchMock).toHaveBeenCalledTimes(1);
+       const call = fetchMock.mock.calls[0]!;
+       expect(call[0]).toBe('https://option.openai.example/v1/embeddings');
+     });
+   });
+ });
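Note: together these tests pin down the base URL resolution order: the explicit `baseURL` option wins, then the `OPENAI_BASE_URL` environment variable, then the default `https://api.openai.com/v1`. In application code (the proxy URL is illustrative):

  import { createOpenAI } from '@ai-sdk/openai';

  const openai = createOpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: 'https://proxy.internal.example/v1', // overrides OPENAI_BASE_URL
  });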