@ai-sdk/openai 4.0.0-beta.3 → 4.0.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +302 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +139 -36
  4. package/dist/index.js +2200 -1436
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +107 -41
  7. package/dist/internal/index.js +1969 -1457
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/03-openai.mdx +274 -9
  10. package/package.json +9 -12
  11. package/src/chat/convert-openai-chat-usage.ts +2 -2
  12. package/src/chat/convert-to-openai-chat-messages.ts +26 -15
  13. package/src/chat/map-openai-finish-reason.ts +2 -2
  14. package/src/chat/openai-chat-language-model.ts +32 -25
  15. package/src/chat/openai-chat-options.ts +5 -0
  16. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  17. package/src/completion/convert-openai-completion-usage.ts +2 -2
  18. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  19. package/src/completion/map-openai-finish-reason.ts +2 -2
  20. package/src/completion/openai-completion-language-model.ts +20 -20
  21. package/src/embedding/openai-embedding-model.ts +5 -5
  22. package/src/files/openai-files-api.ts +17 -0
  23. package/src/files/openai-files-options.ts +18 -0
  24. package/src/files/openai-files.ts +102 -0
  25. package/src/image/openai-image-model.ts +9 -9
  26. package/src/index.ts +2 -0
  27. package/src/openai-config.ts +5 -5
  28. package/src/openai-language-model-capabilities.ts +3 -2
  29. package/src/openai-provider.ts +54 -21
  30. package/src/openai-tools.ts +12 -1
  31. package/src/responses/convert-openai-responses-usage.ts +2 -2
  32. package/src/responses/convert-to-openai-responses-input.ts +188 -14
  33. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  34. package/src/responses/openai-responses-api.ts +136 -2
  35. package/src/responses/openai-responses-language-model.ts +233 -37
  36. package/src/responses/openai-responses-options.ts +24 -2
  37. package/src/responses/openai-responses-prepare-tools.ts +47 -14
  38. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  39. package/src/skills/openai-skills-api.ts +31 -0
  40. package/src/skills/openai-skills.ts +87 -0
  41. package/src/speech/openai-speech-model.ts +7 -7
  42. package/src/tool/custom.ts +0 -6
  43. package/src/tool/shell.ts +7 -2
  44. package/src/tool/tool-search.ts +98 -0
  45. package/src/transcription/openai-transcription-model.ts +8 -8
  46. package/dist/index.d.mts +0 -1107
  47. package/dist/index.mjs +0 -6497
  48. package/dist/index.mjs.map +0 -1
  49. package/dist/internal/index.d.mts +0 -1137
  50. package/dist/internal/index.mjs +0 -6310
  51. package/dist/internal/index.mjs.map +0 -1
@@ -51,8 +51,13 @@ export type OpenAIChatModelId =
51
51
  | 'gpt-5.2-chat-latest'
52
52
  | 'gpt-5.2-pro'
53
53
  | 'gpt-5.2-pro-2025-12-11'
54
+ | 'gpt-5.3-chat-latest'
54
55
  | 'gpt-5.4'
55
56
  | 'gpt-5.4-2026-03-05'
57
+ | 'gpt-5.4-mini'
58
+ | 'gpt-5.4-mini-2026-03-17'
59
+ | 'gpt-5.4-nano'
60
+ | 'gpt-5.4-nano-2026-03-17'
56
61
  | 'gpt-5.4-pro'
57
62
  | 'gpt-5.4-pro-2026-03-05'
58
63
  | (string & {});
@@ -1,6 +1,6 @@
1
1
  import {
2
- LanguageModelV3CallOptions,
3
- SharedV3Warning,
2
+ LanguageModelV4CallOptions,
3
+ SharedV4Warning,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
@@ -12,17 +12,17 @@ export function prepareChatTools({
12
12
  tools,
13
13
  toolChoice,
14
14
  }: {
15
- tools: LanguageModelV3CallOptions['tools'];
16
- toolChoice?: LanguageModelV3CallOptions['toolChoice'];
15
+ tools: LanguageModelV4CallOptions['tools'];
16
+ toolChoice?: LanguageModelV4CallOptions['toolChoice'];
17
17
  }): {
18
18
  tools?: OpenAIChatFunctionTool[];
19
19
  toolChoice?: OpenAIChatToolChoice;
20
- toolWarnings: Array<SharedV3Warning>;
20
+ toolWarnings: Array<SharedV4Warning>;
21
21
  } {
22
22
  // when the tools array is empty, change it to undefined to prevent errors:
23
23
  tools = tools?.length ? tools : undefined;
24
24
 
25
- const toolWarnings: SharedV3Warning[] = [];
25
+ const toolWarnings: SharedV4Warning[] = [];
26
26
 
27
27
  if (tools == null) {
28
28
  return { tools: undefined, toolChoice: undefined, toolWarnings };
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAICompletionUsage = {
4
4
  prompt_tokens?: number | null;
@@ -8,7 +8,7 @@ export type OpenAICompletionUsage = {
8
8
 
9
9
  export function convertOpenAICompletionUsage(
10
10
  usage: OpenAICompletionUsage | undefined | null,
11
- ): LanguageModelV3Usage {
11
+ ): LanguageModelV4Usage {
12
12
  if (usage == null) {
13
13
  return {
14
14
  inputTokens: {
@@ -1,6 +1,6 @@
1
1
  import {
2
2
  InvalidPromptError,
3
- LanguageModelV3Prompt,
3
+ LanguageModelV4Prompt,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
6
 
@@ -9,7 +9,7 @@ export function convertToOpenAICompletionPrompt({
9
9
  user = 'user',
10
10
  assistant = 'assistant',
11
11
  }: {
12
- prompt: LanguageModelV3Prompt;
12
+ prompt: LanguageModelV4Prompt;
13
13
  user?: string;
14
14
  assistant?: string;
15
15
  }): {
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
1
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIFinishReason(
4
4
  finishReason: string | null | undefined,
5
- ): LanguageModelV3FinishReason['unified'] {
5
+ ): LanguageModelV4FinishReason['unified'] {
6
6
  switch (finishReason) {
7
7
  case 'stop':
8
8
  return 'stop';
@@ -1,12 +1,12 @@
1
1
  import {
2
- LanguageModelV3,
3
- LanguageModelV3CallOptions,
4
- LanguageModelV3FinishReason,
5
- LanguageModelV3GenerateResult,
6
- LanguageModelV3StreamPart,
7
- LanguageModelV3StreamResult,
8
- SharedV3ProviderMetadata,
9
- SharedV3Warning,
2
+ LanguageModelV4,
3
+ LanguageModelV4CallOptions,
4
+ LanguageModelV4FinishReason,
5
+ LanguageModelV4GenerateResult,
6
+ LanguageModelV4StreamPart,
7
+ LanguageModelV4StreamResult,
8
+ SharedV4ProviderMetadata,
9
+ SharedV4Warning,
10
10
  } from '@ai-sdk/provider';
11
11
  import {
12
12
  combineHeaders,
@@ -42,8 +42,8 @@ type OpenAICompletionConfig = {
42
42
  fetch?: FetchFunction;
43
43
  };
44
44
 
45
- export class OpenAICompletionLanguageModel implements LanguageModelV3 {
46
- readonly specificationVersion = 'v3';
45
+ export class OpenAICompletionLanguageModel implements LanguageModelV4 {
46
+ readonly specificationVersion = 'v4';
47
47
 
48
48
  readonly modelId: OpenAICompletionModelId;
49
49
 
@@ -83,8 +83,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
83
83
  toolChoice,
84
84
  seed,
85
85
  providerOptions,
86
- }: LanguageModelV3CallOptions) {
87
- const warnings: SharedV3Warning[] = [];
86
+ }: LanguageModelV4CallOptions) {
87
+ const warnings: SharedV4Warning[] = [];
88
88
 
89
89
  // Parse provider options
90
90
  const openaiOptions = {
@@ -161,8 +161,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
161
161
  }
162
162
 
163
163
  async doGenerate(
164
- options: LanguageModelV3CallOptions,
165
- ): Promise<LanguageModelV3GenerateResult> {
164
+ options: LanguageModelV4CallOptions,
165
+ ): Promise<LanguageModelV4GenerateResult> {
166
166
  const { args, warnings } = await this.getArgs(options);
167
167
 
168
168
  const {
@@ -186,7 +186,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
186
186
 
187
187
  const choice = response.choices[0];
188
188
 
189
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
189
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
190
190
 
191
191
  if (choice.logprobs != null) {
192
192
  providerMetadata.openai.logprobs = choice.logprobs;
@@ -211,8 +211,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
211
211
  }
212
212
 
213
213
  async doStream(
214
- options: LanguageModelV3CallOptions,
215
- ): Promise<LanguageModelV3StreamResult> {
214
+ options: LanguageModelV4CallOptions,
215
+ ): Promise<LanguageModelV4StreamResult> {
216
216
  const { args, warnings } = await this.getArgs(options);
217
217
 
218
218
  const body = {
@@ -239,11 +239,11 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
239
239
  fetch: this.config.fetch,
240
240
  });
241
241
 
242
- let finishReason: LanguageModelV3FinishReason = {
242
+ let finishReason: LanguageModelV4FinishReason = {
243
243
  unified: 'other',
244
244
  raw: undefined,
245
245
  };
246
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
246
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
247
247
  let usage: OpenAICompletionUsage | undefined = undefined;
248
248
  let isFirstChunk = true;
249
249
 
@@ -251,7 +251,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
251
251
  stream: response.pipeThrough(
252
252
  new TransformStream<
253
253
  ParseResult<OpenAICompletionChunk>,
254
- LanguageModelV3StreamPart
254
+ LanguageModelV4StreamPart
255
255
  >({
256
256
  start(controller) {
257
257
  controller.enqueue({ type: 'stream-start', warnings });
@@ -1,5 +1,5 @@
1
1
  import {
2
- EmbeddingModelV3,
2
+ EmbeddingModelV4,
3
3
  TooManyEmbeddingValuesForCallError,
4
4
  } from '@ai-sdk/provider';
5
5
  import {
@@ -16,8 +16,8 @@ import {
16
16
  } from './openai-embedding-options';
17
17
  import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';
18
18
 
19
- export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
20
- readonly specificationVersion = 'v3';
19
+ export class OpenAIEmbeddingModel implements EmbeddingModelV4 {
20
+ readonly specificationVersion = 'v4';
21
21
  readonly modelId: OpenAIEmbeddingModelId;
22
22
  readonly maxEmbeddingsPerCall = 2048;
23
23
  readonly supportsParallelCalls = true;
@@ -38,8 +38,8 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
38
38
  headers,
39
39
  abortSignal,
40
40
  providerOptions,
41
- }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
42
- Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
41
+ }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
42
+ Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
43
43
  > {
44
44
  if (values.length > this.maxEmbeddingsPerCall) {
45
45
  throw new TooManyEmbeddingValuesForCallError({
@@ -0,0 +1,17 @@
1
+ import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
2
+ import { z } from 'zod/v4';
3
+
4
+ export const openaiFilesResponseSchema = lazySchema(() =>
5
+ zodSchema(
6
+ z.object({
7
+ id: z.string(),
8
+ object: z.string().nullish(),
9
+ bytes: z.number().nullish(),
10
+ created_at: z.number().nullish(),
11
+ filename: z.string().nullish(),
12
+ purpose: z.string().nullish(),
13
+ status: z.string().nullish(),
14
+ expires_at: z.number().nullish(),
15
+ }),
16
+ ),
17
+ );
@@ -0,0 +1,18 @@
1
+ import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
2
+ import { z } from 'zod/v4';
3
+
4
+ export const openaiFilesOptionsSchema = lazySchema(() =>
5
+ zodSchema(
6
+ z.object({
7
+ /*
8
+ * Required by the OpenAI API, but optional here because
9
+ * the SDK defaults to "assistants" — by far the most common
10
+ * purpose when uploading files in this context.
11
+ */
12
+ purpose: z.string().optional(),
13
+ expiresAfter: z.number().optional(),
14
+ }),
15
+ ),
16
+ );
17
+
18
+ export type OpenAIFilesOptions = InferSchema<typeof openaiFilesOptionsSchema>;
@@ -0,0 +1,102 @@
1
+ import {
2
+ FilesV4,
3
+ FilesV4UploadFileCallOptions,
4
+ FilesV4UploadFileResult,
5
+ } from '@ai-sdk/provider';
6
+ import {
7
+ combineHeaders,
8
+ convertBase64ToUint8Array,
9
+ createJsonResponseHandler,
10
+ FetchFunction,
11
+ parseProviderOptions,
12
+ postFormDataToApi,
13
+ } from '@ai-sdk/provider-utils';
14
+ import { openaiFailedResponseHandler } from '../openai-error';
15
+ import { openaiFilesResponseSchema } from './openai-files-api';
16
+ import {
17
+ openaiFilesOptionsSchema,
18
+ OpenAIFilesOptions,
19
+ } from './openai-files-options';
20
+
21
+ interface OpenAIFilesConfig {
22
+ provider: string;
23
+ baseURL: string;
24
+ headers: () => Record<string, string | undefined>;
25
+ fetch?: FetchFunction;
26
+ }
27
+
28
+ export class OpenAIFiles implements FilesV4 {
29
+ readonly specificationVersion = 'v4';
30
+
31
+ get provider(): string {
32
+ return this.config.provider;
33
+ }
34
+
35
+ constructor(private readonly config: OpenAIFilesConfig) {}
36
+
37
+ async uploadFile({
38
+ data,
39
+ mediaType,
40
+ filename,
41
+ providerOptions,
42
+ }: FilesV4UploadFileCallOptions): Promise<FilesV4UploadFileResult> {
43
+ const openaiOptions = (await parseProviderOptions({
44
+ provider: 'openai',
45
+ providerOptions,
46
+ schema: openaiFilesOptionsSchema,
47
+ })) as OpenAIFilesOptions | undefined;
48
+
49
+ const fileBytes =
50
+ data instanceof Uint8Array ? data : convertBase64ToUint8Array(data);
51
+
52
+ const blob = new Blob([fileBytes], {
53
+ type: mediaType,
54
+ });
55
+
56
+ const formData = new FormData();
57
+ if (filename != null) {
58
+ formData.append('file', blob, filename);
59
+ } else {
60
+ formData.append('file', blob);
61
+ }
62
+ formData.append('purpose', openaiOptions?.purpose ?? 'assistants');
63
+
64
+ if (openaiOptions?.expiresAfter != null) {
65
+ formData.append('expires_after', String(openaiOptions.expiresAfter));
66
+ }
67
+
68
+ const { value: response } = await postFormDataToApi({
69
+ url: `${this.config.baseURL}/files`,
70
+ headers: combineHeaders(this.config.headers()),
71
+ formData,
72
+ failedResponseHandler: openaiFailedResponseHandler,
73
+ successfulResponseHandler: createJsonResponseHandler(
74
+ openaiFilesResponseSchema,
75
+ ),
76
+ fetch: this.config.fetch,
77
+ });
78
+
79
+ return {
80
+ warnings: [],
81
+ providerReference: { openai: response.id },
82
+ ...((response.filename ?? filename)
83
+ ? { filename: response.filename ?? filename }
84
+ : {}),
85
+ ...(mediaType != null ? { mediaType } : {}),
86
+ providerMetadata: {
87
+ openai: {
88
+ ...(response.filename != null ? { filename: response.filename } : {}),
89
+ ...(response.purpose != null ? { purpose: response.purpose } : {}),
90
+ ...(response.bytes != null ? { bytes: response.bytes } : {}),
91
+ ...(response.created_at != null
92
+ ? { createdAt: response.created_at }
93
+ : {}),
94
+ ...(response.status != null ? { status: response.status } : {}),
95
+ ...(response.expires_at != null
96
+ ? { expiresAt: response.expires_at }
97
+ : {}),
98
+ },
99
+ },
100
+ };
101
+ }
102
+ }
@@ -1,7 +1,7 @@
1
1
  import {
2
- ImageModelV3,
3
- ImageModelV3File,
4
- SharedV3Warning,
2
+ ImageModelV4,
3
+ ImageModelV4File,
4
+ SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
@@ -27,8 +27,8 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
27
27
  };
28
28
  }
29
29
 
30
- export class OpenAIImageModel implements ImageModelV3 {
31
- readonly specificationVersion = 'v3';
30
+ export class OpenAIImageModel implements ImageModelV4 {
31
+ readonly specificationVersion = 'v4';
32
32
 
33
33
  get maxImagesPerCall(): number {
34
34
  return modelMaxImagesPerCall[this.modelId] ?? 1;
@@ -54,10 +54,10 @@ export class OpenAIImageModel implements ImageModelV3 {
54
54
  providerOptions,
55
55
  headers,
56
56
  abortSignal,
57
- }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
58
- Awaited<ReturnType<ImageModelV3['doGenerate']>>
57
+ }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<
58
+ Awaited<ReturnType<ImageModelV4['doGenerate']>>
59
59
  > {
60
- const warnings: Array<SharedV3Warning> = [];
60
+ const warnings: Array<SharedV4Warning> = [];
61
61
 
62
62
  if (aspectRatio != null) {
63
63
  warnings.push({
@@ -332,7 +332,7 @@ type OpenAIImageEditInput = {
332
332
  };
333
333
 
334
334
  async function fileToBlob(
335
- file: ImageModelV3File | undefined,
335
+ file: ImageModelV4File | undefined,
336
336
  ): Promise<Blob | undefined> {
337
337
  if (!file) return undefined;
338
338
 
package/src/index.ts CHANGED
@@ -14,7 +14,9 @@ export type { OpenAILanguageModelCompletionOptions } from './completion/openai-c
14
14
  export type { OpenAIEmbeddingModelOptions } from './embedding/openai-embedding-options';
15
15
  export type { OpenAISpeechModelOptions } from './speech/openai-speech-options';
16
16
  export type { OpenAITranscriptionModelOptions } from './transcription/openai-transcription-options';
17
+ export type { OpenAIFilesOptions } from './files/openai-files-options';
17
18
  export type {
19
+ OpenaiResponsesCompactionProviderMetadata,
18
20
  OpenaiResponsesProviderMetadata,
19
21
  OpenaiResponsesReasoningProviderMetadata,
20
22
  OpenaiResponsesTextProviderMetadata,
@@ -7,12 +7,12 @@ export type OpenAIConfig = {
7
7
  fetch?: FetchFunction;
8
8
  generateId?: () => string;
9
9
  /**
10
- * File ID prefixes used to identify file IDs in Responses API.
11
- * When undefined, all file data is treated as base64 content.
10
+ * This is soft-deprecated. Use provider references (e.g. `{ openai: 'file-abc123' }`)
11
+ * in file part data instead. File ID prefixes used to identify file IDs
12
+ * in Responses API. When undefined, all string file data is treated as
13
+ * base64 content.
12
14
  *
13
- * Examples:
14
- * - OpenAI: ['file-'] for IDs like 'file-abc123'
15
- * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
15
+ * TODO: remove in v8
16
16
  */
17
17
  fileIdPrefixes?: readonly string[];
18
18
  };
@@ -20,10 +20,10 @@ export function getOpenAILanguageModelCapabilities(
20
20
 
21
21
  const supportsPriorityProcessing =
22
22
  modelId.startsWith('gpt-4') ||
23
- modelId.startsWith('gpt-5-mini') ||
24
23
  (modelId.startsWith('gpt-5') &&
25
24
  !modelId.startsWith('gpt-5-nano') &&
26
- !modelId.startsWith('gpt-5-chat')) ||
25
+ !modelId.startsWith('gpt-5-chat') &&
26
+ !modelId.startsWith('gpt-5.4-nano')) ||
27
27
  modelId.startsWith('o3') ||
28
28
  modelId.startsWith('o4-mini');
29
29
 
@@ -40,6 +40,7 @@ export function getOpenAILanguageModelCapabilities(
40
40
  const supportsNonReasoningParameters =
41
41
  modelId.startsWith('gpt-5.1') ||
42
42
  modelId.startsWith('gpt-5.2') ||
43
+ modelId.startsWith('gpt-5.3') ||
43
44
  modelId.startsWith('gpt-5.4');
44
45
 
45
46
  const systemMessageMode = isReasoningModel ? 'developer' : 'system';
@@ -1,10 +1,12 @@
1
1
  import {
2
- EmbeddingModelV3,
3
- ImageModelV3,
4
- LanguageModelV3,
5
- ProviderV3,
6
- SpeechModelV3,
7
- TranscriptionModelV3,
2
+ EmbeddingModelV4,
3
+ FilesV4,
4
+ ImageModelV4,
5
+ LanguageModelV4,
6
+ ProviderV4,
7
+ SpeechModelV4,
8
+ SkillsV4,
9
+ TranscriptionModelV4,
8
10
  } from '@ai-sdk/provider';
9
11
  import {
10
12
  FetchFunction,
@@ -18,6 +20,7 @@ import { OpenAIChatModelId } from './chat/openai-chat-options';
18
20
  import { OpenAICompletionLanguageModel } from './completion/openai-completion-language-model';
19
21
  import { OpenAICompletionModelId } from './completion/openai-completion-options';
20
22
  import { OpenAIEmbeddingModel } from './embedding/openai-embedding-model';
23
+ import { OpenAIFiles } from './files/openai-files';
21
24
  import { OpenAIEmbeddingModelId } from './embedding/openai-embedding-options';
22
25
  import { OpenAIImageModel } from './image/openai-image-model';
23
26
  import { OpenAIImageModelId } from './image/openai-image-options';
@@ -28,70 +31,81 @@ import { OpenAISpeechModel } from './speech/openai-speech-model';
28
31
  import { OpenAISpeechModelId } from './speech/openai-speech-options';
29
32
  import { OpenAITranscriptionModel } from './transcription/openai-transcription-model';
30
33
  import { OpenAITranscriptionModelId } from './transcription/openai-transcription-options';
34
+ import { OpenAISkills } from './skills/openai-skills';
31
35
  import { VERSION } from './version';
32
36
 
33
- export interface OpenAIProvider extends ProviderV3 {
34
- (modelId: OpenAIResponsesModelId): LanguageModelV3;
37
+ export interface OpenAIProvider extends ProviderV4 {
38
+ (modelId: OpenAIResponsesModelId): LanguageModelV4;
35
39
 
36
40
  /**
37
41
  * Creates an OpenAI model for text generation.
38
42
  */
39
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
43
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
40
44
 
41
45
  /**
42
46
  * Creates an OpenAI chat model for text generation.
43
47
  */
44
- chat(modelId: OpenAIChatModelId): LanguageModelV3;
48
+ chat(modelId: OpenAIChatModelId): LanguageModelV4;
45
49
 
46
50
  /**
47
51
  * Creates an OpenAI responses API model for text generation.
48
52
  */
49
- responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
53
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
50
54
 
51
55
  /**
52
56
  * Creates an OpenAI completion model for text generation.
53
57
  */
54
- completion(modelId: OpenAICompletionModelId): LanguageModelV3;
58
+ completion(modelId: OpenAICompletionModelId): LanguageModelV4;
55
59
 
56
60
  /**
57
61
  * Creates a model for text embeddings.
58
62
  */
59
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
63
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
60
64
 
61
65
  /**
62
66
  * Creates a model for text embeddings.
63
67
  */
64
- embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
68
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
65
69
 
66
70
  /**
67
71
  * @deprecated Use `embedding` instead.
68
72
  */
69
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
73
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
70
74
 
71
75
  /**
72
76
  * @deprecated Use `embeddingModel` instead.
73
77
  */
74
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
78
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
75
79
 
76
80
  /**
77
81
  * Creates a model for image generation.
78
82
  */
79
- image(modelId: OpenAIImageModelId): ImageModelV3;
83
+ image(modelId: OpenAIImageModelId): ImageModelV4;
80
84
 
81
85
  /**
82
86
  * Creates a model for image generation.
83
87
  */
84
- imageModel(modelId: OpenAIImageModelId): ImageModelV3;
88
+ imageModel(modelId: OpenAIImageModelId): ImageModelV4;
85
89
 
86
90
  /**
87
91
  * Creates a model for transcription.
88
92
  */
89
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
93
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
90
94
 
91
95
  /**
92
96
  * Creates a model for speech generation.
93
97
  */
94
- speech(modelId: OpenAISpeechModelId): SpeechModelV3;
98
+ speech(modelId: OpenAISpeechModelId): SpeechModelV4;
99
+
100
+ /**
101
+ * Returns a FilesV4 interface for uploading files to OpenAI.
102
+ */
103
+ files(): FilesV4;
104
+
105
+ /**
106
+ * Returns a SkillsV4 interface for uploading skills to OpenAI.
107
+ */
108
+ skills(): SkillsV4;
95
109
 
96
110
  /**
97
111
  * OpenAI-specific tools.
@@ -216,6 +230,22 @@ export function createOpenAI(
216
230
  fetch: options.fetch,
217
231
  });
218
232
 
233
+ const createFiles = () =>
234
+ new OpenAIFiles({
235
+ provider: `${providerName}.files`,
236
+ baseURL,
237
+ headers: getHeaders,
238
+ fetch: options.fetch,
239
+ });
240
+
241
+ const createSkills = () =>
242
+ new OpenAISkills({
243
+ provider: `${providerName}.skills`,
244
+ url: ({ path }) => `${baseURL}${path}`,
245
+ headers: getHeaders,
246
+ fetch: options.fetch,
247
+ });
248
+
219
249
  const createLanguageModel = (modelId: OpenAIResponsesModelId) => {
220
250
  if (new.target) {
221
251
  throw new Error(
@@ -232,6 +262,7 @@ export function createOpenAI(
232
262
  url: ({ path }) => `${baseURL}${path}`,
233
263
  headers: getHeaders,
234
264
  fetch: options.fetch,
265
+ // Soft-deprecated. TODO: remove in v8
235
266
  fileIdPrefixes: ['file-'],
236
267
  });
237
268
  };
@@ -240,7 +271,7 @@ export function createOpenAI(
240
271
  return createLanguageModel(modelId);
241
272
  };
242
273
 
243
- provider.specificationVersion = 'v3' as const;
274
+ provider.specificationVersion = 'v4' as const;
244
275
  provider.languageModel = createLanguageModel;
245
276
  provider.chat = createChatModel;
246
277
  provider.completion = createCompletionModel;
@@ -258,6 +289,8 @@ export function createOpenAI(
258
289
 
259
290
  provider.speech = createSpeechModel;
260
291
  provider.speechModel = createSpeechModel;
292
+ provider.files = createFiles;
293
+ provider.skills = createSkills;
261
294
 
262
295
  provider.tools = openaiTools;
263
296
 
@@ -5,6 +5,7 @@ import { fileSearch } from './tool/file-search';
5
5
  import { imageGeneration } from './tool/image-generation';
6
6
  import { localShell } from './tool/local-shell';
7
7
  import { shell } from './tool/shell';
8
+ import { toolSearch } from './tool/tool-search';
8
9
  import { webSearch } from './tool/web-search';
9
10
  import { webSearchPreview } from './tool/web-search-preview';
10
11
  import { mcp } from './tool/mcp';
@@ -24,7 +25,6 @@ export const openaiTools = {
24
25
  * Lark syntax). The model returns a `custom_tool_call` output item whose
25
26
  * `input` field is a string matching the specified grammar.
26
27
  *
27
- * @param name - The name of the custom tool.
28
28
  * @param description - An optional description of the tool.
29
29
  * @param format - The output format constraint (grammar type, syntax, and definition).
30
30
  */
@@ -123,4 +123,15 @@ export const openaiTools = {
123
123
  * @param serverUrl - URL for the MCP server.
124
124
  */
125
125
  mcp,
126
+
127
+ /**
128
+ * Tool search allows the model to dynamically search for and load deferred
129
+ * tools into the model's context as needed. This helps reduce overall token
130
+ * usage, cost, and latency by only loading tools when the model needs them.
131
+ *
132
+ * To use tool search, mark functions or namespaces with `defer_loading: true`
133
+ * in the tools array. The model will use tool search to load these tools
134
+ * when it determines they are needed.
135
+ */
136
+ toolSearch,
126
137
  };
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIResponsesUsage = {
4
4
  input_tokens: number;
@@ -13,7 +13,7 @@ export type OpenAIResponsesUsage = {
13
13
 
14
14
  export function convertOpenAIResponsesUsage(
15
15
  usage: OpenAIResponsesUsage | undefined | null,
16
- ): LanguageModelV3Usage {
16
+ ): LanguageModelV4Usage {
17
17
  if (usage == null) {
18
18
  return {
19
19
  inputTokens: {