@ai-sdk/openai 4.0.0-beta.2 → 4.0.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/CHANGELOG.md +234 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.mts +134 -35
  4. package/dist/index.d.ts +134 -35
  5. package/dist/index.js +1700 -1139
  6. package/dist/index.js.map +1 -1
  7. package/dist/index.mjs +1697 -1117
  8. package/dist/index.mjs.map +1 -1
  9. package/dist/internal/index.d.mts +107 -41
  10. package/dist/internal/index.d.ts +107 -41
  11. package/dist/internal/index.js +1380 -939
  12. package/dist/internal/index.js.map +1 -1
  13. package/dist/internal/index.mjs +1371 -917
  14. package/dist/internal/index.mjs.map +1 -1
  15. package/docs/03-openai.mdx +274 -9
  16. package/package.json +3 -5
  17. package/src/chat/convert-openai-chat-usage.ts +2 -2
  18. package/src/chat/convert-to-openai-chat-messages.ts +26 -15
  19. package/src/chat/map-openai-finish-reason.ts +2 -2
  20. package/src/chat/openai-chat-language-model.ts +32 -24
  21. package/src/chat/openai-chat-options.ts +5 -0
  22. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  23. package/src/completion/convert-openai-completion-usage.ts +2 -2
  24. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  25. package/src/completion/map-openai-finish-reason.ts +2 -2
  26. package/src/completion/openai-completion-language-model.ts +20 -20
  27. package/src/embedding/openai-embedding-model.ts +5 -5
  28. package/src/files/openai-files-api.ts +17 -0
  29. package/src/files/openai-files-options.ts +18 -0
  30. package/src/files/openai-files.ts +102 -0
  31. package/src/image/openai-image-model.ts +9 -9
  32. package/src/index.ts +2 -0
  33. package/src/openai-config.ts +5 -5
  34. package/src/openai-language-model-capabilities.ts +3 -2
  35. package/src/openai-provider.ts +39 -21
  36. package/src/openai-tools.ts +12 -1
  37. package/src/responses/convert-openai-responses-usage.ts +2 -2
  38. package/src/responses/convert-to-openai-responses-input.ts +188 -14
  39. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  40. package/src/responses/openai-responses-api.ts +136 -2
  41. package/src/responses/openai-responses-language-model.ts +233 -37
  42. package/src/responses/openai-responses-options.ts +24 -2
  43. package/src/responses/openai-responses-prepare-tools.ts +34 -9
  44. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  45. package/src/speech/openai-speech-model.ts +7 -7
  46. package/src/tool/custom.ts +0 -6
  47. package/src/tool/tool-search.ts +98 -0
  48. package/src/transcription/openai-transcription-model.ts +8 -8
package/src/chat/openai-chat-prepare-tools.ts CHANGED
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV3CallOptions,
-  SharedV3Warning,
+  LanguageModelV4CallOptions,
+  SharedV4Warning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
@@ -12,17 +12,17 @@ export function prepareChatTools({
   tools,
   toolChoice,
 }: {
-  tools: LanguageModelV3CallOptions['tools'];
-  toolChoice?: LanguageModelV3CallOptions['toolChoice'];
+  tools: LanguageModelV4CallOptions['tools'];
+  toolChoice?: LanguageModelV4CallOptions['toolChoice'];
 }): {
   tools?: OpenAIChatFunctionTool[];
   toolChoice?: OpenAIChatToolChoice;
-  toolWarnings: Array<SharedV3Warning>;
+  toolWarnings: Array<SharedV4Warning>;
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   tools = tools?.length ? tools : undefined;
 
-  const toolWarnings: SharedV3Warning[] = [];
+  const toolWarnings: SharedV4Warning[] = [];
 
   if (tools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings };
package/src/completion/convert-openai-completion-usage.ts CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV3Usage } from '@ai-sdk/provider';
+import { LanguageModelV4Usage } from '@ai-sdk/provider';
 
 export type OpenAICompletionUsage = {
   prompt_tokens?: number | null;
@@ -8,7 +8,7 @@ export type OpenAICompletionUsage = {
 
 export function convertOpenAICompletionUsage(
   usage: OpenAICompletionUsage | undefined | null,
-): LanguageModelV3Usage {
+): LanguageModelV4Usage {
   if (usage == null) {
     return {
       inputTokens: {
package/src/completion/convert-to-openai-completion-prompt.ts CHANGED
@@ -1,6 +1,6 @@
 import {
   InvalidPromptError,
-  LanguageModelV3Prompt,
+  LanguageModelV4Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 
@@ -9,7 +9,7 @@ export function convertToOpenAICompletionPrompt({
   user = 'user',
   assistant = 'assistant',
 }: {
-  prompt: LanguageModelV3Prompt;
+  prompt: LanguageModelV4Prompt;
   user?: string;
   assistant?: string;
 }): {
package/src/completion/map-openai-finish-reason.ts CHANGED
@@ -1,8 +1,8 @@
-import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
 
 export function mapOpenAIFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV3FinishReason['unified'] {
+): LanguageModelV4FinishReason['unified'] {
   switch (finishReason) {
     case 'stop':
       return 'stop';
package/src/completion/openai-completion-language-model.ts CHANGED
@@ -1,12 +1,12 @@
 import {
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  SharedV3ProviderMetadata,
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -42,8 +42,8 @@ type OpenAICompletionConfig = {
   fetch?: FetchFunction;
 };
 
-export class OpenAICompletionLanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAICompletionLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';
 
   readonly modelId: OpenAICompletionModelId;
 
@@ -83,8 +83,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     toolChoice,
     seed,
     providerOptions,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];
 
     // Parse provider options
    const openaiOptions = {
@@ -161,8 +161,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }
 
   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args, warnings } = await this.getArgs(options);
 
     const {
@@ -186,7 +186,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
 
     const choice = response.choices[0];
 
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
 
     if (choice.logprobs != null) {
       providerMetadata.openai.logprobs = choice.logprobs;
@@ -211,8 +211,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }
 
   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);
 
     const body = {
@@ -239,11 +239,11 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });
 
-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
     let usage: OpenAICompletionUsage | undefined = undefined;
     let isFirstChunk = true;
 
@@ -251,7 +251,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<OpenAICompletionChunk>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
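
Worth calling out in this hunk: v4 finish reasons are structured objects rather than bare strings. A small illustration of the shape as it appears in this diff (the full `unified` union lives in `@ai-sdk/provider` and is not shown here, so it is typed loosely below):

```ts
// Shape as used above: `unified` carries the normalized reason and
// `raw` preserves the provider's original finish_reason string.
type FinishReasonShape = { unified: string; raw: string | undefined };

// Initial value, exactly as in doStream above:
let finishReason: FinishReasonShape = { unified: 'other', raw: undefined };

// After the API reports completion (cf. mapOpenAIFinishReason):
finishReason = { unified: 'stop', raw: 'stop' };
```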
package/src/embedding/openai-embedding-model.ts CHANGED
@@ -1,5 +1,5 @@
 import {
-  EmbeddingModelV3,
+  EmbeddingModelV4,
   TooManyEmbeddingValuesForCallError,
 } from '@ai-sdk/provider';
 import {
@@ -16,8 +16,8 @@ import {
 } from './openai-embedding-options';
 import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';
 
-export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAIEmbeddingModel implements EmbeddingModelV4 {
+  readonly specificationVersion = 'v4';
   readonly modelId: OpenAIEmbeddingModelId;
   readonly maxEmbeddingsPerCall = 2048;
   readonly supportsParallelCalls = true;
@@ -38,8 +38,8 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
     headers,
     abortSignal,
     providerOptions,
-  }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
-    Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
+  }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
+    Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
   > {
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
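
Since `doEmbed` still rejects calls above `maxEmbeddingsPerCall` (2048) with `TooManyEmbeddingValuesForCallError`, callers embedding large corpora have to batch. A minimal sketch of that pattern; the `embedInBatches` helper is illustrative and not part of the package:

```ts
// Illustrative helper, not part of @ai-sdk/openai: split values into
// batches that respect the model's maxEmbeddingsPerCall (2048 here).
async function embedInBatches(
  doEmbed: (values: string[]) => Promise<{ embeddings: number[][] }>,
  values: string[],
  maxPerCall = 2048,
): Promise<number[][]> {
  const out: number[][] = [];
  for (let i = 0; i < values.length; i += maxPerCall) {
    // Each call stays under the limit, so the model never throws
    // TooManyEmbeddingValuesForCallError.
    const { embeddings } = await doEmbed(values.slice(i, i + maxPerCall));
    out.push(...embeddings);
  }
  return out;
}
```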
package/src/files/openai-files-api.ts ADDED
@@ -0,0 +1,17 @@
+import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openaiFilesResponseSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      id: z.string(),
+      object: z.string().nullish(),
+      bytes: z.number().nullish(),
+      created_at: z.number().nullish(),
+      filename: z.string().nullish(),
+      purpose: z.string().nullish(),
+      status: z.string().nullish(),
+      expires_at: z.number().nullish(),
+    }),
+  ),
+);
package/src/files/openai-files-options.ts ADDED
@@ -0,0 +1,18 @@
+import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openaiFilesOptionsSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      /*
+       * Required by the OpenAI API, but optional here because
+       * the SDK defaults to "assistants" — by far the most common
+       * purpose when uploading files in this context.
+       */
+      purpose: z.string().optional(),
+      expiresAfter: z.number().optional(),
+    }),
+  ),
+);
+
+export type OpenAIFilesOptions = InferSchema<typeof openaiFilesOptionsSchema>;
package/src/files/openai-files.ts ADDED
@@ -0,0 +1,102 @@
+import {
+  FilesV4,
+  FilesV4UploadFileCallOptions,
+  FilesV4UploadFileResult,
+} from '@ai-sdk/provider';
+import {
+  combineHeaders,
+  convertBase64ToUint8Array,
+  createJsonResponseHandler,
+  FetchFunction,
+  parseProviderOptions,
+  postFormDataToApi,
+} from '@ai-sdk/provider-utils';
+import { openaiFailedResponseHandler } from '../openai-error';
+import { openaiFilesResponseSchema } from './openai-files-api';
+import {
+  openaiFilesOptionsSchema,
+  OpenAIFilesOptions,
+} from './openai-files-options';
+
+interface OpenAIFilesConfig {
+  provider: string;
+  baseURL: string;
+  headers: () => Record<string, string | undefined>;
+  fetch?: FetchFunction;
+}
+
+export class OpenAIFiles implements FilesV4 {
+  readonly specificationVersion = 'v4';
+
+  get provider(): string {
+    return this.config.provider;
+  }
+
+  constructor(private readonly config: OpenAIFilesConfig) {}
+
+  async uploadFile({
+    data,
+    mediaType,
+    filename,
+    providerOptions,
+  }: FilesV4UploadFileCallOptions): Promise<FilesV4UploadFileResult> {
+    const openaiOptions = (await parseProviderOptions({
+      provider: 'openai',
+      providerOptions,
+      schema: openaiFilesOptionsSchema,
+    })) as OpenAIFilesOptions | undefined;
+
+    const fileBytes =
+      data instanceof Uint8Array ? data : convertBase64ToUint8Array(data);
+
+    const blob = new Blob([fileBytes], {
+      type: mediaType,
+    });
+
+    const formData = new FormData();
+    if (filename != null) {
+      formData.append('file', blob, filename);
+    } else {
+      formData.append('file', blob);
+    }
+    formData.append('purpose', openaiOptions?.purpose ?? 'assistants');
+
+    if (openaiOptions?.expiresAfter != null) {
+      formData.append('expires_after', String(openaiOptions.expiresAfter));
+    }
+
+    const { value: response } = await postFormDataToApi({
+      url: `${this.config.baseURL}/files`,
+      headers: combineHeaders(this.config.headers()),
+      formData,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        openaiFilesResponseSchema,
+      ),
+      fetch: this.config.fetch,
+    });
+
+    return {
+      warnings: [],
+      providerReference: { openai: response.id },
+      ...((response.filename ?? filename)
+        ? { filename: response.filename ?? filename }
+        : {}),
+      ...(mediaType != null ? { mediaType } : {}),
+      providerMetadata: {
+        openai: {
+          ...(response.filename != null ? { filename: response.filename } : {}),
+          ...(response.purpose != null ? { purpose: response.purpose } : {}),
+          ...(response.bytes != null ? { bytes: response.bytes } : {}),
+          ...(response.created_at != null
+            ? { createdAt: response.created_at }
+            : {}),
+          ...(response.status != null ? { status: response.status } : {}),
+          ...(response.expires_at != null
+            ? { expiresAt: response.expires_at }
+            : {}),
+        },
+      },
+    };
+  }
+}
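
For context, here is roughly how this new files API is reached from user code. This is a hedged sketch: the field names mirror `FilesV4UploadFileCallOptions` from the class above, and `providerOptions.openai` matches `openaiFilesOptionsSchema`; treat the exact call-site ergonomics as an assumption rather than documented API.

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Sketch based on the uploadFile signature shown in this diff.
const result = await openai.files().uploadFile({
  data: new Uint8Array(await readFile('doc.pdf')),
  mediaType: 'application/pdf',
  filename: 'doc.pdf',
  providerOptions: {
    openai: { purpose: 'assistants' }, // also the default when omitted
  },
});

// result.providerReference is { openai: '<file id>' }, reusable as a
// provider reference in prompt file parts (see openai-config.ts below).
console.log(result.providerReference.openai);
```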
package/src/image/openai-image-model.ts CHANGED
@@ -1,7 +1,7 @@
 import {
-  ImageModelV3,
-  ImageModelV3File,
-  SharedV3Warning,
+  ImageModelV4,
+  ImageModelV4File,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -27,8 +27,8 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
   };
 }
 
-export class OpenAIImageModel implements ImageModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAIImageModel implements ImageModelV4 {
+  readonly specificationVersion = 'v4';
 
   get maxImagesPerCall(): number {
     return modelMaxImagesPerCall[this.modelId] ?? 1;
@@ -54,10 +54,10 @@ export class OpenAIImageModel implements ImageModelV3 {
     providerOptions,
     headers,
     abortSignal,
-  }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
-    Awaited<ReturnType<ImageModelV3['doGenerate']>>
+  }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<
+    Awaited<ReturnType<ImageModelV4['doGenerate']>>
   > {
-    const warnings: Array<SharedV3Warning> = [];
+    const warnings: Array<SharedV4Warning> = [];
 
     if (aspectRatio != null) {
       warnings.push({
@@ -332,7 +332,7 @@ type OpenAIImageEditInput = {
 };
 
 async function fileToBlob(
-  file: ImageModelV3File | undefined,
+  file: ImageModelV4File | undefined,
 ): Promise<Blob | undefined> {
   if (!file) return undefined;
 
package/src/index.ts CHANGED
@@ -14,7 +14,9 @@ export type { OpenAILanguageModelCompletionOptions } from './completion/openai-c
 export type { OpenAIEmbeddingModelOptions } from './embedding/openai-embedding-options';
 export type { OpenAISpeechModelOptions } from './speech/openai-speech-options';
 export type { OpenAITranscriptionModelOptions } from './transcription/openai-transcription-options';
+export type { OpenAIFilesOptions } from './files/openai-files-options';
 export type {
+  OpenaiResponsesCompactionProviderMetadata,
   OpenaiResponsesProviderMetadata,
   OpenaiResponsesReasoningProviderMetadata,
   OpenaiResponsesTextProviderMetadata,
package/src/openai-config.ts CHANGED
@@ -7,12 +7,12 @@ export type OpenAIConfig = {
   fetch?: FetchFunction;
   generateId?: () => string;
   /**
-   * File ID prefixes used to identify file IDs in Responses API.
-   * When undefined, all file data is treated as base64 content.
+   * This is soft-deprecated. Use provider references (e.g. `{ openai: 'file-abc123' }`)
+   * in file part data instead. File ID prefixes used to identify file IDs
+   * in Responses API. When undefined, all string file data is treated as
+   * base64 content.
    *
-   * Examples:
-   * - OpenAI: ['file-'] for IDs like 'file-abc123'
-   * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
+   * TODO: remove in v8
    */
   fileIdPrefixes?: readonly string[];
 };
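
A sketch of the migration the new comment asks for, assuming the AI SDK file-part shape (`type`/`data`/`mediaType`); the `{ openai: 'file-abc123' }` form comes straight from the comment's own example:

```ts
// Before: a bare file ID string, recognized only via fileIdPrefixes (['file-']).
const legacyPart = {
  type: 'file' as const,
  data: 'file-abc123',
  mediaType: 'application/pdf',
};

// After: an explicit provider reference, no prefix sniffing required.
const part = {
  type: 'file' as const,
  data: { openai: 'file-abc123' },
  mediaType: 'application/pdf',
};
```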
package/src/openai-language-model-capabilities.ts CHANGED
@@ -20,10 +20,10 @@ export function getOpenAILanguageModelCapabilities(
 
   const supportsPriorityProcessing =
     modelId.startsWith('gpt-4') ||
-    modelId.startsWith('gpt-5-mini') ||
     (modelId.startsWith('gpt-5') &&
       !modelId.startsWith('gpt-5-nano') &&
-      !modelId.startsWith('gpt-5-chat')) ||
+      !modelId.startsWith('gpt-5-chat') &&
+      !modelId.startsWith('gpt-5.4-nano')) ||
     modelId.startsWith('o3') ||
     modelId.startsWith('o4-mini');
 
@@ -40,6 +40,7 @@ export function getOpenAILanguageModelCapabilities(
   const supportsNonReasoningParameters =
     modelId.startsWith('gpt-5.1') ||
     modelId.startsWith('gpt-5.2') ||
+    modelId.startsWith('gpt-5.3') ||
     modelId.startsWith('gpt-5.4');
 
   const systemMessageMode = isReasoningModel ? 'developer' : 'system';
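
Restated as a standalone predicate, the net effect of this hunk on priority processing (derived directly from the booleans above): `gpt-5-mini` no longer needs its explicit branch because the general `gpt-5` branch covers it, while `gpt-5.4-nano` is newly carved out.

```ts
// Restatement of the supportsPriorityProcessing check after this change.
function supportsPriorityProcessing(modelId: string): boolean {
  return (
    modelId.startsWith('gpt-4') ||
    (modelId.startsWith('gpt-5') &&
      !modelId.startsWith('gpt-5-nano') &&
      !modelId.startsWith('gpt-5-chat') &&
      !modelId.startsWith('gpt-5.4-nano')) ||
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini')
  );
}

supportsPriorityProcessing('gpt-5-mini');   // true (now via the gpt-5 branch)
supportsPriorityProcessing('gpt-5.4-nano'); // false (newly excluded)
supportsPriorityProcessing('gpt-5-chat');   // false (still excluded)
```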
package/src/openai-provider.ts CHANGED
@@ -1,10 +1,11 @@
 import {
-  EmbeddingModelV3,
-  ImageModelV3,
-  LanguageModelV3,
-  ProviderV3,
-  SpeechModelV3,
-  TranscriptionModelV3,
+  EmbeddingModelV4,
+  FilesV4,
+  ImageModelV4,
+  LanguageModelV4,
+  ProviderV4,
+  SpeechModelV4,
+  TranscriptionModelV4,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -18,6 +19,7 @@ import { OpenAIChatModelId } from './chat/openai-chat-options';
 import { OpenAICompletionLanguageModel } from './completion/openai-completion-language-model';
 import { OpenAICompletionModelId } from './completion/openai-completion-options';
 import { OpenAIEmbeddingModel } from './embedding/openai-embedding-model';
+import { OpenAIFiles } from './files/openai-files';
 import { OpenAIEmbeddingModelId } from './embedding/openai-embedding-options';
 import { OpenAIImageModel } from './image/openai-image-model';
 import { OpenAIImageModelId } from './image/openai-image-options';
@@ -30,68 +32,73 @@ import { OpenAITranscriptionModel } from './transcription/openai-transcription-m
 import { OpenAITranscriptionModelId } from './transcription/openai-transcription-options';
 import { VERSION } from './version';
 
-export interface OpenAIProvider extends ProviderV3 {
-  (modelId: OpenAIResponsesModelId): LanguageModelV3;
+export interface OpenAIProvider extends ProviderV4 {
+  (modelId: OpenAIResponsesModelId): LanguageModelV4;
 
   /**
    * Creates an OpenAI model for text generation.
    */
-  languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
+  languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
 
   /**
    * Creates an OpenAI chat model for text generation.
    */
-  chat(modelId: OpenAIChatModelId): LanguageModelV3;
+  chat(modelId: OpenAIChatModelId): LanguageModelV4;
 
   /**
    * Creates an OpenAI responses API model for text generation.
    */
-  responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
+  responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
 
   /**
    * Creates an OpenAI completion model for text generation.
    */
-  completion(modelId: OpenAICompletionModelId): LanguageModelV3;
+  completion(modelId: OpenAICompletionModelId): LanguageModelV4;
 
   /**
    * Creates a model for text embeddings.
    */
-  embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+  embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
 
   /**
    * Creates a model for text embeddings.
    */
-  embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+  embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
 
   /**
    * @deprecated Use `embedding` instead.
    */
-  textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+  textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
 
   /**
    * @deprecated Use `embeddingModel` instead.
    */
-  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
 
   /**
    * Creates a model for image generation.
    */
-  image(modelId: OpenAIImageModelId): ImageModelV3;
+  image(modelId: OpenAIImageModelId): ImageModelV4;
 
   /**
    * Creates a model for image generation.
    */
-  imageModel(modelId: OpenAIImageModelId): ImageModelV3;
+  imageModel(modelId: OpenAIImageModelId): ImageModelV4;
 
   /**
    * Creates a model for transcription.
    */
-  transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
+  transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
 
   /**
    * Creates a model for speech generation.
    */
-  speech(modelId: OpenAISpeechModelId): SpeechModelV3;
+  speech(modelId: OpenAISpeechModelId): SpeechModelV4;
+
+  /**
+   * Returns a FilesV4 interface for uploading files to OpenAI.
+   */
+  files(): FilesV4;
 
   /**
    * OpenAI-specific tools.
@@ -216,6 +223,14 @@ export function createOpenAI(
     fetch: options.fetch,
   });
 
+  const createFiles = () =>
+    new OpenAIFiles({
+      provider: `${providerName}.files`,
+      baseURL,
+      headers: getHeaders,
+      fetch: options.fetch,
+    });
+
   const createLanguageModel = (modelId: OpenAIResponsesModelId) => {
     if (new.target) {
       throw new Error(
@@ -232,6 +247,7 @@ export function createOpenAI(
       url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch,
+      // Soft-deprecated. TODO: remove in v8
      fileIdPrefixes: ['file-'],
    });
  };
@@ -240,7 +256,7 @@ export function createOpenAI(
     return createLanguageModel(modelId);
   };
 
-  provider.specificationVersion = 'v3' as const;
+  provider.specificationVersion = 'v4' as const;
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
   provider.completion = createCompletionModel;
@@ -259,6 +275,8 @@ export function createOpenAI(
   provider.speech = createSpeechModel;
   provider.speechModel = createSpeechModel;
 
+  provider.files = createFiles;
+
   provider.tools = openaiTools;
 
   return provider as OpenAIProvider;
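
Taken together, the provider surface after this hunk looks like the sketch below; the model IDs are illustrative:

```ts
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const model = openai('gpt-5.1');      // callable form: Responses API model
const chat = openai.chat('gpt-5.1');  // Chat Completions API model
const embedder = openai.embedding('text-embedding-3-small');
const files = openai.files();         // new in this release: FilesV4 uploads
```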
package/src/openai-tools.ts CHANGED
@@ -5,6 +5,7 @@ import { fileSearch } from './tool/file-search';
 import { imageGeneration } from './tool/image-generation';
 import { localShell } from './tool/local-shell';
 import { shell } from './tool/shell';
+import { toolSearch } from './tool/tool-search';
 import { webSearch } from './tool/web-search';
 import { webSearchPreview } from './tool/web-search-preview';
 import { mcp } from './tool/mcp';
@@ -24,7 +25,6 @@ export const openaiTools = {
    * Lark syntax). The model returns a `custom_tool_call` output item whose
    * `input` field is a string matching the specified grammar.
    *
-   * @param name - The name of the custom tool.
    * @param description - An optional description of the tool.
    * @param format - The output format constraint (grammar type, syntax, and definition).
    */
@@ -123,4 +123,15 @@ export const openaiTools = {
    * @param serverUrl - URL for the MCP server.
    */
   mcp,
+
+  /**
+   * Tool search allows the model to dynamically search for and load deferred
+   * tools into the model's context as needed. This helps reduce overall token
+   * usage, cost, and latency by only loading tools when the model needs them.
+   *
+   * To use tool search, mark functions or namespaces with `defer_loading: true`
+   * in the tools array. The model will use tool search to load these tools
+   * when it determines they are needed.
+   */
+  toolSearch,
 };
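
Since `toolSearch` now ships alongside the other provider-executed tools, enabling it presumably follows the same factory pattern as `webSearch` or `fileSearch`. A hedged sketch; the factory's arguments, the tool key, and the `defer_loading` plumbing are assumptions not shown in this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Assumption: toolSearch() is a zero-config factory like the other
// provider tools exported from this file. Verify against the package docs.
const result = await generateText({
  model: openai('gpt-5.1'),
  tools: {
    tool_search: openai.tools.toolSearch(),
    // Per the doc comment above, deferred tools would be marked with
    // `defer_loading: true` so the model loads them on demand.
  },
  prompt: 'Pick whichever deferred tool fits and convert 100 EUR to USD.',
});
```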
package/src/responses/convert-openai-responses-usage.ts CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV3Usage } from '@ai-sdk/provider';
+import { LanguageModelV4Usage } from '@ai-sdk/provider';
 
 export type OpenAIResponsesUsage = {
   input_tokens: number;
@@ -13,7 +13,7 @@ export type OpenAIResponsesUsage = {
 
 export function convertOpenAIResponsesUsage(
   usage: OpenAIResponsesUsage | undefined | null,
-): LanguageModelV3Usage {
+): LanguageModelV4Usage {
   if (usage == null) {
     return {
       inputTokens: {