@ai-sdk/openai 0.0.0-013d7476-20250808163325 → 0.0.0-2f1ae29d-20260122140908

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121) hide show
  1. package/CHANGELOG.md +1324 -228
  2. package/dist/index.d.mts +832 -85
  3. package/dist/index.d.ts +832 -85
  4. package/dist/index.js +3940 -1418
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +4039 -1469
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +954 -116
  9. package/dist/internal/index.d.ts +954 -116
  10. package/dist/internal/index.js +3798 -1342
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +3728 -1245
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/docs/03-openai.mdx +2018 -0
  15. package/package.json +13 -6
  16. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  17. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  18. package/src/chat/convert-openai-chat-usage.ts +57 -0
  19. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  20. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  21. package/src/chat/get-response-metadata.ts +15 -0
  22. package/src/chat/map-openai-finish-reason.ts +19 -0
  23. package/src/chat/openai-chat-api.ts +198 -0
  24. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  25. package/src/chat/openai-chat-language-model.ts +700 -0
  26. package/src/chat/openai-chat-options.ts +186 -0
  27. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  28. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  29. package/src/chat/openai-chat-prompt.ts +70 -0
  30. package/src/completion/convert-openai-completion-usage.ts +46 -0
  31. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  32. package/src/completion/get-response-metadata.ts +15 -0
  33. package/src/completion/map-openai-finish-reason.ts +19 -0
  34. package/src/completion/openai-completion-api.ts +81 -0
  35. package/src/completion/openai-completion-language-model.test.ts +752 -0
  36. package/src/completion/openai-completion-language-model.ts +336 -0
  37. package/src/completion/openai-completion-options.ts +58 -0
  38. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  39. package/src/embedding/openai-embedding-api.ts +13 -0
  40. package/src/embedding/openai-embedding-model.test.ts +146 -0
  41. package/src/embedding/openai-embedding-model.ts +95 -0
  42. package/src/embedding/openai-embedding-options.ts +30 -0
  43. package/src/image/openai-image-api.ts +35 -0
  44. package/src/image/openai-image-model.test.ts +722 -0
  45. package/src/image/openai-image-model.ts +305 -0
  46. package/src/image/openai-image-options.ts +28 -0
  47. package/src/index.ts +9 -0
  48. package/src/internal/index.ts +19 -0
  49. package/src/openai-config.ts +18 -0
  50. package/src/openai-error.test.ts +34 -0
  51. package/src/openai-error.ts +22 -0
  52. package/src/openai-language-model-capabilities.test.ts +93 -0
  53. package/src/openai-language-model-capabilities.ts +54 -0
  54. package/src/openai-provider.test.ts +98 -0
  55. package/src/openai-provider.ts +270 -0
  56. package/src/openai-tools.ts +114 -0
  57. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  58. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  59. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  60. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  61. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  62. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  63. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  64. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  65. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  66. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  67. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  68. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  69. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  70. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  71. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  72. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  73. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  74. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  75. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  76. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  77. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  78. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  79. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  80. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  81. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  82. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  83. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  84. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  85. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  86. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  87. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  88. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  89. package/src/responses/convert-openai-responses-usage.ts +53 -0
  90. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  91. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  92. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  93. package/src/responses/openai-responses-api.test.ts +89 -0
  94. package/src/responses/openai-responses-api.ts +1086 -0
  95. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  96. package/src/responses/openai-responses-language-model.ts +1932 -0
  97. package/src/responses/openai-responses-options.ts +312 -0
  98. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  99. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  100. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  101. package/src/speech/openai-speech-api.ts +38 -0
  102. package/src/speech/openai-speech-model.test.ts +202 -0
  103. package/src/speech/openai-speech-model.ts +137 -0
  104. package/src/speech/openai-speech-options.ts +22 -0
  105. package/src/tool/apply-patch.ts +141 -0
  106. package/src/tool/code-interpreter.ts +104 -0
  107. package/src/tool/file-search.ts +145 -0
  108. package/src/tool/image-generation.ts +126 -0
  109. package/src/tool/local-shell.test-d.ts +20 -0
  110. package/src/tool/local-shell.ts +72 -0
  111. package/src/tool/mcp.ts +125 -0
  112. package/src/tool/shell.ts +85 -0
  113. package/src/tool/web-search-preview.ts +139 -0
  114. package/src/tool/web-search.test-d.ts +13 -0
  115. package/src/tool/web-search.ts +179 -0
  116. package/src/transcription/openai-transcription-api.ts +37 -0
  117. package/src/transcription/openai-transcription-model.test.ts +507 -0
  118. package/src/transcription/openai-transcription-model.ts +232 -0
  119. package/src/transcription/openai-transcription-options.ts +50 -0
  120. package/src/transcription/transcription-test.mp3 +0 -0
  121. package/src/version.ts +6 -0
@@ -1,31 +1,28 @@
1
- import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
2
- import { FetchFunction } from '@ai-sdk/provider-utils';
3
- import { z } from 'zod/v4';
4
-
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
6
- declare const openaiProviderOptions: z.ZodObject<{
7
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
8
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
9
- parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
10
- user: z.ZodOptional<z.ZodString>;
11
- reasoningEffort: z.ZodOptional<z.ZodEnum<{
12
- low: "low";
13
- medium: "medium";
14
- high: "high";
15
- }>>;
16
- maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
17
- store: z.ZodOptional<z.ZodBoolean>;
18
- metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
19
- prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
20
- structuredOutputs: z.ZodOptional<z.ZodBoolean>;
21
- serviceTier: z.ZodOptional<z.ZodEnum<{
22
- auto: "auto";
23
- flex: "flex";
24
- priority: "priority";
25
- }>>;
26
- strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
27
- }, z.core.$strip>;
28
- type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
1
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
2
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
+ import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
4
+
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | (string & {});
6
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
7
+ logitBias?: Record<number, number> | undefined;
8
+ logprobs?: number | boolean | undefined;
9
+ parallelToolCalls?: boolean | undefined;
10
+ user?: string | undefined;
11
+ reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
12
+ maxCompletionTokens?: number | undefined;
13
+ store?: boolean | undefined;
14
+ metadata?: Record<string, string> | undefined;
15
+ prediction?: Record<string, any> | undefined;
16
+ serviceTier?: "default" | "auto" | "flex" | "priority" | undefined;
17
+ strictJsonSchema?: boolean | undefined;
18
+ textVerbosity?: "low" | "medium" | "high" | undefined;
19
+ promptCacheKey?: string | undefined;
20
+ promptCacheRetention?: "in_memory" | "24h" | undefined;
21
+ safetyIdentifier?: string | undefined;
22
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
23
+ forceReasoning?: boolean | undefined;
24
+ }>;
25
+ type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
29
26
 
30
27
  type OpenAIChatConfig = {
31
28
  provider: string;
@@ -36,8 +33,8 @@ type OpenAIChatConfig = {
36
33
  }) => string;
37
34
  fetch?: FetchFunction;
38
35
  };
39
- declare class OpenAIChatLanguageModel implements LanguageModelV2 {
40
- readonly specificationVersion = "v2";
36
+ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
37
+ readonly specificationVersion = "v3";
41
38
  readonly modelId: OpenAIChatModelId;
42
39
  readonly supportedUrls: {
43
40
  'image/*': RegExp[];
@@ -46,19 +43,19 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
46
43
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
47
44
  get provider(): string;
48
45
  private getArgs;
49
- doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
50
- doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
46
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
47
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
51
48
  }
52
49
 
53
50
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
54
- declare const openaiCompletionProviderOptions: z.ZodObject<{
55
- echo: z.ZodOptional<z.ZodBoolean>;
56
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
57
- suffix: z.ZodOptional<z.ZodString>;
58
- user: z.ZodOptional<z.ZodString>;
59
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
60
- }, z.core.$strip>;
61
- type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;
51
+ declare const openaiCompletionProviderOptions: _ai_sdk_provider_utils.LazySchema<{
52
+ echo?: boolean | undefined;
53
+ logitBias?: Record<string, number> | undefined;
54
+ suffix?: string | undefined;
55
+ user?: string | undefined;
56
+ logprobs?: number | boolean | undefined;
57
+ }>;
58
+ type OpenAICompletionProviderOptions = InferSchema<typeof openaiCompletionProviderOptions>;
62
59
 
63
60
  type OpenAICompletionConfig = {
64
61
  provider: string;
@@ -69,8 +66,8 @@ type OpenAICompletionConfig = {
69
66
  }) => string;
70
67
  fetch?: FetchFunction;
71
68
  };
72
- declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
73
- readonly specificationVersion = "v2";
69
+ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
70
+ readonly specificationVersion = "v3";
74
71
  readonly modelId: OpenAICompletionModelId;
75
72
  private readonly config;
76
73
  private get providerOptionsName();
@@ -78,8 +75,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
78
75
  get provider(): string;
79
76
  readonly supportedUrls: Record<string, RegExp[]>;
80
77
  private getArgs;
81
- doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
82
- doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
78
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
79
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
83
80
  }
84
81
 
85
82
  type OpenAIConfig = {
@@ -91,59 +88,65 @@ type OpenAIConfig = {
91
88
  headers: () => Record<string, string | undefined>;
92
89
  fetch?: FetchFunction;
93
90
  generateId?: () => string;
91
+ /**
92
+ * File ID prefixes used to identify file IDs in Responses API.
93
+ * When undefined, all file data is treated as base64 content.
94
+ *
95
+ * Examples:
96
+ * - OpenAI: ['file-'] for IDs like 'file-abc123'
97
+ * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
98
+ */
99
+ fileIdPrefixes?: readonly string[];
94
100
  };
95
101
 
96
102
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
97
- declare const openaiEmbeddingProviderOptions: z.ZodObject<{
98
- dimensions: z.ZodOptional<z.ZodNumber>;
99
- user: z.ZodOptional<z.ZodString>;
100
- }, z.core.$strip>;
101
- type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
102
-
103
- declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
104
- readonly specificationVersion = "v2";
103
+ declare const openaiEmbeddingProviderOptions: _ai_sdk_provider_utils.LazySchema<{
104
+ dimensions?: number | undefined;
105
+ user?: string | undefined;
106
+ }>;
107
+ type OpenAIEmbeddingProviderOptions = InferSchema<typeof openaiEmbeddingProviderOptions>;
108
+
109
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
110
+ readonly specificationVersion = "v3";
105
111
  readonly modelId: OpenAIEmbeddingModelId;
106
112
  readonly maxEmbeddingsPerCall = 2048;
107
113
  readonly supportsParallelCalls = true;
108
114
  private readonly config;
109
115
  get provider(): string;
110
116
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
111
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
117
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
112
118
  }
113
119
 
114
- type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
120
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
115
121
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
116
- declare const hasDefaultResponseFormat: Set<string>;
122
+ declare function hasDefaultResponseFormat(modelId: string): boolean;
117
123
 
118
124
  interface OpenAIImageModelConfig extends OpenAIConfig {
119
125
  _internal?: {
120
126
  currentDate?: () => Date;
121
127
  };
122
128
  }
123
- declare class OpenAIImageModel implements ImageModelV2 {
129
+ declare class OpenAIImageModel implements ImageModelV3 {
124
130
  readonly modelId: OpenAIImageModelId;
125
131
  private readonly config;
126
- readonly specificationVersion = "v2";
132
+ readonly specificationVersion = "v3";
127
133
  get maxImagesPerCall(): number;
128
134
  get provider(): string;
129
135
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
130
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
136
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
131
137
  }
132
138
 
133
139
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
134
- declare const openAITranscriptionProviderOptions: z.ZodObject<{
135
- include: z.ZodOptional<z.ZodArray<z.ZodString>>;
136
- language: z.ZodOptional<z.ZodString>;
137
- prompt: z.ZodOptional<z.ZodString>;
138
- temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
139
- timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
140
- word: "word";
141
- segment: "segment";
142
- }>>>>;
143
- }, z.core.$strip>;
144
- type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
145
-
146
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
140
+ declare const openAITranscriptionProviderOptions: _ai_sdk_provider_utils.LazySchema<{
141
+ include?: string[] | undefined;
142
+ language?: string | undefined;
143
+ prompt?: string | undefined;
144
+ temperature?: number | undefined;
145
+ timestampGranularities?: ("word" | "segment")[] | undefined;
146
+ }>;
147
+ type OpenAITranscriptionProviderOptions = InferSchema<typeof openAITranscriptionProviderOptions>;
148
+
149
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
147
150
  providerOptions?: {
148
151
  openai?: OpenAITranscriptionProviderOptions;
149
152
  };
@@ -153,77 +156,912 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
153
156
  currentDate?: () => Date;
154
157
  };
155
158
  }
156
- declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
159
+ declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
157
160
  readonly modelId: OpenAITranscriptionModelId;
158
161
  private readonly config;
159
- readonly specificationVersion = "v2";
162
+ readonly specificationVersion = "v3";
160
163
  get provider(): string;
161
164
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
162
165
  private getArgs;
163
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
166
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
164
167
  }
165
168
 
166
169
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
170
+ declare const openaiSpeechProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
171
+ instructions?: string | null | undefined;
172
+ speed?: number | null | undefined;
173
+ }>;
174
+ type OpenAISpeechCallOptions = InferSchema<typeof openaiSpeechProviderOptionsSchema>;
167
175
 
168
- declare const OpenAIProviderOptionsSchema: z.ZodObject<{
169
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
170
- speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
171
- }, z.core.$strip>;
172
- type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
173
176
  interface OpenAISpeechModelConfig extends OpenAIConfig {
174
177
  _internal?: {
175
178
  currentDate?: () => Date;
176
179
  };
177
180
  }
178
- declare class OpenAISpeechModel implements SpeechModelV2 {
181
+ declare class OpenAISpeechModel implements SpeechModelV3 {
179
182
  readonly modelId: OpenAISpeechModelId;
180
183
  private readonly config;
181
- readonly specificationVersion = "v2";
184
+ readonly specificationVersion = "v3";
182
185
  get provider(): string;
183
186
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
184
187
  private getArgs;
185
- doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
188
+ doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
186
189
  }
187
190
 
188
- declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
189
- type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
191
+ type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
190
192
 
191
- declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
192
- readonly specificationVersion = "v2";
193
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
+ readonly specificationVersion = "v3";
193
195
  readonly modelId: OpenAIResponsesModelId;
194
196
  private readonly config;
195
197
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
196
198
  readonly supportedUrls: Record<string, RegExp[]>;
197
199
  get provider(): string;
198
200
  private getArgs;
199
- doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
200
- doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
201
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
202
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
201
203
  }
202
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
203
- metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
204
- parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
205
- previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
206
- store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
207
- user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
208
- reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
209
- strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
210
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
211
- reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
212
- serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
213
- auto: "auto";
214
- flex: "flex";
215
- priority: "priority";
216
- }>>>;
217
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
218
- "reasoning.encrypted_content": "reasoning.encrypted_content";
219
- "file_search_call.results": "file_search_call.results";
220
- }>>>>;
221
- textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
222
- low: "low";
223
- medium: "medium";
224
- high: "high";
225
- }>>>;
226
- }, z.core.$strip>;
227
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
228
-
229
- export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
204
+
205
+ /**
206
+ * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
207
+ */
208
+ type OpenAIResponsesFileSearchToolComparisonFilter = {
209
+ /**
210
+ * The key to compare against the value.
211
+ */
212
+ key: string;
213
+ /**
214
+ * Specifies the comparison operator: eq, ne, gt, gte, lt, lte, in, nin.
215
+ */
216
+ type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte' | 'in' | 'nin';
217
+ /**
218
+ * The value to compare against the attribute key; supports string, number, boolean, or array of string types.
219
+ */
220
+ value: string | number | boolean | string[];
221
+ };
222
+ /**
223
+ * Combine multiple filters using and or or.
224
+ */
225
+ type OpenAIResponsesFileSearchToolCompoundFilter = {
226
+ /**
227
+ * Type of operation: and or or.
228
+ */
229
+ type: 'and' | 'or';
230
+ /**
231
+ * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
232
+ */
233
+ filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
234
+ };
235
+ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
236
+ type: "unknown_chunk";
237
+ message: string;
238
+ } | {
239
+ type: "response.output_text.delta";
240
+ item_id: string;
241
+ delta: string;
242
+ logprobs?: {
243
+ token: string;
244
+ logprob: number;
245
+ top_logprobs: {
246
+ token: string;
247
+ logprob: number;
248
+ }[];
249
+ }[] | null | undefined;
250
+ } | {
251
+ type: "response.completed" | "response.incomplete";
252
+ response: {
253
+ usage: {
254
+ input_tokens: number;
255
+ output_tokens: number;
256
+ input_tokens_details?: {
257
+ cached_tokens?: number | null | undefined;
258
+ } | null | undefined;
259
+ output_tokens_details?: {
260
+ reasoning_tokens?: number | null | undefined;
261
+ } | null | undefined;
262
+ };
263
+ incomplete_details?: {
264
+ reason: string;
265
+ } | null | undefined;
266
+ service_tier?: string | null | undefined;
267
+ };
268
+ } | {
269
+ type: "response.created";
270
+ response: {
271
+ id: string;
272
+ created_at: number;
273
+ model: string;
274
+ service_tier?: string | null | undefined;
275
+ };
276
+ } | {
277
+ type: "response.output_item.added";
278
+ output_index: number;
279
+ item: {
280
+ type: "message";
281
+ id: string;
282
+ } | {
283
+ type: "reasoning";
284
+ id: string;
285
+ encrypted_content?: string | null | undefined;
286
+ } | {
287
+ type: "function_call";
288
+ id: string;
289
+ call_id: string;
290
+ name: string;
291
+ arguments: string;
292
+ } | {
293
+ type: "web_search_call";
294
+ id: string;
295
+ status: string;
296
+ } | {
297
+ type: "computer_call";
298
+ id: string;
299
+ status: string;
300
+ } | {
301
+ type: "file_search_call";
302
+ id: string;
303
+ } | {
304
+ type: "image_generation_call";
305
+ id: string;
306
+ } | {
307
+ type: "code_interpreter_call";
308
+ id: string;
309
+ container_id: string;
310
+ code: string | null;
311
+ outputs: ({
312
+ type: "logs";
313
+ logs: string;
314
+ } | {
315
+ type: "image";
316
+ url: string;
317
+ })[] | null;
318
+ status: string;
319
+ } | {
320
+ type: "mcp_call";
321
+ id: string;
322
+ status: string;
323
+ approval_request_id?: string | null | undefined;
324
+ } | {
325
+ type: "mcp_list_tools";
326
+ id: string;
327
+ } | {
328
+ type: "mcp_approval_request";
329
+ id: string;
330
+ } | {
331
+ type: "apply_patch_call";
332
+ id: string;
333
+ call_id: string;
334
+ status: "completed" | "in_progress";
335
+ operation: {
336
+ type: "create_file";
337
+ path: string;
338
+ diff: string;
339
+ } | {
340
+ type: "delete_file";
341
+ path: string;
342
+ } | {
343
+ type: "update_file";
344
+ path: string;
345
+ diff: string;
346
+ };
347
+ } | {
348
+ type: "shell_call";
349
+ id: string;
350
+ call_id: string;
351
+ status: "completed" | "in_progress" | "incomplete";
352
+ action: {
353
+ commands: string[];
354
+ };
355
+ };
356
+ } | {
357
+ type: "response.output_item.done";
358
+ output_index: number;
359
+ item: {
360
+ type: "message";
361
+ id: string;
362
+ } | {
363
+ type: "reasoning";
364
+ id: string;
365
+ encrypted_content?: string | null | undefined;
366
+ } | {
367
+ type: "function_call";
368
+ id: string;
369
+ call_id: string;
370
+ name: string;
371
+ arguments: string;
372
+ status: "completed";
373
+ } | {
374
+ type: "code_interpreter_call";
375
+ id: string;
376
+ code: string | null;
377
+ container_id: string;
378
+ outputs: ({
379
+ type: "logs";
380
+ logs: string;
381
+ } | {
382
+ type: "image";
383
+ url: string;
384
+ })[] | null;
385
+ } | {
386
+ type: "image_generation_call";
387
+ id: string;
388
+ result: string;
389
+ } | {
390
+ type: "web_search_call";
391
+ id: string;
392
+ status: string;
393
+ action: {
394
+ type: "search";
395
+ query?: string | null | undefined;
396
+ sources?: ({
397
+ type: "url";
398
+ url: string;
399
+ } | {
400
+ type: "api";
401
+ name: string;
402
+ })[] | null | undefined;
403
+ } | {
404
+ type: "open_page";
405
+ url?: string | null | undefined;
406
+ } | {
407
+ type: "find_in_page";
408
+ url?: string | null | undefined;
409
+ pattern?: string | null | undefined;
410
+ };
411
+ } | {
412
+ type: "file_search_call";
413
+ id: string;
414
+ queries: string[];
415
+ results?: {
416
+ attributes: Record<string, string | number | boolean>;
417
+ file_id: string;
418
+ filename: string;
419
+ score: number;
420
+ text: string;
421
+ }[] | null | undefined;
422
+ } | {
423
+ type: "local_shell_call";
424
+ id: string;
425
+ call_id: string;
426
+ action: {
427
+ type: "exec";
428
+ command: string[];
429
+ timeout_ms?: number | undefined;
430
+ user?: string | undefined;
431
+ working_directory?: string | undefined;
432
+ env?: Record<string, string> | undefined;
433
+ };
434
+ } | {
435
+ type: "computer_call";
436
+ id: string;
437
+ status: "completed";
438
+ } | {
439
+ type: "mcp_call";
440
+ id: string;
441
+ status: string;
442
+ arguments: string;
443
+ name: string;
444
+ server_label: string;
445
+ output?: string | null | undefined;
446
+ error?: string | {
447
+ [x: string]: unknown;
448
+ type?: string | undefined;
449
+ code?: string | number | undefined;
450
+ message?: string | undefined;
451
+ } | null | undefined;
452
+ approval_request_id?: string | null | undefined;
453
+ } | {
454
+ type: "mcp_list_tools";
455
+ id: string;
456
+ server_label: string;
457
+ tools: {
458
+ name: string;
459
+ input_schema: any;
460
+ description?: string | undefined;
461
+ annotations?: Record<string, unknown> | undefined;
462
+ }[];
463
+ error?: string | {
464
+ [x: string]: unknown;
465
+ type?: string | undefined;
466
+ code?: string | number | undefined;
467
+ message?: string | undefined;
468
+ } | undefined;
469
+ } | {
470
+ type: "mcp_approval_request";
471
+ id: string;
472
+ server_label: string;
473
+ name: string;
474
+ arguments: string;
475
+ approval_request_id?: string | undefined;
476
+ } | {
477
+ type: "apply_patch_call";
478
+ id: string;
479
+ call_id: string;
480
+ status: "completed" | "in_progress";
481
+ operation: {
482
+ type: "create_file";
483
+ path: string;
484
+ diff: string;
485
+ } | {
486
+ type: "delete_file";
487
+ path: string;
488
+ } | {
489
+ type: "update_file";
490
+ path: string;
491
+ diff: string;
492
+ };
493
+ } | {
494
+ type: "shell_call";
495
+ id: string;
496
+ call_id: string;
497
+ status: "completed" | "in_progress" | "incomplete";
498
+ action: {
499
+ commands: string[];
500
+ };
501
+ };
502
+ } | {
503
+ type: "response.function_call_arguments.delta";
504
+ item_id: string;
505
+ output_index: number;
506
+ delta: string;
507
+ } | {
508
+ type: "response.image_generation_call.partial_image";
509
+ item_id: string;
510
+ output_index: number;
511
+ partial_image_b64: string;
512
+ } | {
513
+ type: "response.code_interpreter_call_code.delta";
514
+ item_id: string;
515
+ output_index: number;
516
+ delta: string;
517
+ } | {
518
+ type: "response.code_interpreter_call_code.done";
519
+ item_id: string;
520
+ output_index: number;
521
+ code: string;
522
+ } | {
523
+ type: "response.output_text.annotation.added";
524
+ annotation: {
525
+ type: "url_citation";
526
+ start_index: number;
527
+ end_index: number;
528
+ url: string;
529
+ title: string;
530
+ } | {
531
+ type: "file_citation";
532
+ file_id: string;
533
+ filename: string;
534
+ index: number;
535
+ } | {
536
+ type: "container_file_citation";
537
+ container_id: string;
538
+ file_id: string;
539
+ filename: string;
540
+ start_index: number;
541
+ end_index: number;
542
+ } | {
543
+ type: "file_path";
544
+ file_id: string;
545
+ index: number;
546
+ };
547
+ } | {
548
+ type: "response.reasoning_summary_part.added";
549
+ item_id: string;
550
+ summary_index: number;
551
+ } | {
552
+ type: "response.reasoning_summary_text.delta";
553
+ item_id: string;
554
+ summary_index: number;
555
+ delta: string;
556
+ } | {
557
+ type: "response.reasoning_summary_part.done";
558
+ item_id: string;
559
+ summary_index: number;
560
+ } | {
561
+ type: "response.apply_patch_call_operation_diff.delta";
562
+ item_id: string;
563
+ output_index: number;
564
+ delta: string;
565
+ obfuscation?: string | null | undefined;
566
+ } | {
567
+ type: "response.apply_patch_call_operation_diff.done";
568
+ item_id: string;
569
+ output_index: number;
570
+ diff: string;
571
+ } | {
572
+ type: "error";
573
+ sequence_number: number;
574
+ error: {
575
+ type: string;
576
+ code: string;
577
+ message: string;
578
+ param?: string | null | undefined;
579
+ };
580
+ }>;
581
+
582
/**
 * TypeScript type inferred from `openaiResponsesChunkSchema` — the union of
 * all streaming chunk variants declared by that schema.
 */
type OpenaiResponsesChunk = InferSchema<typeof openaiResponsesChunkSchema>;
/**
 * The `annotation` payload of the 'response.output_text.annotation.added'
 * chunk variant (url_citation | file_citation | container_file_citation | file_path).
 */
type ResponsesOutputTextAnnotationProviderMetadata = Extract<OpenaiResponsesChunk, {
    type: 'response.output_text.annotation.added';
}>['annotation'];
/**
 * Metadata attached to a text part: the id of the item it belongs to plus
 * any annotations collected for it.
 */
type ResponsesTextProviderMetadata = {
    itemId: string;
    annotations?: Array<ResponsesOutputTextAnnotationProviderMetadata>;
};
/** `ResponsesTextProviderMetadata` namespaced under the `openai` provider key. */
type OpenaiResponsesTextProviderMetadata = {
    openai: ResponsesTextProviderMetadata;
};
/**
 * Metadata attached to a source document, discriminated by citation type.
 * Note: `container_file_citation` carries a `containerId` instead of an `index`.
 */
type ResponsesSourceDocumentProviderMetadata = {
    type: 'file_citation';
    fileId: string;
    index: number;
} | {
    type: 'container_file_citation';
    fileId: string;
    containerId: string;
} | {
    type: 'file_path';
    fileId: string;
    index: number;
};
/** `ResponsesSourceDocumentProviderMetadata` namespaced under the `openai` provider key. */
type OpenaiResponsesSourceDocumentProviderMetadata = {
    openai: ResponsesSourceDocumentProviderMetadata;
};
609
+
610
/**
 * Schema for the apply_patch input - what the model sends.
 *
 * Refer to the official spec here: https://platform.openai.com/docs/api-reference/responses/create#responses_create-input-input_item_list-item-apply_patch_tool_call
 *
 */
declare const applyPatchInputSchema: _ai_sdk_provider_utils.LazySchema<{
    callId: string;
    // Discriminated on `type`; delete_file has no diff payload.
    operation: {
        type: "create_file";
        path: string;
        diff: string;
    } | {
        type: "delete_file";
        path: string;
    } | {
        type: "update_file";
        path: string;
        diff: string;
    };
}>;
/**
 * Schema for the apply_patch output - what we send back.
 * `status` reports whether the patch applied; `output` is optional log text.
 */
declare const applyPatchOutputSchema: _ai_sdk_provider_utils.LazySchema<{
    status: "completed" | "failed";
    output?: string | undefined;
}>;
/**
 * Schema for tool arguments (configuration options).
 * The apply_patch tool doesn't require any configuration options,
 * hence the empty `Record<string, never>` shape.
 */
declare const applyPatchArgsSchema: _ai_sdk_provider_utils.LazySchema<Record<string, never>>;
643
/**
 * Discriminated union of the file operations the apply_patch tool can request:
 * create_file, delete_file, or update_file. Mirrors the `operation` member of
 * `applyPatchInputSchema`.
 */
type ApplyPatchOperation = {
    type: 'create_file';
    /**
     * Path of the file to create relative to the workspace root.
     */
    path: string;
    /**
     * Unified diff content to apply when creating the file.
     */
    diff: string;
} | {
    type: 'delete_file';
    /**
     * Path of the file to delete relative to the workspace root.
     */
    path: string;
} | {
    type: 'update_file';
    /**
     * Path of the file to update relative to the workspace root.
     */
    path: string;
    /**
     * Unified diff content to apply to the existing file.
     */
    diff: string;
};
673
/**
 * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
 * codebase using structured diffs. Instead of just suggesting edits, the model
 * emits patch operations that your application applies and then reports back on,
 * enabling iterative, multi-step code editing workflows.
 *
 * The tool factory creates a provider-defined tool that:
 * - Receives patch operations from the model (create_file, update_file, delete_file)
 * - Returns the status of applying those patches (completed or failed)
 *
 */
declare const applyPatchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
    /**
     * The unique ID of the apply patch tool call generated by the model.
     */
    callId: string;
    /**
     * The specific create, delete, or update instruction for the apply_patch tool call.
     */
    operation: ApplyPatchOperation;
}, {
    /**
     * The status of the apply patch tool call output.
     * - 'completed': The patch was applied successfully.
     * - 'failed': The patch failed to apply.
     */
    status: "completed" | "failed";
    /**
     * Optional human-readable log text from the apply patch tool
     * (e.g., patch results or errors).
     */
    output?: string;
}, {}>;
/**
 * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
 * codebase using structured diffs. Instead of just suggesting edits, the model
 * emits patch operations that your application applies and then reports back on,
 * enabling iterative, multi-step code editing workflows.
 */
declare const applyPatch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
    /**
     * The unique ID of the apply patch tool call generated by the model.
     */
    callId: string;
    /**
     * The specific create, delete, or update instruction for the apply_patch tool call.
     */
    operation: ApplyPatchOperation;
}, {
    /**
     * The status of the apply patch tool call output.
     * - 'completed': The patch was applied successfully.
     * - 'failed': The patch failed to apply.
     */
    status: "completed" | "failed";
    /**
     * Optional human-readable log text from the apply patch tool
     * (e.g., patch results or errors).
     */
    output?: string;
}, {}>;
734
+
735
/**
 * Schema for the code interpreter tool-call input: the container the code ran
 * in and, when available, the code itself.
 */
declare const codeInterpreterInputSchema: _ai_sdk_provider_utils.LazySchema<{
    containerId: string;
    code?: string | null | undefined;
}>;
/**
 * Schema for the code interpreter tool-call output: a list of log or image
 * outputs, or null when none are available.
 */
declare const codeInterpreterOutputSchema: _ai_sdk_provider_utils.LazySchema<{
    outputs?: ({
        type: "logs";
        logs: string;
    } | {
        type: "image";
        url: string;
    })[] | null | undefined;
}>;
/**
 * Schema for the code interpreter configuration arguments
 * (see `CodeInterpreterArgs` for the documented shape).
 */
declare const codeInterpreterArgsSchema: _ai_sdk_provider_utils.LazySchema<{
    container?: string | {
        fileIds?: string[] | undefined;
    } | undefined;
}>;
type CodeInterpreterArgs = {
    /**
     * The code interpreter container.
     * Can be a container ID
     * or an object that specifies uploaded file IDs to make available to your code.
     */
    container?: string | {
        fileIds?: string[];
    };
};
/**
 * Provider tool factory for the code interpreter tool, typed with the
 * input/output shapes above and configured via `CodeInterpreterArgs`.
 */
declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
    /**
     * The code to run, or null if not available.
     */
    code?: string | null;
    /**
     * The ID of the container used to run the code.
     */
    containerId: string;
}, {
    /**
     * The outputs generated by the code interpreter, such as logs or images.
     * Can be null if no outputs are available.
     */
    outputs?: Array<{
        type: "logs";
        /**
         * The logs output from the code interpreter.
         */
        logs: string;
    } | {
        type: "image";
        /**
         * The URL of the image output from the code interpreter.
         */
        url: string;
    }> | null;
}, CodeInterpreterArgs>;
/**
 * Creates a code interpreter tool instance from optional configuration
 * arguments.
 */
declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_utils.Tool<{
    /**
     * The code to run, or null if not available.
     */
    code?: string | null;
    /**
     * The ID of the container used to run the code.
     */
    containerId: string;
}, {
    /**
     * The outputs generated by the code interpreter, such as logs or images.
     * Can be null if no outputs are available.
     */
    outputs?: Array<{
        type: "logs";
        /**
         * The logs output from the code interpreter.
         */
        logs: string;
    } | {
        type: "image";
        /**
         * The URL of the image output from the code interpreter.
         */
        url: string;
    }> | null;
}>;
819
+
820
/**
 * Schema for the file search configuration arguments (vector stores, result
 * limit, ranking options, and an optional filter).
 */
declare const fileSearchArgsSchema: _ai_sdk_provider_utils.LazySchema<{
    vectorStoreIds: string[];
    maxNumResults?: number | undefined;
    ranking?: {
        ranker?: string | undefined;
        scoreThreshold?: number | undefined;
    } | undefined;
    filters?: any;
}>;
/**
 * Schema for the file search tool-call output: the executed queries and the
 * matched results (or null).
 */
declare const fileSearchOutputSchema: _ai_sdk_provider_utils.LazySchema<{
    queries: string[];
    results: {
        attributes: Record<string, unknown>;
        fileId: string;
        filename: string;
        score: number;
        text: string;
    }[] | null;
}>;
/**
 * Provider tool factory for the file search tool. Takes no call input,
 * returns queries/results, and is configured with the args documented below.
 */
declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
    /**
     * The search queries that were executed.
     */
    queries: string[];
    /**
     * The results of the file search tool call.
     */
    results: null | {
        /**
         * Set of 16 key-value pairs that can be attached to an object.
         * This can be useful for storing additional information about the object
         * in a structured format, and querying for objects via API or the dashboard.
         * Keys are strings with a maximum length of 64 characters.
         * Values are strings with a maximum length of 512 characters, booleans, or numbers.
         */
        attributes: Record<string, unknown>;
        /**
         * The unique ID of the file.
         */
        fileId: string;
        /**
         * The name of the file.
         */
        filename: string;
        /**
         * The relevance score of the file - a value between 0 and 1.
         */
        score: number;
        /**
         * The text that was retrieved from the file.
         */
        text: string;
    }[];
}, {
    /**
     * List of vector store IDs to search through.
     */
    vectorStoreIds: string[];
    /**
     * Maximum number of search results to return. Defaults to 10.
     */
    maxNumResults?: number;
    /**
     * Ranking options for the search.
     */
    ranking?: {
        /**
         * The ranker to use for the file search.
         */
        ranker?: string;
        /**
         * The score threshold for the file search, a number between 0 and 1.
         * Numbers closer to 1 will attempt to return only the most relevant results,
         * but may return fewer results.
         */
        scoreThreshold?: number;
    };
    /**
     * A filter to apply.
     */
    filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
}>;
902
+
903
/**
 * Schema for the image generation configuration arguments
 * (see `ImageGenerationArgs` for the documented shape).
 */
declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
    background?: "auto" | "transparent" | "opaque" | undefined;
    inputFidelity?: "low" | "high" | undefined;
    inputImageMask?: {
        fileId?: string | undefined;
        imageUrl?: string | undefined;
    } | undefined;
    model?: string | undefined;
    moderation?: "auto" | undefined;
    outputCompression?: number | undefined;
    outputFormat?: "png" | "jpeg" | "webp" | undefined;
    partialImages?: number | undefined;
    quality?: "auto" | "low" | "medium" | "high" | undefined;
    size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined;
}>;
/**
 * Schema for the image generation tool-call output: the generated image as a
 * base64-encoded string.
 */
declare const imageGenerationOutputSchema: _ai_sdk_provider_utils.LazySchema<{
    result: string;
}>;
type ImageGenerationArgs = {
    /**
     * Background type for the generated image. Default is 'auto'.
     */
    background?: 'auto' | 'opaque' | 'transparent';
    /**
     * Input fidelity for the generated image. Default is 'low'.
     */
    inputFidelity?: 'low' | 'high';
    /**
     * Optional mask for inpainting.
     * Contains image_url (string, optional) and file_id (string, optional).
     */
    inputImageMask?: {
        /**
         * File ID for the mask image.
         */
        fileId?: string;
        /**
         * Base64-encoded mask image.
         */
        imageUrl?: string;
    };
    /**
     * The image generation model to use. Default: gpt-image-1.
     */
    model?: string;
    /**
     * Moderation level for the generated image. Default: auto.
     */
    moderation?: 'auto';
    /**
     * Compression level for the output image. Default: 100.
     */
    outputCompression?: number;
    /**
     * The output format of the generated image. One of png, webp, or jpeg.
     * Default: png
     */
    outputFormat?: 'png' | 'jpeg' | 'webp';
    /**
     * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
     */
    partialImages?: number;
    /**
     * The quality of the generated image.
     * One of low, medium, high, or auto. Default: auto.
     */
    quality?: 'auto' | 'low' | 'medium' | 'high';
    /**
     * The size of the generated image.
     * One of 1024x1024, 1024x1536, 1536x1024, or auto.
     * Default: auto.
     */
    size?: 'auto' | '1024x1024' | '1024x1536' | '1536x1024';
};
/**
 * Creates an image generation tool instance from optional configuration
 * arguments. The tool takes no call input and returns the generated image.
 */
declare const imageGeneration: (args?: ImageGenerationArgs) => _ai_sdk_provider_utils.Tool<{}, {
    /**
     * The generated image encoded in base64.
     */
    result: string;
}>;
983
+
984
/**
 * Schema for the web search preview configuration arguments
 * (context size and approximate user location).
 */
declare const webSearchPreviewArgsSchema: _ai_sdk_provider_utils.LazySchema<{
    searchContextSize?: "low" | "medium" | "high" | undefined;
    userLocation?: {
        type: "approximate";
        country?: string | undefined;
        city?: string | undefined;
        region?: string | undefined;
        timezone?: string | undefined;
    } | undefined;
}>;
/**
 * Schema for the web search preview tool-call input: the tool takes no input,
 * hence the empty `Record<string, never>` shape.
 */
declare const webSearchPreviewInputSchema: _ai_sdk_provider_utils.LazySchema<Record<string, never>>;
/**
 * Provider tool factory for the web search preview tool. Takes no call input;
 * the output describes the web action the model performed.
 */
declare const webSearchPreview: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
    /**
     * An object describing the specific action taken in this web search call.
     * Includes details on how the model used the web (search, open_page, find_in_page).
     */
    action: {
        /**
         * Action type "search" - Performs a web search query.
         */
        type: "search";
        /**
         * The search query.
         */
        query?: string;
    } | {
        /**
         * Action type "openPage" - Opens a specific URL from search results.
         */
        type: "openPage";
        /**
         * The URL opened by the model.
         */
        url?: string | null;
    } | {
        /**
         * Action type "findInPage": Searches for a pattern within a loaded page.
         */
        type: "findInPage";
        /**
         * The URL of the page searched for the pattern.
         */
        url?: string | null;
        /**
         * The pattern or text to search for within the page.
         */
        pattern?: string | null;
    };
}, {
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high";
    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
        /**
         * Type of location (always 'approximate')
         */
        type: "approximate";
        /**
         * Two-letter ISO country code (e.g., 'US', 'GB')
         */
        country?: string;
        /**
         * City name (free text, e.g., 'Minneapolis')
         */
        city?: string;
        /**
         * Region name (free text, e.g., 'Minnesota')
         */
        region?: string;
        /**
         * IANA timezone (e.g., 'America/Chicago')
         */
        timezone?: string;
    };
}>;
1066
+
1067
+ export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, type ResponsesSourceDocumentProviderMetadata, type ResponsesTextProviderMetadata, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };