@ai-sdk/openai 2.0.0-canary.2 → 2.0.0-canary.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,262 @@
  # @ai-sdk/openai

+ ## 2.0.0-canary.20
+
+ ### Patch Changes
+
+ - Updated dependencies [faf8446]
+   - @ai-sdk/provider-utils@3.0.0-canary.19
+
+ ## 2.0.0-canary.19
+
+ ### Patch Changes
+
+ - Updated dependencies [40acf9b]
+   - @ai-sdk/provider-utils@3.0.0-canary.18
+
+ ## 2.0.0-canary.18
+
+ ### Major Changes
+
+ - 516be5b: ### Move Image Model Settings into generate options
+
+   Image Models no longer have settings. Instead, `maxImagesPerCall` can be passed directly to `generateImage()`. All other image settings can be passed to `providerOptions[provider]`.
+
+   Before
+
+   ```js
+   await generateImage({
+     model: luma.image('photon-flash-1', {
+       maxImagesPerCall: 5,
+       pollIntervalMillis: 500,
+     }),
+     prompt,
+     n: 10,
+   });
+   ```
+
+   After
+
+   ```js
+   await generateImage({
+     model: luma.image('photon-flash-1'),
+     prompt,
+     n: 10,
+     maxImagesPerCall: 5,
+     providerOptions: {
+       luma: { pollIntervalMillis: 5 },
+     },
+   });
+   ```
+
+   Pull Request: https://github.com/vercel/ai/pull/6180
+
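For this package's own image models the same migration looks like the sketch below. It is a minimal, illustrative example assuming `experimental_generateImage` from the `ai` package and the `gpt-image-1` model id added in this release; the `quality` entry under `providerOptions.openai` is an assumed provider-specific option, not something defined in this diff.

```js
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { images } = await generateImage({
  model: openai.image('gpt-image-1'), // no settings argument anymore
  prompt: 'A watercolor lighthouse at dusk',
  n: 4,
  maxImagesPerCall: 2, // formerly a setting on openai.image(...)
  providerOptions: {
    openai: { quality: 'high' }, // illustrative OpenAI-specific option
  },
});
```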
+ ### Patch Changes
+
+ - Updated dependencies [ea7a7c9]
+   - @ai-sdk/provider-utils@3.0.0-canary.17
+
+ ## 2.0.0-canary.17
+
+ ### Patch Changes
+
+ - 52ce942: chore(providers/openai): remove & enable strict compatibility by default
+ - Updated dependencies [87b828f]
+   - @ai-sdk/provider-utils@3.0.0-canary.16
+
+ ## 2.0.0-canary.16
+
+ ### Patch Changes
+
+ - 928fadf: fix(providers/openai): logprobs for stream alongside completion model
+ - 6f231db: fix(providers): always use optional instead of mix of nullish for providerOptions
+ - Updated dependencies [a571d6e]
+ - Updated dependencies [a8c8bd5]
+ - Updated dependencies [7979f7f]
+ - Updated dependencies [41fa418]
+   - @ai-sdk/provider-utils@3.0.0-canary.15
+   - @ai-sdk/provider@2.0.0-canary.14
+
+ ## 2.0.0-canary.15
+
+ ### Patch Changes
+
+ - 136819b: chore(providers/openai): re-introduce logprobs as providerMetadata
+ - 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
+
+   The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
+
+   ```js
+   const prompt = 'Santa Claus driving a Cadillac';
+
+   const { providerMetadata } = await experimental_generateImage({
+     model: openai.image('dall-e-3'),
+     prompt,
+   });
+
+   const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
+
+   console.log({
+     prompt,
+     revisedPrompt,
+   });
+   ```
+
+ - 284353f: fix(providers/openai): zod parse error with function
+ - Updated dependencies [957b739]
+ - Updated dependencies [9bd5ab5]
+   - @ai-sdk/provider-utils@3.0.0-canary.14
+   - @ai-sdk/provider@2.0.0-canary.13
+
+ ## 2.0.0-canary.14
+
+ ### Patch Changes
+
+ - fa758ea: feat(provider/openai): add o3 & o4-mini with developer systemMessageMode
+ - Updated dependencies [7b3ae3f]
+ - Updated dependencies [0ff02bb]
+   - @ai-sdk/provider@2.0.0-canary.12
+   - @ai-sdk/provider-utils@3.0.0-canary.13
+
+ ## 2.0.0-canary.13
+
+ ### Patch Changes
+
+ - 177526b: chore(providers/openai-transcription): switch to providerOptions
+ - c15dfbf: feat (providers/openai): add gpt-image-1 model id to image settings
+ - 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
+ - 4617fab: chore(embedding-models): remove remaining settings
+ - Updated dependencies [9bf7291]
+ - Updated dependencies [4617fab]
+ - Updated dependencies [e030615]
+   - @ai-sdk/provider@2.0.0-canary.11
+   - @ai-sdk/provider-utils@3.0.0-canary.12
+
+ ## 2.0.0-canary.12
+
+ ### Patch Changes
+
+ - db72adc: chore(providers/openai): update completion model to use providerOptions
+ - 66962ed: fix(packages): export node10 compatible types
+ - 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
+ - 7df7a25: feat (providers/openai): support gpt-image-1 image generation
+ - Updated dependencies [66962ed]
+ - Updated dependencies [9301f86]
+ - Updated dependencies [a3f768e]
+   - @ai-sdk/provider-utils@3.0.0-canary.11
+   - @ai-sdk/provider@2.0.0-canary.10
+
+ ## 2.0.0-canary.11
+
+ ### Patch Changes
+
+ - 8493141: feat (providers/openai): add support for reasoning summaries
+ - Updated dependencies [e86be6f]
+   - @ai-sdk/provider@2.0.0-canary.9
+   - @ai-sdk/provider-utils@3.0.0-canary.10
+
+ ## 2.0.0-canary.10
+
+ ### Patch Changes
+
+ - 3bd3c0b: chore(providers/openai): update embedding model to use providerOptions
+ - Updated dependencies [95857aa]
+ - Updated dependencies [7ea4132]
+   - @ai-sdk/provider@2.0.0-canary.8
+   - @ai-sdk/provider-utils@3.0.0-canary.9
+
+ ## 2.0.0-canary.9
+
+ ### Patch Changes
+
+ - d63bcbc: feat (provider/openai): o4 updates for responses api
+ - d2af019: feat (providers/openai): add gpt-4.1 models
+ - 870c5c0: feat (providers/openai): add o3 and o4-mini models
+ - 06bac05: fix (openai): structure output for responses model
+
+ ## 2.0.0-canary.8
+
+ ### Patch Changes
+
+ - 8aa9e20: feat: add speech with experimental_generateSpeech
+ - Updated dependencies [5d142ab]
+ - Updated dependencies [b6b43c7]
+ - Updated dependencies [8aa9e20]
+ - Updated dependencies [3795467]
+   - @ai-sdk/provider-utils@3.0.0-canary.8
+   - @ai-sdk/provider@2.0.0-canary.7
+
+ ## 2.0.0-canary.7
+
+ ### Patch Changes
+
+ - 26735b5: chore(embedding-model): add v2 interface
+ - 443d8ec: feat(embedding-model-v2): add response body field
+ - fd65bc6: chore(embedding-model-v2): rename rawResponse to response
+ - Updated dependencies [26735b5]
+ - Updated dependencies [443d8ec]
+ - Updated dependencies [14c9410]
+ - Updated dependencies [d9c98f4]
+ - Updated dependencies [c4a2fec]
+ - Updated dependencies [0054544]
+ - Updated dependencies [9e9c809]
+ - Updated dependencies [32831c6]
+ - Updated dependencies [d0f9495]
+ - Updated dependencies [fd65bc6]
+ - Updated dependencies [393138b]
+ - Updated dependencies [7182d14]
+   - @ai-sdk/provider@2.0.0-canary.6
+   - @ai-sdk/provider-utils@3.0.0-canary.7
+
+ ## 2.0.0-canary.6
+
+ ### Patch Changes
+
+ - 948b755: chore(providers/openai): convert to providerOptions
+ - 3b1ea10: adding support for gpt-4o-search-preview and handling unsupported parameters
+ - 442be08: fix: propagate openai transcription fixes
+ - 5147e6e: chore(openai): remove simulateStreaming
+ - c2b92cc: chore(openai): remove legacy function calling
+ - f10304b: feat(tool-calling): don't require the user to have to pass parameters
+ - Updated dependencies [411e483]
+ - Updated dependencies [79457bd]
+ - Updated dependencies [ad80501]
+ - Updated dependencies [1766ede]
+ - Updated dependencies [f10304b]
+   - @ai-sdk/provider@2.0.0-canary.5
+   - @ai-sdk/provider-utils@3.0.0-canary.6
+
+ ## 2.0.0-canary.5
+
+ ### Patch Changes
+
+ - Updated dependencies [6f6bb89]
+   - @ai-sdk/provider@2.0.0-canary.4
+   - @ai-sdk/provider-utils@3.0.0-canary.5
+
+ ## 2.0.0-canary.4
+
+ ### Patch Changes
+
+ - Updated dependencies [d1a1aa1]
+   - @ai-sdk/provider@2.0.0-canary.3
+   - @ai-sdk/provider-utils@3.0.0-canary.4
+
+ ## 2.0.0-canary.3
+
+ ### Patch Changes
+
+ - a166433: feat: add transcription with experimental_transcribe
+ - 0a87932: core (ai): change transcription model mimeType to mediaType
+ - 0a87932: fix (provider/openai): increase transcription model resilience
+ - Updated dependencies [a166433]
+ - Updated dependencies [abf9a79]
+ - Updated dependencies [9f95b35]
+ - Updated dependencies [0a87932]
+ - Updated dependencies [6dc848c]
+   - @ai-sdk/provider-utils@3.0.0-canary.3
+   - @ai-sdk/provider@2.0.0-canary.2
+
  ## 2.0.0-canary.2

  ### Patch Changes
package/README.md CHANGED
@@ -1,6 +1,6 @@
  # AI SDK - OpenAI Provider

- The **[OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai)** for the [AI SDK](https://sdk.vercel.ai/docs)
+ The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs)
  contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

  ## Setup
@@ -33,4 +33,4 @@ const { text } = await generateText({

  ## Documentation

- Please check out the **[OpenAI provider documentation](https://sdk.vercel.ai/providers/ai-sdk-providers/openai)** for more information.
+ Please check out the **[OpenAI provider documentation](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for more information.
package/dist/index.d.mts CHANGED
@@ -1,133 +1,13 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';

- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
- interface OpenAIChatSettings {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
- Whether to use structured outputs. Defaults to false.
-
- When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Whether to use legacy function calling. Defaults to false.
-
- Required by some open source inference engines which do not support the `tools` API. May also
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
- which causes `streamObject` to be non-streaming.
-
- Prefer setting `parallelToolCalls: false` over this option.
-
- @deprecated this API is supported but deprecated by OpenAI.
- */
- useLegacyFunctionCalling?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- /**
- Simulates streaming by using a normal generate call and returning it as a stream.
- Enable this if the model that you are using does not support streaming.
-
- Defaults to `false`.
-
- @deprecated Use `simulateStreamingMiddleware` instead.
- */
- simulateStreaming?: boolean;
- /**
- Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort?: 'low' | 'medium' | 'high';
- }
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
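The removed `OpenAIChatSettings` interface above corresponds to the changelog entries that move per-call settings into `providerOptions`. A minimal sketch of the new call shape, assuming the option names carry over unchanged (e.g. `reasoningEffort`, `user`); treat the exact keys as assumptions rather than a definitive list:

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('o3-mini'), // the second settings argument is gone
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low', // was OpenAIChatSettings.reasoningEffort
      user: 'user-1234', // was OpenAIChatSettings.user
    },
  },
});
```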

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
- /**
- Echo back the prompt in addition to the completion.
- */
- echo?: boolean;
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }

  type OpenAICompletionConfig = {
  provider: string;
- compatibility: 'strict' | 'compatible';
  headers: () => Record<string, string | undefined>;
  url: (options: {
  modelId: string;
@@ -137,47 +17,20 @@ type OpenAICompletionConfig = {
  };
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompletionModelId;
- readonly settings: OpenAICompletionSettings;
  private readonly config;
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+ private get providerOptionsName();
+ constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
  get provider(): string;
+ readonly supportedUrls: Record<string, RegExp[]>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
  }

  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- interface OpenAIEmbeddingSettings {
- /**
- Override the maximum number of embeddings per call.
- */
- maxEmbeddingsPerCall?: number;
- /**
- Override the parallelism of embedding calls.
- */
- supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }
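With `OpenAIEmbeddingSettings` removed, per-call options such as `dimensions` travel with the request instead. A minimal sketch, assuming `embed` from the `ai` package accepts `providerOptions` the same way `generateText` does:

```js
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embedding } = await embed({
  model: openai.textEmbeddingModel('text-embedding-3-small'), // no settings argument
  value: 'sunny day at the beach',
  providerOptions: {
    openai: { dimensions: 512 }, // was OpenAIEmbeddingSettings.dimensions
  },
});
```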

- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
- interface OpenAIImageSettings {
- /**
- Override the maximum number of images per call (default is dependent on the
- model, or 1 for an unknown model).
- */
- maxImagesPerCall?: number;
- }
+ type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});

  declare const WebSearchPreviewParameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
  declare function webSearchPreviewTool({ searchContextSize, userLocation, }?: {
@@ -199,20 +52,24 @@ declare const openaiTools: {
  webSearchPreview: typeof webSearchPreviewTool;
  };

- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+
+ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

  interface OpenAIProvider extends ProviderV2 {
- (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ (modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+ (modelId: OpenAIChatModelId): LanguageModelV2;
  /**
  Creates an OpenAI model for text generation.
  */
- languageModel(modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- languageModel(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ languageModel(modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+ languageModel(modelId: OpenAIChatModelId): LanguageModelV2;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ chat(modelId: OpenAIChatModelId): LanguageModelV2;
  /**
  Creates an OpenAI responses API model for text generation.
  */
@@ -220,29 +77,38 @@ interface OpenAIProvider extends ProviderV2 {
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): LanguageModelV2;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV2;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.

  @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
+ @deprecated Use `imageModel` instead.
  */
- image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ image(modelId: OpenAIImageModelId): ImageModelV2;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ imageModel(modelId: OpenAIImageModelId): ImageModelV2;
+ /**
+ Creates a model for transcription.
+ */
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ /**
+ Creates a model for speech generation.
+ */
+ speech(modelId: OpenAISpeechModelId): SpeechModelV1;
  /**
  OpenAI-specific tools.
  */
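The new `transcription` and `speech` factories pair with the `experimental_transcribe` and `experimental_generateSpeech` helpers named in the changelog. A minimal sketch; the file name and the `'alloy'` voice are illustrative assumptions rather than part of the typings shown here:

```js
import {
  experimental_transcribe as transcribe,
  experimental_generateSpeech as generateSpeech,
} from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

// Transcribe an audio file with an OpenAITranscriptionModelId.
const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'),
});

// Turn the transcript back into audio with an OpenAISpeechModelId.
const { audio } = await generateSpeech({
  model: openai.speech('tts-1'),
  text,
  voice: 'alloy', // assumed voice name
});
```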
@@ -270,12 +136,6 @@ interface OpenAIProviderSettings {
  */
  headers?: Record<string, string>;
  /**
- OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
- and `compatible` when using 3rd party providers. In `compatible` mode, newer
- information such as streamOptions are not being sent. Defaults to 'compatible'.
- */
- compatibility?: 'strict' | 'compatible';
- /**
  Provider name. Overrides the `openai` default name for 3rd party providers.
  */
  name?: string;
@@ -290,7 +150,7 @@ Create an OpenAI provider instance.
  */
  declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
  /**
- Default OpenAI provider instance. It uses 'strict' compatibility mode.
+ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;
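With `compatibility` removed from `OpenAIProviderSettings` and strict compatibility enabled by default (see the 2.0.0-canary.17 changelog entry), custom provider setup shrinks to the remaining options. A minimal sketch, assuming the usual `apiKey` field and its environment-variable fallback:

```js
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY, // falls back to this env var if omitted
  // compatibility: 'strict', // no longer accepted; strict behavior is the default
});
```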
@@ -303,24 +163,27 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
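The `reasoningSummary` field added to the responses provider options matches the 2.0.0-canary.11 entry "add support for reasoning summaries". A minimal sketch of passing it alongside `reasoningEffort`; the `'auto'` value is an assumption, since the schema above only types the field as a string:

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: "How many r's are in strawberry?",
  providerOptions: {
    openai: {
      reasoningEffort: 'medium',
      reasoningSummary: 'auto', // assumed value; schema types it as string
    },
  },
});
```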