@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,116 @@
  # @ai-sdk/openai

+ ## 2.0.0-canary.10
+
+ ### Patch Changes
+
+ - 3bd3c0b: chore(providers/openai): update embedding model to use providerOptions
+ - Updated dependencies [95857aa]
+ - Updated dependencies [7ea4132]
+   - @ai-sdk/provider@2.0.0-canary.8
+   - @ai-sdk/provider-utils@3.0.0-canary.9
+
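The canary.10 change moves the embedding settings into per-call provider options; the `dimensions` and `user` fields removed from `OpenAIEmbeddingSettings` later in this diff are the ones affected. A minimal sketch of the new call shape, assuming the option names carry over unchanged under the `openai` key:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumption: the former OpenAIEmbeddingSettings fields are now read from
// providerOptions.openai instead of the model settings object.
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    openai: {
      dimensions: 512, // text-embedding-3 and later only
      user: 'user-1234', // hypothetical end-user id
    },
  },
});
```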
+ ## 2.0.0-canary.9
+
+ ### Patch Changes
+
+ - d63bcbc: feat (provider/openai): o4 updates for responses api
+ - d2af019: feat (providers/openai): add gpt-4.1 models
+ - 870c5c0: feat (providers/openai): add o3 and o4-mini models
+ - 06bac05: fix (openai): structure output for responses model
+
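canary.9 widens the model ID unions (visible in the `index.d.mts` diff below) with the o3, o4-mini, and gpt-4.1 families. The new IDs work with both the chat and the responses entry points; for example:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// gpt-4.1 via the chat completions API ...
const { text } = await generateText({
  model: openai('gpt-4.1'),
  prompt: 'Summarize the change in one sentence.',
});

// ... and o4-mini via the responses API.
const { text: answer } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the change in one sentence.',
});
```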
+ ## 2.0.0-canary.8
+
+ ### Patch Changes
+
+ - 8aa9e20: feat: add speech with experimental_generateSpeech
+ - Updated dependencies [5d142ab]
+ - Updated dependencies [b6b43c7]
+ - Updated dependencies [8aa9e20]
+ - Updated dependencies [3795467]
+   - @ai-sdk/provider-utils@3.0.0-canary.8
+   - @ai-sdk/provider@2.0.0-canary.7
+
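canary.8 hooks the provider into speech generation through the `openai.speech` factory declared later in this diff. A sketch using the `experimental_generateSpeech` helper named in the changeset:

```ts
import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';

// tts-1 is one of the OpenAISpeechModelId values added below.
const { audio } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'Hello from the AI SDK!',
});
```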
+ ## 2.0.0-canary.7
+
+ ### Patch Changes
+
+ - 26735b5: chore(embedding-model): add v2 interface
+ - 443d8ec: feat(embedding-model-v2): add response body field
+ - fd65bc6: chore(embedding-model-v2): rename rawResponse to response
+ - Updated dependencies [26735b5]
+ - Updated dependencies [443d8ec]
+ - Updated dependencies [14c9410]
+ - Updated dependencies [d9c98f4]
+ - Updated dependencies [c4a2fec]
+ - Updated dependencies [0054544]
+ - Updated dependencies [9e9c809]
+ - Updated dependencies [32831c6]
+ - Updated dependencies [d0f9495]
+ - Updated dependencies [fd65bc6]
+ - Updated dependencies [393138b]
+ - Updated dependencies [7182d14]
+   - @ai-sdk/provider@2.0.0-canary.6
+   - @ai-sdk/provider-utils@3.0.0-canary.7
+
+ ## 2.0.0-canary.6
+
+ ### Patch Changes
+
+ - 948b755: chore(providers/openai): convert to providerOptions
+ - 3b1ea10: adding support for gpt-4o-search-preview and handling unsupported parameters
+ - 442be08: fix: propagate openai transcription fixes
+ - 5147e6e: chore(openai): remove simulateStreaming
+ - c2b92cc: chore(openai): remove legacy function calling
+ - f10304b: feat(tool-calling): don't require the user to have to pass parameters
+ - Updated dependencies [411e483]
+ - Updated dependencies [79457bd]
+ - Updated dependencies [ad80501]
+ - Updated dependencies [1766ede]
+ - Updated dependencies [f10304b]
+   - @ai-sdk/provider@2.0.0-canary.5
+   - @ai-sdk/provider-utils@3.0.0-canary.6
+
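canary.6 converts the remaining chat settings to per-call provider options; the fields removed from `OpenAIChatSettings` below (`logitBias`, `logprobs`, `parallelToolCalls`, `user`, `reasoningEffort`, ...) move under `providerOptions.openai`. A sketch of the replacement pattern, assuming the option names are unchanged:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Before: openai('o3-mini', { reasoningEffort: 'low', user: 'user-1234' })
const { text } = await generateText({
  model: openai('o3-mini'),
  prompt: 'Explain the change in one sentence.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      user: 'user-1234', // hypothetical end-user id
    },
  },
});
```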
+ ## 2.0.0-canary.5
+
+ ### Patch Changes
+
+ - Updated dependencies [6f6bb89]
+   - @ai-sdk/provider@2.0.0-canary.4
+   - @ai-sdk/provider-utils@3.0.0-canary.5
+
+ ## 2.0.0-canary.4
+
+ ### Patch Changes
+
+ - Updated dependencies [d1a1aa1]
+   - @ai-sdk/provider@2.0.0-canary.3
+   - @ai-sdk/provider-utils@3.0.0-canary.4
+
+ ## 2.0.0-canary.3
+
+ ### Patch Changes
+
+ - a166433: feat: add transcription with experimental_transcribe
+ - 0a87932: core (ai): change transcription model mimeType to mediaType
+ - 0a87932: fix (provider/openai): increase transcription model resilience
+ - Updated dependencies [a166433]
+ - Updated dependencies [abf9a79]
+ - Updated dependencies [9f95b35]
+ - Updated dependencies [0a87932]
+ - Updated dependencies [6dc848c]
+   - @ai-sdk/provider-utils@3.0.0-canary.3
+   - @ai-sdk/provider@2.0.0-canary.2
+
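canary.3 introduced transcription; `whisper-1` and the gpt-4o transcription IDs appear in `OpenAITranscriptionModelId` below. A minimal sketch with the `experimental_transcribe` helper named in the changeset:

```ts
import { readFile } from 'node:fs/promises';
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'), // any local audio file
});
```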
+ ## 2.0.0-canary.2
+
+ ### Patch Changes
+
+ - Updated dependencies [c57e248]
+ - Updated dependencies [33f4a6a]
+   - @ai-sdk/provider@2.0.0-canary.1
+   - @ai-sdk/provider-utils@3.0.0-canary.2
+
  ## 2.0.0-canary.1

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1,84 +1,15 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';

- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
  interface OpenAIChatSettings {
  /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
  Whether to use structured outputs. Defaults to false.

  When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Whether to use legacy function calling. Defaults to false.
-
- Required by some open source inference engines which do not support the `tools` API. May also
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
- which causes `streamObject` to be non-streaming.
-
- Prefer setting `parallelToolCalls: false` over this option.
-
- @deprecated this API is supported but deprecated by OpenAI.
- */
- useLegacyFunctionCalling?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
  */
- user?: string;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- /**
- Simulates streaming by using a normal generate call and returning it as a stream.
- Enable this if the model that you are using does not support streaming.
-
- Defaults to `false`.
-
- @deprecated Use `simulateStreamingMiddleware` instead.
- */
- simulateStreaming?: boolean;
- /**
- Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort?: 'low' | 'medium' | 'high';
+ structuredOutputs?: boolean;
  }

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
@@ -137,12 +68,12 @@ type OpenAICompletionConfig = {
  };
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompletionModelId;
  readonly settings: OpenAICompletionSettings;
  private readonly config;
  constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
  get provider(): string;
+ getSupportedUrls(): Promise<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -158,16 +89,6 @@ interface OpenAIEmbeddingSettings {
  Override the parallelism of embedding calls.
  */
  supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
  }

  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
@@ -199,7 +120,11 @@ declare const openaiTools: {
  webSearchPreview: typeof webSearchPreviewTool;
  };

- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+
+ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

  interface OpenAIProvider extends ProviderV2 {
  (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
@@ -224,17 +149,17 @@ interface OpenAIProvider extends ProviderV2 {
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.

  @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
  */
@@ -244,6 +169,14 @@ interface OpenAIProvider extends ProviderV2 {
  */
  imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
  /**
+ Creates a model for transcription.
+ */
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ /**
+ Creates a model for speech generation.
+ */
+ speech(modelId: OpenAISpeechModelId): SpeechModelV1;
+ /**
  OpenAI-specific tools.
  */
  tools: typeof openaiTools;
@@ -305,19 +238,19 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
  }, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
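The `openaiResponsesProviderOptionsSchema` above only changes key order in this diff, but it documents the options the responses models accept per call. A hedged usage sketch (the response ID is a made-up placeholder):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Continue from the earlier answer.',
  providerOptions: {
    openai: {
      reasoningEffort: 'medium',
      store: true,
      previousResponseId: 'resp_123', // hypothetical stored response id
    },
  },
});
```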
package/dist/index.d.ts CHANGED

The changes to `index.d.ts` are identical to the `index.d.mts` diff shown above (the package ships the same declarations for its ESM and CJS entry points), so the hunks are not repeated here.