@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.11

This diff shows the changes between publicly released versions of this package as published to the supported public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,125 @@
  # @ai-sdk/openai
 
+ ## 2.0.0-canary.11
+
+ ### Patch Changes
+
+ - 8493141: feat (providers/openai): add support for reasoning summaries
+ - Updated dependencies [e86be6f]
+ - @ai-sdk/provider@2.0.0-canary.9
+ - @ai-sdk/provider-utils@3.0.0-canary.10
+
+ ## 2.0.0-canary.10
+
+ ### Patch Changes
+
+ - 3bd3c0b: chore(providers/openai): update embedding model to use providerOptions
+ - Updated dependencies [95857aa]
+ - Updated dependencies [7ea4132]
+ - @ai-sdk/provider@2.0.0-canary.8
+ - @ai-sdk/provider-utils@3.0.0-canary.9
+
+ ## 2.0.0-canary.9
+
+ ### Patch Changes
+
+ - d63bcbc: feat (provider/openai): o4 updates for responses api
+ - d2af019: feat (providers/openai): add gpt-4.1 models
+ - 870c5c0: feat (providers/openai): add o3 and o4-mini models
+ - 06bac05: fix (openai): structure output for responses model
+
+ ## 2.0.0-canary.8
+
+ ### Patch Changes
+
+ - 8aa9e20: feat: add speech with experimental_generateSpeech
+ - Updated dependencies [5d142ab]
+ - Updated dependencies [b6b43c7]
+ - Updated dependencies [8aa9e20]
+ - Updated dependencies [3795467]
+ - @ai-sdk/provider-utils@3.0.0-canary.8
+ - @ai-sdk/provider@2.0.0-canary.7
+
+ ## 2.0.0-canary.7
+
+ ### Patch Changes
+
+ - 26735b5: chore(embedding-model): add v2 interface
+ - 443d8ec: feat(embedding-model-v2): add response body field
+ - fd65bc6: chore(embedding-model-v2): rename rawResponse to response
+ - Updated dependencies [26735b5]
+ - Updated dependencies [443d8ec]
+ - Updated dependencies [14c9410]
+ - Updated dependencies [d9c98f4]
+ - Updated dependencies [c4a2fec]
+ - Updated dependencies [0054544]
+ - Updated dependencies [9e9c809]
+ - Updated dependencies [32831c6]
+ - Updated dependencies [d0f9495]
+ - Updated dependencies [fd65bc6]
+ - Updated dependencies [393138b]
+ - Updated dependencies [7182d14]
+ - @ai-sdk/provider@2.0.0-canary.6
+ - @ai-sdk/provider-utils@3.0.0-canary.7
+
+ ## 2.0.0-canary.6
+
+ ### Patch Changes
+
+ - 948b755: chore(providers/openai): convert to providerOptions
+ - 3b1ea10: adding support for gpt-4o-search-preview and handling unsupported parameters
+ - 442be08: fix: propagate openai transcription fixes
+ - 5147e6e: chore(openai): remove simulateStreaming
+ - c2b92cc: chore(openai): remove legacy function calling
+ - f10304b: feat(tool-calling): don't require the user to have to pass parameters
+ - Updated dependencies [411e483]
+ - Updated dependencies [79457bd]
+ - Updated dependencies [ad80501]
+ - Updated dependencies [1766ede]
+ - Updated dependencies [f10304b]
+ - @ai-sdk/provider@2.0.0-canary.5
+ - @ai-sdk/provider-utils@3.0.0-canary.6
+
+ ## 2.0.0-canary.5
+
+ ### Patch Changes
+
+ - Updated dependencies [6f6bb89]
+ - @ai-sdk/provider@2.0.0-canary.4
+ - @ai-sdk/provider-utils@3.0.0-canary.5
+
+ ## 2.0.0-canary.4
+
+ ### Patch Changes
+
+ - Updated dependencies [d1a1aa1]
+ - @ai-sdk/provider@2.0.0-canary.3
+ - @ai-sdk/provider-utils@3.0.0-canary.4
+
+ ## 2.0.0-canary.3
+
+ ### Patch Changes
+
+ - a166433: feat: add transcription with experimental_transcribe
+ - 0a87932: core (ai): change transcription model mimeType to mediaType
+ - 0a87932: fix (provider/openai): increase transcription model resilience
+ - Updated dependencies [a166433]
+ - Updated dependencies [abf9a79]
+ - Updated dependencies [9f95b35]
+ - Updated dependencies [0a87932]
+ - Updated dependencies [6dc848c]
+ - @ai-sdk/provider-utils@3.0.0-canary.3
+ - @ai-sdk/provider@2.0.0-canary.2
+
+ ## 2.0.0-canary.2
+
+ ### Patch Changes
+
+ - Updated dependencies [c57e248]
+ - Updated dependencies [33f4a6a]
+ - @ai-sdk/provider@2.0.0-canary.1
+ - @ai-sdk/provider-utils@3.0.0-canary.2
+
  ## 2.0.0-canary.1
 
  ### Patch Changes
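The headline change in 2.0.0-canary.11 is reasoning-summary support for the Responses API, exposed through the new `reasoningSummary` provider option (see the schema change in the typings below). A minimal sketch of how it would be requested; the accepted values (`'auto'`, `'detailed'`) and how the summary surfaces on the result are assumptions based on OpenAI's Responses API, not confirmed by this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Hypothetical usage: ask a Responses-API reasoning model for a
// summary of its reasoning via the new provider option.
const result = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'How many prime numbers are there below 100?',
  providerOptions: {
    openai: {
      reasoningEffort: 'medium',
      // Added in 2.0.0-canary.11. The accepted values ('auto', 'detailed')
      // are an assumption; the typings only constrain it to a string.
      reasoningSummary: 'auto',
    },
  },
});

// The summary is expected to arrive as reasoning content on the result.
console.log(result.reasoning);
console.log(result.text);
```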
package/dist/index.d.mts CHANGED
@@ -1,84 +1,15 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
  interface OpenAIChatSettings {
  /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
  Whether to use structured outputs. Defaults to false.
 
  When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Whether to use legacy function calling. Defaults to false.
-
- Required by some open source inference engines which do not support the `tools` API. May also
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
- which causes `streamObject` to be non-streaming.
-
- Prefer setting `parallelToolCalls: false` over this option.
-
- @deprecated this API is supported but deprecated by OpenAI.
- */
- useLegacyFunctionCalling?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
  */
- user?: string;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- /**
- Simulates streaming by using a normal generate call and returning it as a stream.
- Enable this if the model that you are using does not support streaming.
-
- Defaults to `false`.
-
- @deprecated Use `simulateStreamingMiddleware` instead.
- */
- simulateStreaming?: boolean;
- /**
- Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort?: 'low' | 'medium' | 'high';
+ structuredOutputs?: boolean;
  }
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
@@ -103,18 +34,6 @@ interface OpenAICompletionSettings {
  */
  logitBias?: Record<number, number>;
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
  The suffix that comes after a completion of inserted text.
  */
  suffix?: string;
@@ -137,12 +56,12 @@ type OpenAICompletionConfig = {
  };
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompletionModelId;
  readonly settings: OpenAICompletionSettings;
  private readonly config;
  constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
  get provider(): string;
+ getSupportedUrls(): Promise<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -158,16 +77,6 @@ interface OpenAIEmbeddingSettings {
  Override the parallelism of embedding calls.
  */
  supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
  }
 
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
@@ -199,7 +108,11 @@ declare const openaiTools: {
  webSearchPreview: typeof webSearchPreviewTool;
  };
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+
+ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
  interface OpenAIProvider extends ProviderV2 {
  (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
@@ -224,17 +137,17 @@ interface OpenAIProvider extends ProviderV2 {
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
 
  @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
  */
@@ -244,6 +157,14 @@ interface OpenAIProvider extends ProviderV2 {
  */
  imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
  /**
+ Creates a model for transcription.
+ */
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ /**
+ Creates a model for speech generation.
+ */
+ speech(modelId: OpenAISpeechModelId): SpeechModelV1;
+ /**
  OpenAI-specific tools.
  */
  tools: typeof openaiTools;
@@ -303,24 +224,27 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
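The typings above add `transcription()` and `speech()` factories, which pair with the `experimental_transcribe` and `experimental_generateSpeech` helpers named in the changelog. A minimal sketch of both; the exact call shapes and result fields are assumptions rather than something this diff confirms:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import {
  experimental_transcribe as transcribe,
  experimental_generateSpeech as generateSpeech,
} from 'ai';

// Speech-to-text via the new transcription model factory.
const transcript = await transcribe({
  model: openai.transcription('gpt-4o-transcribe'),
  audio: await readFile('meeting.wav'), // assumed: raw audio bytes are accepted
});
console.log(transcript.text);

// Text-to-speech via the new speech model factory.
const speech = await generateSpeech({
  model: openai.speech('gpt-4o-mini-tts'),
  text: 'Hello from the AI SDK.',
});
// speech.audio is assumed to carry the generated audio data.
console.log(speech.audio);
```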
 
package/dist/index.d.ts CHANGED
@@ -1,84 +1,15 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
  interface OpenAIChatSettings {
  /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
  Whether to use structured outputs. Defaults to false.
 
  When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Whether to use legacy function calling. Defaults to false.
-
- Required by some open source inference engines which do not support the `tools` API. May also
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
- which causes `streamObject` to be non-streaming.
-
- Prefer setting `parallelToolCalls: false` over this option.
-
- @deprecated this API is supported but deprecated by OpenAI.
- */
- useLegacyFunctionCalling?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
  */
- user?: string;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- /**
- Simulates streaming by using a normal generate call and returning it as a stream.
- Enable this if the model that you are using does not support streaming.
-
- Defaults to `false`.
-
- @deprecated Use `simulateStreamingMiddleware` instead.
- */
- simulateStreaming?: boolean;
- /**
- Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort?: 'low' | 'medium' | 'high';
+ structuredOutputs?: boolean;
  }
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
@@ -103,18 +34,6 @@ interface OpenAICompletionSettings {
  */
  logitBias?: Record<number, number>;
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
  The suffix that comes after a completion of inserted text.
  */
  suffix?: string;
@@ -137,12 +56,12 @@ type OpenAICompletionConfig = {
  };
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompletionModelId;
  readonly settings: OpenAICompletionSettings;
  private readonly config;
  constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
  get provider(): string;
+ getSupportedUrls(): Promise<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -158,16 +77,6 @@ interface OpenAIEmbeddingSettings {
  Override the parallelism of embedding calls.
  */
  supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
  }
 
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
@@ -199,7 +108,11 @@ declare const openaiTools: {
  webSearchPreview: typeof webSearchPreviewTool;
  };
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+
+ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
  interface OpenAIProvider extends ProviderV2 {
  (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
@@ -224,17 +137,17 @@ interface OpenAIProvider extends ProviderV2 {
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
 
  @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
  */
@@ -244,6 +157,14 @@ interface OpenAIProvider extends ProviderV2 {
  */
  imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
  /**
+ Creates a model for transcription.
+ */
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ /**
+ Creates a model for speech generation.
+ */
+ speech(modelId: OpenAISpeechModelId): SpeechModelV1;
+ /**
  OpenAI-specific tools.
  */
  tools: typeof openaiTools;
@@ -303,24 +224,27 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
+ parallelToolCalls?: boolean | null | undefined;
+ reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
- reasoningEffort?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
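With `dimensions` and `user` removed from `OpenAIEmbeddingSettings`, and canary.10 moving the embedding model to `providerOptions`, per-request options would now be passed under `providerOptions.openai`. A minimal migration sketch; the assumption that these option names carry over unchanged under the `openai` key is mine, not something this diff states:

```ts
import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  // Formerly OpenAIEmbeddingSettings fields; assumed to be accepted as
  // provider options after the canary.10 providerOptions migration.
  providerOptions: {
    openai: {
      dimensions: 512,
      user: 'user-1234',
    },
  },
});

console.log(embedding.length); // 512 if the dimensions option is honored
```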