@ai-sdk/openai 2.0.0-canary.9 → 2.0.1

This diff compares the published contents of two publicly released package versions, exactly as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.mts CHANGED
@@ -1,161 +1,107 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
- interface OpenAIChatSettings {
- /**
- Whether to use structured outputs. Defaults to false.
-
- When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- }

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
- /**
- Echo back the prompt in addition to the completion.
- */
- echo?: boolean;
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }

- type OpenAICompletionConfig = {
+ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
+
+ type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
+
+ declare const openaiTools: {
+ fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
+ query: string;
+ }, {
+ vectorStoreIds?: string[];
+ maxNumResults?: number;
+ ranking?: {
+ ranker?: "auto" | "default-2024-08-21";
+ };
+ filters?: {
+ key: string;
+ type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte";
+ value: string | number | boolean;
+ } | {
+ type: "and" | "or";
+ filters: any[];
+ };
+ }>;
+ webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ searchContextSize?: "low" | "medium" | "high";
+ userLocation?: {
+ type: "approximate";
+ country?: string;
+ city?: string;
+ region?: string;
+ timezone?: string;
+ };
+ }>;
+ };
+
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ type OpenAIConfig = {
  provider: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
  url: (options: {
  modelId: string;
  path: string;
  }) => string;
+ headers: () => Record<string, string | undefined>;
  fetch?: FetchFunction;
+ generateId?: () => string;
  };
- declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
+
+ declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview"];
+ type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
+
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
- readonly modelId: OpenAICompletionModelId;
- readonly settings: OpenAICompletionSettings;
+ readonly modelId: OpenAIResponsesModelId;
  private readonly config;
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+ constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
+ readonly supportedUrls: Record<string, RegExp[]>;
  get provider(): string;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
  }
-
- type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- interface OpenAIEmbeddingSettings {
- /**
- Override the maximum number of embeddings per call.
- */
- maxEmbeddingsPerCall?: number;
- /**
- Override the parallelism of embedding calls.
- */
- supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }
-
- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
- interface OpenAIImageSettings {
- /**
- Override the maximum number of images per call (default is dependent on the
- model, or 1 for an unknown model).
- */
- maxImagesPerCall?: number;
- }
-
- declare const WebSearchPreviewParameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
- declare function webSearchPreviewTool({ searchContextSize, userLocation, }?: {
- searchContextSize?: 'low' | 'medium' | 'high';
- userLocation?: {
- type?: 'approximate';
- city?: string;
- region?: string;
- country?: string;
- timezone?: string;
- };
- }): {
- type: 'provider-defined';
- id: 'openai.web_search_preview';
- args: {};
- parameters: typeof WebSearchPreviewParameters;
- };
- declare const openaiTools: {
- webSearchPreview: typeof webSearchPreviewTool;
- };
-
- type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+ metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+ parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+ previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+ user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+ instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ priority: "priority";
+ }>>>;
+ include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+ "reasoning.encrypted_content": "reasoning.encrypted_content";
+ "file_search_call.results": "file_search_call.results";
+ }>>>>;
+ }, z.core.$strip>;
+ type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

  interface OpenAIProvider extends ProviderV2 {
- (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ (modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
  Creates an OpenAI model for text generation.
  */
- languageModel(modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- languageModel(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ languageModel(modelId: OpenAIResponsesModelId): OpenAIResponsesLanguageModel;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;
+ chat(modelId: OpenAIChatModelId): LanguageModelV2;
  /**
  Creates an OpenAI responses API model for text generation.
  */
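Note: the hunk above removes the per-model settings interfaces and the old `webSearchPreviewTool` function, replacing them with provider-defined tool factories (`fileSearch`, `webSearchPreview`) and a Responses-based default language model. A minimal sketch of a call site under the new shapes; the `generateText` helper comes from the `ai` package, not this diff, and the concrete option values are illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: wire the provider-defined web search tool into a call.
const { text } = await generateText({
  // the provider is now callable without a settings object
  model: openai('gpt-4o'),
  tools: {
    web_search_preview: openai.tools.webSearchPreview({
      searchContextSize: 'high',
      userLocation: { type: 'approximate', city: 'San Francisco' },
    }),
  },
  prompt: 'What happened in AI news this week?',
});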
@@ -163,37 +109,35 @@ interface OpenAIProvider extends ProviderV2 {
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): LanguageModelV2;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV2;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
-
- @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV2<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
  */
- image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ image(modelId: OpenAIImageModelId): ImageModelV2;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ imageModel(modelId: OpenAIImageModelId): ImageModelV2;
  /**
  Creates a model for transcription.
  */
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV2;
  /**
  Creates a model for speech generation.
  */
- speech(modelId: OpenAISpeechModelId): SpeechModelV1;
+ speech(modelId: OpenAISpeechModelId): SpeechModelV2;
  /**
  OpenAI-specific tools.
  */
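Note: every factory method above loses its `settings` parameter, so per-model options now travel per call. A sketch of the embedding case, assuming the `ai` package's `embed` helper and its `providerOptions` mechanism; the `dimensions` option name matches the removed `OpenAIEmbeddingSettings` field but its new placement is an assumption from documented AI SDK usage, not from this diff:

import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

// previously: openai.embedding('text-embedding-3-small', { dimensions: 512 })
const { embedding } = await embed({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    openai: { dimensions: 512 }, // per-call replacement for the removed setting
  },
});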
@@ -221,12 +165,6 @@ interface OpenAIProviderSettings {
  */
  headers?: Record<string, string>;
  /**
- OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
- and `compatible` when using 3rd party providers. In `compatible` mode, newer
- information such as streamOptions are not being sent. Defaults to 'compatible'.
- */
- compatibility?: 'strict' | 'compatible';
- /**
  Provider name. Overrides the `openai` default name for 3rd party providers.
  */
  name?: string;
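Note: with the `compatibility` flag removed, `createOpenAI` takes only connection-level settings. A sketch of a custom provider instance under the new shape; `apiKey` and `baseURL` are long-standing `OpenAIProviderSettings` fields not shown in this hunk, and all values below are placeholders:

import { createOpenAI } from '@ai-sdk/openai';

const gateway = createOpenAI({
  apiKey: process.env.MY_GATEWAY_KEY, // placeholder env var
  baseURL: 'https://example.com/openai/v1', // hypothetical 3rd-party endpoint
  name: 'my-gateway', // overrides the default `openai` provider name
  headers: { 'x-team': 'search' }, // hypothetical extra header
});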
@@ -241,38 +179,8 @@ Create an OpenAI provider instance.
  */
  declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
  /**
- Default OpenAI provider instance. It uses 'strict' compatibility mode.
+ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;

- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
- metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, "strip", z.ZodTypeAny, {
- user?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- reasoningEffort?: string | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- instructions?: string | null | undefined;
- }, {
- user?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- reasoningEffort?: string | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- instructions?: string | null | undefined;
- }>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-

  export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
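Note: the responses provider options schema (moved to the top of the file in this release) renames `strictSchemas` to `strictJsonSchema` and adds `reasoningSummary`, `serviceTier`, and `include`. A sketch of passing them per call; `generateText` is from the `ai` package, `openai.responses(...)` is the factory documented above, and the chosen values are illustrative:

import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize this release in one sentence.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      reasoningSummary: 'auto', // new in this release
      serviceTier: 'flex', // new in this release
      strictJsonSchema: true, // renamed from strictSchemas
    } satisfies OpenAIResponsesProviderOptions,
  },
});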