@ai-sdk/openai 2.0.44 → 2.0.46

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # @ai-sdk/openai
 
+ ## 2.0.46
+
+ ### Patch Changes
+
+ - 66f69e7: Add 'default' as service tier
+
+ ## 2.0.45
+
+ ### Patch Changes
+
+ - 6f0644c: chore: use import \* from zod/v4
+ - 6f0644c: chore: load zod schemas lazily
+ - Updated dependencies [6f0644c]
+ - Updated dependencies [6f0644c]
+   - @ai-sdk/provider-utils@3.0.11
+
  ## 2.0.44
 
  ### Patch Changes
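The 2.0.46 entry is the only functional change in this range: `'default'` joins `'auto'`, `'flex'`, and `'priority'` as an accepted `serviceTier` value in both the chat and responses provider options (see the type diffs below). A minimal sketch of how that option would typically be passed, assuming the AI SDK 5 `generateText` call shape with per-call `providerOptions`; the model id and prompt are illustrative:

// Hedged sketch: pass the new 'default' service tier through per-call provider options.
// Assumes the AI SDK 5 `generateText` API; model id and prompt are illustrative.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { text } = await generateText({
    model: openai('gpt-4o'),
    prompt: 'Say hello.',
    providerOptions: {
      // Before 2.0.46 the options schema only accepted 'auto' | 'flex' | 'priority'.
      openai: { serviceTier: 'default' },
    },
  });
  console.log(text);
}

main().catch(console.error);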
package/dist/index.d.mts CHANGED
@@ -1,40 +1,26 @@
  import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod/v4';
+ import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';
 
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
-     logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-     user: z.ZodOptional<z.ZodString>;
-     reasoningEffort: z.ZodOptional<z.ZodEnum<{
-         minimal: "minimal";
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-     store: z.ZodOptional<z.ZodBoolean>;
-     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-     serviceTier: z.ZodOptional<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>;
-     strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-     textVerbosity: z.ZodOptional<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     promptCacheKey: z.ZodOptional<z.ZodString>;
-     safetyIdentifier: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+     logitBias?: Record<number, number> | undefined;
+     logprobs?: number | boolean | undefined;
+     parallelToolCalls?: boolean | undefined;
+     user?: string | undefined;
+     reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+     maxCompletionTokens?: number | undefined;
+     store?: boolean | undefined;
+     metadata?: Record<string, string> | undefined;
+     prediction?: Record<string, any> | undefined;
+     structuredOutputs?: boolean | undefined;
+     serviceTier?: "default" | "auto" | "flex" | "priority" | undefined;
+     strictJsonSchema?: boolean | undefined;
+     textVerbosity?: "low" | "medium" | "high" | undefined;
+     promptCacheKey?: string | undefined;
+     safetyIdentifier?: string | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -259,6 +245,25 @@ declare const openaiTools: {
  };
 
  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+     instructions?: string | null | undefined;
+     logprobs?: number | boolean | undefined;
+     maxToolCalls?: number | null | undefined;
+     metadata?: any;
+     parallelToolCalls?: boolean | null | undefined;
+     previousResponseId?: string | null | undefined;
+     promptCacheKey?: string | null | undefined;
+     reasoningEffort?: string | null | undefined;
+     reasoningSummary?: string | null | undefined;
+     safetyIdentifier?: string | null | undefined;
+     serviceTier?: "default" | "auto" | "flex" | "priority" | null | undefined;
+     store?: boolean | null | undefined;
+     strictJsonSchema?: boolean | null | undefined;
+     textVerbosity?: "low" | "medium" | "high" | null | undefined;
+     user?: string | null | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
@@ -355,38 +360,6 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;
 
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "file_search_call.results": "file_search_call.results";
-         "message.output_text.logprobs": "message.output_text.logprobs";
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-     }>>>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
  declare const VERSION: string;
 
  export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
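For consumers of the typings, the visible effect of the zod changes is that the exported option types are now derived with `InferValidator` from lazily created validators instead of `z.infer` over eagerly built `z.ZodObject`s, while resolving to the same plain object shapes; the only widening is `serviceTier` gaining `'default'`. A minimal sketch of type-checking a responses options object against the exported type (the specific option values are illustrative):

// Hedged sketch: the exported OpenAIResponsesProviderOptions type can be used to
// check provider options at compile time. Values below are illustrative.
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

const responsesOptions: OpenAIResponsesProviderOptions = {
  serviceTier: 'default', // accepted by the types from 2.0.46 onward
  reasoningEffort: 'low',
  include: ['reasoning.encrypted_content'],
  parallelToolCalls: false,
};

export { responsesOptions };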
package/dist/index.d.ts CHANGED
@@ -1,40 +1,26 @@
  import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod/v4';
+ import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';
 
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
-     logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-     user: z.ZodOptional<z.ZodString>;
-     reasoningEffort: z.ZodOptional<z.ZodEnum<{
-         minimal: "minimal";
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-     store: z.ZodOptional<z.ZodBoolean>;
-     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-     serviceTier: z.ZodOptional<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>;
-     strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-     textVerbosity: z.ZodOptional<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     promptCacheKey: z.ZodOptional<z.ZodString>;
-     safetyIdentifier: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+     logitBias?: Record<number, number> | undefined;
+     logprobs?: number | boolean | undefined;
+     parallelToolCalls?: boolean | undefined;
+     user?: string | undefined;
+     reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+     maxCompletionTokens?: number | undefined;
+     store?: boolean | undefined;
+     metadata?: Record<string, string> | undefined;
+     prediction?: Record<string, any> | undefined;
+     structuredOutputs?: boolean | undefined;
+     serviceTier?: "default" | "auto" | "flex" | "priority" | undefined;
+     strictJsonSchema?: boolean | undefined;
+     textVerbosity?: "low" | "medium" | "high" | undefined;
+     promptCacheKey?: string | undefined;
+     safetyIdentifier?: string | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -259,6 +245,25 @@ declare const openaiTools: {
  };
 
  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+     instructions?: string | null | undefined;
+     logprobs?: number | boolean | undefined;
+     maxToolCalls?: number | null | undefined;
+     metadata?: any;
+     parallelToolCalls?: boolean | null | undefined;
+     previousResponseId?: string | null | undefined;
+     promptCacheKey?: string | null | undefined;
+     reasoningEffort?: string | null | undefined;
+     reasoningSummary?: string | null | undefined;
+     safetyIdentifier?: string | null | undefined;
+     serviceTier?: "default" | "auto" | "flex" | "priority" | null | undefined;
+     store?: boolean | null | undefined;
+     strictJsonSchema?: boolean | null | undefined;
+     textVerbosity?: "low" | "medium" | "high" | null | undefined;
+     user?: string | null | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
@@ -355,38 +360,6 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;
 
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "file_search_call.results": "file_search_call.results";
-         "message.output_text.logprobs": "message.output_text.logprobs";
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-     }>>>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
  declare const VERSION: string;
 
  export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
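The `index.d.ts` changes mirror `index.d.mts`, keeping the CommonJS and ESM type entry points in sync. One more hedged sketch: because `OpenAIChatLanguageModelOptions` resolves to a plain object shape, a `satisfies` check (TypeScript 4.9+) catches misspelled or invalid options at compile time while preserving literal value types; the option values below are illustrative.

// Hedged sketch: compile-time check of chat provider options via the exported type.
// `satisfies` keeps the literal types of the values; values are illustrative.
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';

const chatOptions = {
  serviceTier: 'default',
  reasoningEffort: 'minimal',
  maxCompletionTokens: 1024,
  promptCacheKey: 'example-cache-key',
} satisfies OpenAIChatLanguageModelOptions;

export { chatOptions };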