@ai-sdk/openai 3.0.0-beta.17 → 3.0.0-beta.18

This diff compares the publicly released contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,15 @@
  # @ai-sdk/openai

+ ## 3.0.0-beta.18
+
+ ### Patch Changes
+
+ - 95f65c2: chore: use import \* from zod/v4
+ - 95f65c2: chore: load zod schemas lazily
+ - Updated dependencies [95f65c2]
+ - Updated dependencies [95f65c2]
+   - @ai-sdk/provider-utils@4.0.0-beta.11
+
  ## 3.0.0-beta.17

  ### Major Changes
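The "load zod schemas lazily" entry above pairs with the switch from exported `z.ZodObject` schemas to `LazyValidator` values in the type declarations below. For context, here is a minimal sketch of the general pattern, under the assumption that the goal is simply to defer schema construction until first use; `lazyValidatorSketch` and `LazyValidatorSketch` are hypothetical names, not the `@ai-sdk/provider-utils` implementation.

```ts
// Illustrative sketch only (assumed names): the real LazyValidator and
// InferValidator come from @ai-sdk/provider-utils and are not shown in this diff.
// The idea is to defer building the zod schema (and paying its construction
// cost) until the first time a value is actually validated.
import { z } from 'zod/v4';

type LazyValidatorSketch<T> = {
  validate: (value: unknown) => T;
};

function lazyValidatorSketch<T>(
  createSchema: () => z.ZodType<T>,
): LazyValidatorSketch<T> {
  let schema: z.ZodType<T> | undefined;
  return {
    validate(value) {
      // Build the schema on first use instead of at module load time.
      schema ??= createSchema();
      return schema.parse(value);
    },
  };
}

// Hypothetical usage with a small subset of the chat options:
const chatOptionsValidator = lazyValidatorSketch(() =>
  z.object({
    reasoningEffort: z.enum(['minimal', 'low', 'medium', 'high']).optional(),
    store: z.boolean().optional(),
  }),
);

chatOptionsValidator.validate({ reasoningEffort: 'low' });
```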
package/dist/index.d.mts CHANGED
@@ -1,40 +1,26 @@
  import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod/v4';
+ import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
-     logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-     user: z.ZodOptional<z.ZodString>;
-     reasoningEffort: z.ZodOptional<z.ZodEnum<{
-         minimal: "minimal";
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-     store: z.ZodOptional<z.ZodBoolean>;
-     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-     serviceTier: z.ZodOptional<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>;
-     strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-     textVerbosity: z.ZodOptional<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     promptCacheKey: z.ZodOptional<z.ZodString>;
-     safetyIdentifier: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+     logitBias?: Record<number, number> | undefined;
+     logprobs?: number | boolean | undefined;
+     parallelToolCalls?: boolean | undefined;
+     user?: string | undefined;
+     reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+     maxCompletionTokens?: number | undefined;
+     store?: boolean | undefined;
+     metadata?: Record<string, string> | undefined;
+     prediction?: Record<string, any> | undefined;
+     structuredOutputs?: boolean | undefined;
+     serviceTier?: "auto" | "flex" | "priority" | undefined;
+     strictJsonSchema?: boolean | undefined;
+     textVerbosity?: "low" | "medium" | "high" | undefined;
+     promptCacheKey?: string | undefined;
+     safetyIdentifier?: string | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});

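The hunk above replaces the exported zod object for the chat provider options with a lazily evaluated validator while keeping the same inferred `OpenAIChatLanguageModelOptions` shape. As an illustrative sketch of how these options are typically consumed (the `generateText` call and the specific option values are assumptions, not taken from this package):

```ts
import { generateText } from 'ai';
import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';

// Hypothetical example: the option values are illustrative only.
const chatOptions = {
  reasoningEffort: 'low',
  serviceTier: 'auto',
  store: false,
  promptCacheKey: 'release-notes-summary',
} satisfies OpenAIChatLanguageModelOptions;

const { text } = await generateText({
  // openai.chat() selects the Chat Completions model implementation.
  model: openai.chat('gpt-5-mini'),
  prompt: 'Summarize the 3.0.0-beta.18 changes in one sentence.',
  providerOptions: { openai: chatOptions },
});

console.log(text);
```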
@@ -265,6 +251,25 @@ declare const openaiTools: {
  };

  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+     instructions?: string | null | undefined;
+     logprobs?: number | boolean | undefined;
+     maxToolCalls?: number | null | undefined;
+     metadata?: any;
+     parallelToolCalls?: boolean | null | undefined;
+     previousResponseId?: string | null | undefined;
+     promptCacheKey?: string | null | undefined;
+     reasoningEffort?: string | null | undefined;
+     reasoningSummary?: string | null | undefined;
+     safetyIdentifier?: string | null | undefined;
+     serviceTier?: "auto" | "flex" | "priority" | null | undefined;
+     store?: boolean | null | undefined;
+     strictJsonSchema?: boolean | null | undefined;
+     textVerbosity?: "low" | "medium" | "high" | null | undefined;
+     user?: string | null | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;

  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

@@ -361,38 +366,6 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;

- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "file_search_call.results": "file_search_call.results";
-         "message.output_text.logprobs": "message.output_text.logprobs";
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-     }>>>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
  declare const VERSION: string;

  export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
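`OpenAIResponsesProviderOptions` is likewise moved from a `z.ZodObject` at the bottom of the file to a `LazyValidator` declared next to `OpenAIResponsesModelId`, with its type inferred via `InferValidator` instead of `z.infer`. A hedged usage sketch, assuming the Responses API model is selected with `openai.responses()`; the option values and prompt are illustrative only:

```ts
import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

// Hypothetical example: option values are illustrative only.
const responsesOptions = {
  reasoningSummary: 'auto',
  include: ['reasoning.encrypted_content'],
  serviceTier: 'flex',
  store: false,
} satisfies OpenAIResponsesProviderOptions;

const result = await generateText({
  // openai.responses() selects the Responses API model implementation.
  model: openai.responses('gpt-5'),
  prompt: 'Explain the difference between the flex and priority service tiers.',
  providerOptions: { openai: responsesOptions },
});

console.log(result.text);
```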
package/dist/index.d.ts CHANGED
@@ -1,40 +1,26 @@
  import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod/v4';
+ import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
-     logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-     user: z.ZodOptional<z.ZodString>;
-     reasoningEffort: z.ZodOptional<z.ZodEnum<{
-         minimal: "minimal";
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-     store: z.ZodOptional<z.ZodBoolean>;
-     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-     serviceTier: z.ZodOptional<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>;
-     strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-     textVerbosity: z.ZodOptional<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>;
-     promptCacheKey: z.ZodOptional<z.ZodString>;
-     safetyIdentifier: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+     logitBias?: Record<number, number> | undefined;
+     logprobs?: number | boolean | undefined;
+     parallelToolCalls?: boolean | undefined;
+     user?: string | undefined;
+     reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+     maxCompletionTokens?: number | undefined;
+     store?: boolean | undefined;
+     metadata?: Record<string, string> | undefined;
+     prediction?: Record<string, any> | undefined;
+     structuredOutputs?: boolean | undefined;
+     serviceTier?: "auto" | "flex" | "priority" | undefined;
+     strictJsonSchema?: boolean | undefined;
+     textVerbosity?: "low" | "medium" | "high" | undefined;
+     promptCacheKey?: string | undefined;
+     safetyIdentifier?: string | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});

@@ -265,6 +251,25 @@ declare const openaiTools: {
  };

  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+     instructions?: string | null | undefined;
+     logprobs?: number | boolean | undefined;
+     maxToolCalls?: number | null | undefined;
+     metadata?: any;
+     parallelToolCalls?: boolean | null | undefined;
+     previousResponseId?: string | null | undefined;
+     promptCacheKey?: string | null | undefined;
+     reasoningEffort?: string | null | undefined;
+     reasoningSummary?: string | null | undefined;
+     safetyIdentifier?: string | null | undefined;
+     serviceTier?: "auto" | "flex" | "priority" | null | undefined;
+     store?: boolean | null | undefined;
+     strictJsonSchema?: boolean | null | undefined;
+     textVerbosity?: "low" | "medium" | "high" | null | undefined;
+     user?: string | null | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;

  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

@@ -361,38 +366,6 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;

- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "file_search_call.results": "file_search_call.results";
-         "message.output_text.logprobs": "message.output_text.logprobs";
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-     }>>>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-     maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
  declare const VERSION: string;

  export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };