@ai-sdk/openai 3.0.0-beta.91 → 3.0.0-beta.93

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,7 +8,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     user?: string | undefined;
-    reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
+    reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
     maxCompletionTokens?: number | undefined;
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
@@ -186,7 +186,7 @@ declare class OpenAISpeechModel implements SpeechModelV3 {
     doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
 }
 
-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 
 declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
     readonly specificationVersion = "v3";
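
For context, a minimal usage sketch assuming the `openai` provider instance exported by @ai-sdk/openai: with the widened OpenAIResponsesModelId union, the new id is a named member rather than falling through to the (string & {}) catch-all, so it autocompletes and type-checks directly.

    import { openai } from '@ai-sdk/openai';

    // 'gpt-5.1-codex-max' is now part of the OpenAIResponsesModelId union,
    // so editors suggest it instead of treating it as an arbitrary string.
    const model = openai.responses('gpt-5.1-codex-max');
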
@@ -519,7 +519,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
       /**
        * Reasoning effort for reasoning models. Defaults to `medium`.
        */
-      reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
+      reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
       /**
        * Maximum number of completion tokens to generate. Useful for reasoning models.
        */
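
The runtime schema mirrors the type-level change. Below is a standalone re-creation of the updated enum for illustration only; the real schema lives inside openaiChatLanguageModelOptions and is built with the package's bundled zod import, not exported on its own.

    import { z } from 'zod';

    // Re-created here for illustration; not an export of @ai-sdk/openai.
    const reasoningEffort = z
      .enum(['none', 'minimal', 'low', 'medium', 'high', 'xhigh'])
      .optional();

    reasoningEffort.safeParse('xhigh').success; // true as of this release
    reasoningEffort.safeParse('ultra').success; // false: not a valid effort level
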
@@ -3401,7 +3401,8 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5.1",
   "gpt-5.1-chat-latest",
   "gpt-5.1-codex-mini",
-  "gpt-5.1-codex"
+  "gpt-5.1-codex",
+  "gpt-5.1-codex-max"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
@@ -3518,10 +3519,11 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
   /**
    * Reasoning effort for reasoning models. Defaults to `medium`. If you use
    * `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
-   * Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high'
+   * Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
    *
-   * Note: The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
-   * models. Setting `reasoningEffort` to 'none' with other models will result in
+   * The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
+   * models. Also, the 'xhigh' type for `reasoningEffort` is only available for
+   * OpenAI's GPT-5.1-Codex-Max model. Setting `reasoningEffort` to 'none' or 'xhigh' with unsupported models will result in
    * an error.
    */
   reasoningEffort: import_v417.z.string().nullish(),
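
Putting the two hunks above together: gpt-5.1-codex-max is appended to openaiResponsesReasoningModelIds, so it is classified as a reasoning model, and the provider options now document 'xhigh' as a valid effort level for it. A hedged end-to-end sketch, assuming generateText from the ai package alongside this provider; per the doc comment above, 'xhigh' is only accepted for gpt-5.1-codex-max and 'none' only for GPT-5.1 models, and unsupported combinations result in an error.

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const { text } = await generateText({
      model: openai.responses('gpt-5.1-codex-max'),
      providerOptions: {
        // 'xhigh' is only valid for gpt-5.1-codex-max; other models reject it.
        openai: { reasoningEffort: 'xhigh' },
      },
      prompt: 'Summarize the changes in this diff.',
    });
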