@ai-sdk/openai 2.0.81 → 2.0.83
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +14 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +14 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -2
- package/dist/internal/index.d.ts +2 -2
- package/dist/internal/index.js +13 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +13 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
```diff
@@ -1,5 +1,17 @@
 # @ai-sdk/openai

+## 2.0.83
+
+### Patch Changes
+
+- edc807f: feat(provider/openai): add GPT 5.1 Codex Max to OpenAI Responses model IDs list
+
+## 2.0.82
+
+### Patch Changes
+
+- e31dfa4: feat(provider/openai): add support for OpenAI xhigh reasoning effort
+
 ## 2.0.81

 ### Patch Changes
```
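Taken together, the two patch releases add a new Responses model ID (`gpt-5.1-codex-max`) and a new `xhigh` reasoning-effort value. Below is a minimal sketch of selecting the new model through the AI SDK's `generateText` call; the prompt is illustrative, and `openai.responses` is assumed to be the Responses API model factory exported by this package.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: 'gpt-5.1-codex-max' is now part of the OpenAIResponsesModelId union,
// so it autocompletes instead of falling through to the (string & {}) escape hatch.
const { text } = await generateText({
  model: openai.responses('gpt-5.1-codex-max'),
  prompt: 'Summarize the changes in this diff.', // illustrative prompt
});

console.log(text);
```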
package/dist/index.d.mts
CHANGED
```diff
@@ -8,7 +8,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
 logprobs?: number | boolean | undefined;
 parallelToolCalls?: boolean | undefined;
 user?: string | undefined;
-reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
+reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
 maxCompletionTokens?: number | undefined;
 store?: boolean | undefined;
 metadata?: Record<string, string> | undefined;
@@ -329,7 +329,7 @@ declare const openaiTools: {
 }>;
 };

-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
 conversation?: string | null | undefined;
 include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
```
package/dist/index.d.ts
CHANGED
```diff
@@ -8,7 +8,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
 logprobs?: number | boolean | undefined;
 parallelToolCalls?: boolean | undefined;
 user?: string | undefined;
-reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
+reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
 maxCompletionTokens?: number | undefined;
 store?: boolean | undefined;
 metadata?: Record<string, string> | undefined;
@@ -329,7 +329,7 @@ declare const openaiTools: {
 }>;
 };

-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
 conversation?: string | null | undefined;
 include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
```
package/dist/index.js
CHANGED
```diff
@@ -449,7 +449,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
 /**
 * Reasoning effort for reasoning models. Defaults to `medium`.
 */
-reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
+reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
 /**
 * Maximum number of completion tokens to generate. Useful for reasoning models.
 */
@@ -2879,7 +2879,8 @@ var openaiResponsesReasoningModelIds = [
 "gpt-5.1",
 "gpt-5.1-chat-latest",
 "gpt-5.1-codex-mini",
-"gpt-5.1-codex"
+"gpt-5.1-codex",
+"gpt-5.1-codex-max"
 ];
 var openaiResponsesModelIds = [
 "gpt-4.1",
@@ -2962,6 +2963,16 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
 * @default 'in_memory'
 */
 promptCacheRetention: import_v417.z.enum(["in_memory", "24h"]).nullish(),
+/**
+* Reasoning effort for reasoning models. Defaults to `medium`. If you use
+* `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
+* Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
+*
+* The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
+* models. Also, the 'xhigh' type for `reasoningEffort` is only available for
+* OpenAI's GPT-5.1-Codex-Max model. Setting `reasoningEffort` to 'none' or 'xhigh' with unsupported models will result in
+* an error.
+*/
 reasoningEffort: import_v417.z.string().nullish(),
 reasoningSummary: import_v417.z.string().nullish(),
 safetyIdentifier: import_v417.z.string().nullish(),
@@ -4541,7 +4552,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "2.0.81" : "0.0.0-test";
+var VERSION = true ? "2.0.83" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {
```