@ai-sdk/openai 2.0.6 → 2.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +5 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +31 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +31 -19
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +11 -0
- package/dist/internal/index.d.ts +11 -0
- package/dist/internal/index.js +31 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +31 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,20 @@
|
|
|
1
1
|
# @ai-sdk/openai
|
|
2
2
|
|
|
3
|
+
## 2.0.8
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- 57fb959: feat(openai): add verbosity parameter support for chat api
|
|
8
|
+
- 2a3fbe6: allow `minimal` in `reasoningEffort` for openai chat
|
|
9
|
+
|
|
10
|
+
## 2.0.7
|
|
11
|
+
|
|
12
|
+
### Patch Changes
|
|
13
|
+
|
|
14
|
+
- 4738f18: feat(openai): add flex processing support for gpt-5 models
|
|
15
|
+
- 013d747: feat(openai): add verbosity parameter support for responses api
|
|
16
|
+
- 35feee8: feat(openai): add priority processing support for gpt-5 models
|
|
17
|
+
|
|
3
18
|
## 2.0.6
|
|
4
19
|
|
|
5
20
|
### Patch Changes
|
package/dist/index.d.mts
CHANGED
|
@@ -87,6 +87,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
|
|
|
87
87
|
"reasoning.encrypted_content": "reasoning.encrypted_content";
|
|
88
88
|
"file_search_call.results": "file_search_call.results";
|
|
89
89
|
}>>>>;
|
|
90
|
+
textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
|
|
91
|
+
low: "low";
|
|
92
|
+
medium: "medium";
|
|
93
|
+
high: "high";
|
|
94
|
+
}>>>;
|
|
90
95
|
}, z.core.$strip>;
|
|
91
96
|
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
|
|
92
97
|
|
package/dist/index.d.ts
CHANGED
|
@@ -87,6 +87,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
|
|
|
87
87
|
"reasoning.encrypted_content": "reasoning.encrypted_content";
|
|
88
88
|
"file_search_call.results": "file_search_call.results";
|
|
89
89
|
}>>>>;
|
|
90
|
+
textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
|
|
91
|
+
low: "low";
|
|
92
|
+
medium: "medium";
|
|
93
|
+
high: "high";
|
|
94
|
+
}>>>;
|
|
90
95
|
}, z.core.$strip>;
|
|
91
96
|
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
|
|
92
97
|
|
package/dist/index.js
CHANGED
|
@@ -273,7 +273,7 @@ var openaiProviderOptions = import_v4.z.object({
|
|
|
273
273
|
/**
|
|
274
274
|
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
275
275
|
*/
|
|
276
|
-
reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
|
|
276
|
+
reasoningEffort: import_v4.z.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
277
277
|
/**
|
|
278
278
|
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
279
279
|
*/
|
|
@@ -310,7 +310,12 @@ var openaiProviderOptions = import_v4.z.object({
|
|
|
310
310
|
*
|
|
311
311
|
* @default false
|
|
312
312
|
*/
|
|
313
|
-
strictJsonSchema: import_v4.z.boolean().optional()
|
|
313
|
+
strictJsonSchema: import_v4.z.boolean().optional(),
|
|
314
|
+
/**
|
|
315
|
+
* Controls the verbosity of the model's responses.
|
|
316
|
+
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
317
|
+
*/
|
|
318
|
+
textVerbosity: import_v4.z.enum(["low", "medium", "high"]).optional()
|
|
314
319
|
});
|
|
315
320
|
|
|
316
321
|
// src/openai-error.ts
|
|
@@ -591,6 +596,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
591
596
|
} : { type: "json_object" } : void 0,
|
|
592
597
|
stop: stopSequences,
|
|
593
598
|
seed,
|
|
599
|
+
verbosity: openaiOptions.textVerbosity,
|
|
594
600
|
// openai specific settings:
|
|
595
601
|
// TODO remove in next major version; we auto-map maxOutputTokens now
|
|
596
602
|
max_completion_tokens: openaiOptions.maxCompletionTokens,
|
|
@@ -676,7 +682,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
676
682
|
warnings.push({
|
|
677
683
|
type: "unsupported-setting",
|
|
678
684
|
setting: "serviceTier",
|
|
679
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
685
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
680
686
|
});
|
|
681
687
|
baseArgs.service_tier = void 0;
|
|
682
688
|
}
|
|
@@ -684,7 +690,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
684
690
|
warnings.push({
|
|
685
691
|
type: "unsupported-setting",
|
|
686
692
|
setting: "serviceTier",
|
|
687
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
693
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
688
694
|
});
|
|
689
695
|
baseArgs.service_tier = void 0;
|
|
690
696
|
}
|
|
@@ -1117,10 +1123,10 @@ function isReasoningModel(modelId) {
|
|
|
1117
1123
|
return modelId.startsWith("o") || modelId.startsWith("gpt-5");
|
|
1118
1124
|
}
|
|
1119
1125
|
function supportsFlexProcessing(modelId) {
|
|
1120
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1126
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
1121
1127
|
}
|
|
1122
1128
|
function supportsPriorityProcessing(modelId) {
|
|
1123
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1129
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1124
1130
|
}
|
|
1125
1131
|
function getSystemMessageMode(modelId) {
|
|
1126
1132
|
var _a, _b;
|
|
@@ -2298,15 +2304,20 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2298
2304
|
temperature,
|
|
2299
2305
|
top_p: topP,
|
|
2300
2306
|
max_output_tokens: maxOutputTokens,
|
|
2301
|
-
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2307
|
+
...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
|
|
2302
2308
|
text: {
|
|
2303
|
-
|
|
2304
|
-
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2309
|
+
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2310
|
+
format: responseFormat.schema != null ? {
|
|
2311
|
+
type: "json_schema",
|
|
2312
|
+
strict: strictJsonSchema,
|
|
2313
|
+
name: (_b = responseFormat.name) != null ? _b : "response",
|
|
2314
|
+
description: responseFormat.description,
|
|
2315
|
+
schema: responseFormat.schema
|
|
2316
|
+
} : { type: "json_object" }
|
|
2317
|
+
},
|
|
2318
|
+
...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
|
|
2319
|
+
verbosity: openaiOptions.textVerbosity
|
|
2320
|
+
}
|
|
2310
2321
|
}
|
|
2311
2322
|
},
|
|
2312
2323
|
// provider options:
|
|
@@ -2370,7 +2381,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2370
2381
|
warnings.push({
|
|
2371
2382
|
type: "unsupported-setting",
|
|
2372
2383
|
setting: "serviceTier",
|
|
2373
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
2384
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
2374
2385
|
});
|
|
2375
2386
|
delete baseArgs.service_tier;
|
|
2376
2387
|
}
|
|
@@ -2378,7 +2389,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2378
2389
|
warnings.push({
|
|
2379
2390
|
type: "unsupported-setting",
|
|
2380
2391
|
setting: "serviceTier",
|
|
2381
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
2392
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
2382
2393
|
});
|
|
2383
2394
|
delete baseArgs.service_tier;
|
|
2384
2395
|
}
|
|
@@ -3134,10 +3145,10 @@ function getResponsesModelConfig(modelId) {
|
|
|
3134
3145
|
};
|
|
3135
3146
|
}
|
|
3136
3147
|
function supportsFlexProcessing2(modelId) {
|
|
3137
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3148
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
3138
3149
|
}
|
|
3139
3150
|
function supportsPriorityProcessing2(modelId) {
|
|
3140
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3151
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3141
3152
|
}
|
|
3142
3153
|
var openaiResponsesProviderOptionsSchema = import_v414.z.object({
|
|
3143
3154
|
metadata: import_v414.z.any().nullish(),
|
|
@@ -3150,7 +3161,8 @@ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
|
|
|
3150
3161
|
instructions: import_v414.z.string().nullish(),
|
|
3151
3162
|
reasoningSummary: import_v414.z.string().nullish(),
|
|
3152
3163
|
serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
|
|
3153
|
-
include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
|
|
3164
|
+
include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
|
|
3165
|
+
textVerbosity: import_v414.z.enum(["low", "medium", "high"]).nullish()
|
|
3154
3166
|
});
|
|
3155
3167
|
|
|
3156
3168
|
// src/openai-speech-model.ts
|