@ai-sdk/openai 2.0.6 → 2.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +5 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +31 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +31 -19
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +11 -0
- package/dist/internal/index.d.ts +11 -0
- package/dist/internal/index.js +31 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +31 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
|
@@ -255,7 +255,7 @@ var openaiProviderOptions = z.object({
|
|
|
255
255
|
/**
|
|
256
256
|
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
257
257
|
*/
|
|
258
|
-
reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
|
|
258
|
+
reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
259
259
|
/**
|
|
260
260
|
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
261
261
|
*/
|
|
@@ -292,7 +292,12 @@ var openaiProviderOptions = z.object({
|
|
|
292
292
|
*
|
|
293
293
|
* @default false
|
|
294
294
|
*/
|
|
295
|
-
strictJsonSchema: z.boolean().optional()
|
|
295
|
+
strictJsonSchema: z.boolean().optional(),
|
|
296
|
+
/**
|
|
297
|
+
* Controls the verbosity of the model's responses.
|
|
298
|
+
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
299
|
+
*/
|
|
300
|
+
textVerbosity: z.enum(["low", "medium", "high"]).optional()
|
|
296
301
|
});
|
|
297
302
|
|
|
298
303
|
// src/openai-error.ts
|
|
@@ -575,6 +580,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
575
580
|
} : { type: "json_object" } : void 0,
|
|
576
581
|
stop: stopSequences,
|
|
577
582
|
seed,
|
|
583
|
+
verbosity: openaiOptions.textVerbosity,
|
|
578
584
|
// openai specific settings:
|
|
579
585
|
// TODO remove in next major version; we auto-map maxOutputTokens now
|
|
580
586
|
max_completion_tokens: openaiOptions.maxCompletionTokens,
|
|
@@ -660,7 +666,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
660
666
|
warnings.push({
|
|
661
667
|
type: "unsupported-setting",
|
|
662
668
|
setting: "serviceTier",
|
|
663
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
669
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
664
670
|
});
|
|
665
671
|
baseArgs.service_tier = void 0;
|
|
666
672
|
}
|
|
@@ -668,7 +674,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
668
674
|
warnings.push({
|
|
669
675
|
type: "unsupported-setting",
|
|
670
676
|
setting: "serviceTier",
|
|
671
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
677
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
672
678
|
});
|
|
673
679
|
baseArgs.service_tier = void 0;
|
|
674
680
|
}
|
|
@@ -1101,10 +1107,10 @@ function isReasoningModel(modelId) {
|
|
|
1101
1107
|
return modelId.startsWith("o") || modelId.startsWith("gpt-5");
|
|
1102
1108
|
}
|
|
1103
1109
|
function supportsFlexProcessing(modelId) {
|
|
1104
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1110
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
1105
1111
|
}
|
|
1106
1112
|
function supportsPriorityProcessing(modelId) {
|
|
1107
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1113
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1108
1114
|
}
|
|
1109
1115
|
function getSystemMessageMode(modelId) {
|
|
1110
1116
|
var _a, _b;
|
|
@@ -2427,15 +2433,20 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2427
2433
|
temperature,
|
|
2428
2434
|
top_p: topP,
|
|
2429
2435
|
max_output_tokens: maxOutputTokens,
|
|
2430
|
-
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2436
|
+
...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
|
|
2431
2437
|
text: {
|
|
2432
|
-
|
|
2433
|
-
|
|
2434
|
-
|
|
2435
|
-
|
|
2436
|
-
|
|
2437
|
-
|
|
2438
|
-
|
|
2438
|
+
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2439
|
+
format: responseFormat.schema != null ? {
|
|
2440
|
+
type: "json_schema",
|
|
2441
|
+
strict: strictJsonSchema,
|
|
2442
|
+
name: (_b = responseFormat.name) != null ? _b : "response",
|
|
2443
|
+
description: responseFormat.description,
|
|
2444
|
+
schema: responseFormat.schema
|
|
2445
|
+
} : { type: "json_object" }
|
|
2446
|
+
},
|
|
2447
|
+
...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
|
|
2448
|
+
verbosity: openaiOptions.textVerbosity
|
|
2449
|
+
}
|
|
2439
2450
|
}
|
|
2440
2451
|
},
|
|
2441
2452
|
// provider options:
|
|
@@ -2499,7 +2510,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2499
2510
|
warnings.push({
|
|
2500
2511
|
type: "unsupported-setting",
|
|
2501
2512
|
setting: "serviceTier",
|
|
2502
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
2513
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
2503
2514
|
});
|
|
2504
2515
|
delete baseArgs.service_tier;
|
|
2505
2516
|
}
|
|
@@ -2507,7 +2518,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2507
2518
|
warnings.push({
|
|
2508
2519
|
type: "unsupported-setting",
|
|
2509
2520
|
setting: "serviceTier",
|
|
2510
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
2521
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
2511
2522
|
});
|
|
2512
2523
|
delete baseArgs.service_tier;
|
|
2513
2524
|
}
|
|
@@ -3263,10 +3274,10 @@ function getResponsesModelConfig(modelId) {
|
|
|
3263
3274
|
};
|
|
3264
3275
|
}
|
|
3265
3276
|
function supportsFlexProcessing2(modelId) {
|
|
3266
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3277
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
3267
3278
|
}
|
|
3268
3279
|
function supportsPriorityProcessing2(modelId) {
|
|
3269
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3280
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3270
3281
|
}
|
|
3271
3282
|
var openaiResponsesProviderOptionsSchema = z15.object({
|
|
3272
3283
|
metadata: z15.any().nullish(),
|
|
@@ -3279,7 +3290,8 @@ var openaiResponsesProviderOptionsSchema = z15.object({
|
|
|
3279
3290
|
instructions: z15.string().nullish(),
|
|
3280
3291
|
reasoningSummary: z15.string().nullish(),
|
|
3281
3292
|
serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
|
|
3282
|
-
include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
|
|
3293
|
+
include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
|
|
3294
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish()
|
|
3283
3295
|
});
|
|
3284
3296
|
export {
|
|
3285
3297
|
OpenAIChatLanguageModel,
|