@ai-sdk/openai 2.0.5 → 2.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +7 -2
- package/dist/index.d.ts +7 -2
- package/dist/index.js +23 -17
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -17
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -2
- package/dist/internal/index.d.ts +7 -2
- package/dist/internal/index.js +23 -17
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +23 -17
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
|
@@ -660,7 +660,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
660
660
|
warnings.push({
|
|
661
661
|
type: "unsupported-setting",
|
|
662
662
|
setting: "serviceTier",
|
|
663
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
663
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
664
664
|
});
|
|
665
665
|
baseArgs.service_tier = void 0;
|
|
666
666
|
}
|
|
@@ -668,7 +668,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
668
668
|
warnings.push({
|
|
669
669
|
type: "unsupported-setting",
|
|
670
670
|
setting: "serviceTier",
|
|
671
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
671
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
672
672
|
});
|
|
673
673
|
baseArgs.service_tier = void 0;
|
|
674
674
|
}
|
|
@@ -1101,10 +1101,10 @@ function isReasoningModel(modelId) {
|
|
|
1101
1101
|
return modelId.startsWith("o") || modelId.startsWith("gpt-5");
|
|
1102
1102
|
}
|
|
1103
1103
|
function supportsFlexProcessing(modelId) {
|
|
1104
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1104
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
1105
1105
|
}
|
|
1106
1106
|
function supportsPriorityProcessing(modelId) {
|
|
1107
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1107
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
1108
1108
|
}
|
|
1109
1109
|
function getSystemMessageMode(modelId) {
|
|
1110
1110
|
var _a, _b;
|
|
@@ -2427,15 +2427,20 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2427
2427
|
temperature,
|
|
2428
2428
|
top_p: topP,
|
|
2429
2429
|
max_output_tokens: maxOutputTokens,
|
|
2430
|
-
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2430
|
+
...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
|
|
2431
2431
|
text: {
|
|
2432
|
-
|
|
2433
|
-
|
|
2434
|
-
|
|
2435
|
-
|
|
2436
|
-
|
|
2437
|
-
|
|
2438
|
-
|
|
2432
|
+
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2433
|
+
format: responseFormat.schema != null ? {
|
|
2434
|
+
type: "json_schema",
|
|
2435
|
+
strict: strictJsonSchema,
|
|
2436
|
+
name: (_b = responseFormat.name) != null ? _b : "response",
|
|
2437
|
+
description: responseFormat.description,
|
|
2438
|
+
schema: responseFormat.schema
|
|
2439
|
+
} : { type: "json_object" }
|
|
2440
|
+
},
|
|
2441
|
+
...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
|
|
2442
|
+
verbosity: openaiOptions.textVerbosity
|
|
2443
|
+
}
|
|
2439
2444
|
}
|
|
2440
2445
|
},
|
|
2441
2446
|
// provider options:
|
|
@@ -2499,7 +2504,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2499
2504
|
warnings.push({
|
|
2500
2505
|
type: "unsupported-setting",
|
|
2501
2506
|
setting: "serviceTier",
|
|
2502
|
-
details: "flex processing is only available for o3 and o4-mini models"
|
|
2507
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
2503
2508
|
});
|
|
2504
2509
|
delete baseArgs.service_tier;
|
|
2505
2510
|
}
|
|
@@ -2507,7 +2512,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2507
2512
|
warnings.push({
|
|
2508
2513
|
type: "unsupported-setting",
|
|
2509
2514
|
setting: "serviceTier",
|
|
2510
|
-
details: "priority processing is only available for supported models (gpt-4, o3, o4-mini) and requires Enterprise access"
|
|
2515
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
2511
2516
|
});
|
|
2512
2517
|
delete baseArgs.service_tier;
|
|
2513
2518
|
}
|
|
@@ -3263,10 +3268,10 @@ function getResponsesModelConfig(modelId) {
|
|
|
3263
3268
|
};
|
|
3264
3269
|
}
|
|
3265
3270
|
function supportsFlexProcessing2(modelId) {
|
|
3266
|
-
return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3271
|
+
return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
|
|
3267
3272
|
}
|
|
3268
3273
|
function supportsPriorityProcessing2(modelId) {
|
|
3269
|
-
return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3274
|
+
return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
|
|
3270
3275
|
}
|
|
3271
3276
|
var openaiResponsesProviderOptionsSchema = z15.object({
|
|
3272
3277
|
metadata: z15.any().nullish(),
|
|
@@ -3279,7 +3284,8 @@ var openaiResponsesProviderOptionsSchema = z15.object({
|
|
|
3279
3284
|
instructions: z15.string().nullish(),
|
|
3280
3285
|
reasoningSummary: z15.string().nullish(),
|
|
3281
3286
|
serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
|
|
3282
|
-
include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
|
|
3287
|
+
include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
|
|
3288
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish()
|
|
3283
3289
|
});
|
|
3284
3290
|
export {
|
|
3285
3291
|
OpenAIChatLanguageModel,
|