@ai-sdk/openai 2.0.5 → 2.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +7 -2
- package/dist/index.d.ts +7 -2
- package/dist/index.js +23 -17
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -17
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -2
- package/dist/internal/index.d.ts +7 -2
- package/dist/internal/index.js +23 -17
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +23 -17
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -666,7 +666,7 @@ var OpenAIChatLanguageModel = class {
         warnings.push({
           type: "unsupported-setting",
           setting: "serviceTier",
-          details: "flex processing is only available for o3…
+          details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
         });
         baseArgs.service_tier = void 0;
       }
@@ -674,7 +674,7 @@ var OpenAIChatLanguageModel = class {
         warnings.push({
           type: "unsupported-setting",
           setting: "serviceTier",
-          details: "priority processing is only available for supported models (…
+          details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
         });
         baseArgs.service_tier = void 0;
       }
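These two hunks only reword the warning details on the Chat Completions path; an unsupported serviceTier is still stripped from the request instead of causing an error. A minimal sketch of how that surfaces through the AI SDK, assuming the usual generateText call shape, the openai.chat factory, and that call warnings are exposed on the result (model ID and prompt are placeholders):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// gpt-4o is not in the flex allow-list (o3, o4-mini, gpt-5), so the provider
// removes `service_tier` from the request and records a warning instead.
const { text, warnings } = await generateText({
  model: openai.chat("gpt-4o"), // placeholder model ID
  prompt: "Reply with OK.",
  providerOptions: {
    openai: { serviceTier: "flex" },
  },
});

console.log(warnings);
// Expected to include the updated message:
// "flex processing is only available for o3, o4-mini, and gpt-5 models"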
@@ -1107,10 +1107,10 @@ function isReasoningModel(modelId) {
   return modelId.startsWith("o") || modelId.startsWith("gpt-5");
 }
 function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
 }
 function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
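The updated predicates gate the tiers purely on model-ID prefixes. A standalone sketch of the same logic (copied from the new lines above, with illustrative sample IDs); it also makes the operator precedence explicit: the !startsWith("gpt-5-nano") guard binds only to the gpt-5 term, so gpt-5-nano is rejected for priority processing while gpt-5 and gpt-5-mini pass.

// Mirrors the updated helpers in dist/index.mjs (not part of the exported API).
function supportsFlexProcessing(modelId: string): boolean {
  return (
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini") ||
    modelId.startsWith("gpt-5")
  );
}

function supportsPriorityProcessing(modelId: string): boolean {
  return (
    modelId.startsWith("gpt-4") ||
    modelId.startsWith("gpt-5-mini") ||
    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano")) ||
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini")
  );
}

// Sample model IDs (illustrative only):
console.log(supportsFlexProcessing("gpt-5-nano"));      // true  (flex has no nano exclusion)
console.log(supportsPriorityProcessing("gpt-5-nano"));  // false (excluded by the && guard)
console.log(supportsPriorityProcessing("gpt-5-mini"));  // true
console.log(supportsPriorityProcessing("o4-mini"));     // true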
@@ -2327,15 +2327,20 @@ var OpenAIResponsesLanguageModel = class {
         temperature,
         top_p: topP,
         max_output_tokens: maxOutputTokens,
-        ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+        ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
           text: {
-            format: responseFormat.schema != null ? {
-              type: "json_schema",
-              strict: strictJsonSchema,
-              name: (_b = responseFormat.name) != null ? _b : "response",
-              description: responseFormat.description,
-              schema: responseFormat.schema
-            } : { type: "json_object" }
+            ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+              format: responseFormat.schema != null ? {
+                type: "json_schema",
+                strict: strictJsonSchema,
+                name: (_b = responseFormat.name) != null ? _b : "response",
+                description: responseFormat.description,
+                schema: responseFormat.schema
+              } : { type: "json_object" }
+            },
+            ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
+              verbosity: openaiOptions.textVerbosity
+            }
           }
         },
         // provider options:
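With this change the Responses model builds a text object whenever either a JSON response format or a textVerbosity provider option is present, and merges format and verbosity into it. A sketch of how a caller might exercise both paths, assuming the AI SDK's generateText/generateObject calls, the openai.responses factory, and the providerOptions.openai shape; the model ID, prompt, and schema are placeholders:

import { z } from "zod";
import { generateText, generateObject } from "ai";
import { openai } from "@ai-sdk/openai";

// 1) Verbosity only: the request body gets `text: { verbosity: "low" }`.
const terse = await generateText({
  model: openai.responses("gpt-5"),
  prompt: "Explain service tiers in one paragraph.",
  providerOptions: {
    openai: { textVerbosity: "low" },
  },
});

// 2) JSON output plus verbosity: `text.format` (json_schema) and
//    `text.verbosity` are sent together in the same `text` object.
const structured = await generateObject({
  model: openai.responses("gpt-5"),
  schema: z.object({ summary: z.string() }), // placeholder schema
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: { textVerbosity: "low" },
  },
});

console.log(terse.text, structured.object);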
@@ -2399,7 +2404,7 @@ var OpenAIResponsesLanguageModel = class {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
-        details: "flex processing is only available for o3…
+        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
       });
       delete baseArgs.service_tier;
     }
@@ -2407,7 +2412,7 @@ var OpenAIResponsesLanguageModel = class {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
-        details: "priority processing is only available for supported models (…
+        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
       });
       delete baseArgs.service_tier;
     }
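The Responses path gets the same two message updates; behaviour is otherwise unchanged (the unsupported tier is deleted from the request and a warning is recorded). As a hedged sketch, requesting priority processing on gpt-5-nano through the Responses model would surface the new message, assuming call warnings are exposed on the generateText result:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// gpt-5-nano is explicitly excluded from priority processing, so the provider
// deletes `service_tier` and pushes an "unsupported-setting" warning.
const { warnings } = await generateText({
  model: openai.responses("gpt-5-nano"),
  prompt: "Reply with OK.",
  providerOptions: {
    openai: { serviceTier: "priority" },
  },
});

console.log(warnings);
// Expected to include:
// { type: "unsupported-setting", setting: "serviceTier",
//   details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported" }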
@@ -3163,10 +3168,10 @@ function getResponsesModelConfig(modelId) {
   };
 }
 function supportsFlexProcessing2(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
 }
 function supportsPriorityProcessing2(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 var openaiResponsesProviderOptionsSchema = z14.object({
   metadata: z14.any().nullish(),
@@ -3179,7 +3184,8 @@ var openaiResponsesProviderOptionsSchema = z14.object({
   instructions: z14.string().nullish(),
   reasoningSummary: z14.string().nullish(),
   serviceTier: z14.enum(["auto", "flex", "priority"]).nullish(),
-  include: z14.array(z14.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
+  include: z14.array(z14.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  textVerbosity: z14.enum(["low", "medium", "high"]).nullish()
 });
 
 // src/openai-speech-model.ts
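The schema addition means textVerbosity is validated to one of "low" | "medium" | "high" (or null/undefined) before it is forwarded to the Responses API. A minimal sketch of the same validation pattern using zod directly, independent of the package's internal z14 alias:

import { z } from "zod";

// Same shape as the added schema field: an optional, nullable enum.
const textVerbositySchema = z.enum(["low", "medium", "high"]).nullish();

console.log(textVerbositySchema.safeParse("low").success);     // true
console.log(textVerbositySchema.safeParse(undefined).success); // true (nullish)
console.log(textVerbositySchema.safeParse("verbose").success); // false, rejected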