@ai-sdk/openai 2.0.0-beta.13 → 2.0.0-beta.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +3 -4
- package/dist/index.d.ts +3 -4
- package/dist/index.js +30 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -0
- package/dist/internal/index.d.ts +3 -0
- package/dist/internal/index.js +30 -6
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +30 -6
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -279,12 +279,14 @@ var openaiProviderOptions = z.object({
    */
   structuredOutputs: z.boolean().optional(),
   /**
-   * Service tier for the request.
-   *
+   * Service tier for the request.
+   * - 'auto': Default service tier
+   * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+   * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
    *
    * @default 'auto'
    */
-  serviceTier: z.enum(["auto", "flex"]).optional(),
+  serviceTier: z.enum(["auto", "flex", "priority"]).optional(),
   /**
    * Whether to use strict JSON schema validation.
    *
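
The `openaiProviderOptions` schema above now accepts `serviceTier: 'priority'` alongside `'auto'` and `'flex'`. A minimal usage sketch (the `providerOptions.openai` pass-through is the standard AI SDK pattern; the model id and prompt are placeholders, and priority processing itself still requires Enterprise access on the OpenAI side):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  // example model id; must match the provider's gpt-4 / o3 / o4-mini prefix check
  model: openai('gpt-4.1'),
  prompt: 'Summarize the incident report in three bullet points.',
  providerOptions: {
    openai: {
      // previously 'auto' | 'flex'; 'priority' is new in this release
      serviceTier: 'priority',
    },
  },
});
```
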
@@ -345,7 +347,7 @@ var fileSearchArgsSchema = z3.object({
    * Ranking options for the search.
    */
   ranking: z3.object({
-    ranker: z3.enum(["auto", "
+    ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
   }).optional(),
   /**
    * A filter to apply based on file attributes.
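
The file search tool's `ranker` enum now includes `'default-2024-08-21'`. A hedged sketch of passing ranking options through the provider-defined file search tool (the `openai.tools.fileSearch` factory and the `vectorStoreIds` name are assumptions not shown in this hunk; only `ranking.ranker` comes from the schema above):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4.1'),
  prompt: 'What does the handbook say about remote work?',
  tools: {
    // assumed tool factory and argument names; ranking.ranker mirrors fileSearchArgsSchema
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ['vs_example'], // hypothetical vector store id
      ranking: { ranker: 'default-2024-08-21' },
    }),
  },
});
```
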
@@ -662,6 +664,14 @@ var OpenAIChatLanguageModel = class {
       });
       baseArgs.service_tier = void 0;
     }
+    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
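
As with the existing flex check, an unsupported model does not cause a hard failure: the provider strips `service_tier` from the request body and records a call warning. A sketch of observing that from the caller (assuming `generateText` still exposes `warnings` on its result as in earlier AI SDK releases, and using `gpt-3.5-turbo` purely as an example of a model id outside the gpt-4 / o3 / o4-mini prefixes):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-3.5-turbo'), // does not pass supportsPriorityProcessing
  prompt: 'Hello!',
  providerOptions: { openai: { serviceTier: 'priority' } },
});

// Expected: an 'unsupported-setting' warning for serviceTier; the request is
// still sent, just without service_tier.
console.log(result.warnings);
```
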
@@ -1055,6 +1065,9 @@ function isReasoningModel(modelId) {
 function supportsFlexProcessing(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -2448,6 +2461,14 @@ var OpenAIResponsesLanguageModel = class {
       });
       delete baseArgs.service_tier;
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
@@ -3167,6 +3188,9 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing2(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = z15.object({
   metadata: z15.any().nullish(),
   parallelToolCalls: z15.boolean().nullish(),
@@ -3177,8 +3201,8 @@ var openaiResponsesProviderOptionsSchema = z15.object({
   strictJsonSchema: z15.boolean().nullish(),
   instructions: z15.string().nullish(),
   reasoningSummary: z15.string().nullish(),
-  serviceTier: z15.enum(["auto", "flex"]).nullish(),
-  include: z15.array(z15.enum(["reasoning.encrypted_content"])).nullish()
+  serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
+  include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
 });
 export {
   OpenAIChatLanguageModel,
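
For the Responses API model, the provider options gain the same `'priority'` tier plus a new `'file_search_call.results'` include value. A hedged sketch combining both (option names come from `openaiResponsesProviderOptionsSchema` above; the include value is normally paired with the file search tool from the earlier sketch, and whether results are returned inline is up to the OpenAI API):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4.1'), // example Responses API model
  prompt: 'Look up our refund policy in the knowledge base.',
  providerOptions: {
    openai: {
      serviceTier: 'priority',               // 'auto' | 'flex' | 'priority'
      include: ['file_search_call.results'], // newly accepted include value
    },
  },
});
```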