@ai-sdk/openai 2.0.0-alpha.10 → 2.0.0-alpha.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +34 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +34 -2
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +12 -0
- package/dist/internal/index.d.ts +12 -0
- package/dist/internal/index.js +34 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +34 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -270,7 +270,14 @@ var openaiProviderOptions = z.object({
    *
    * @default true
    */
-  structuredOutputs: z.boolean().optional()
+  structuredOutputs: z.boolean().optional(),
+  /**
+   * Service tier for the request. Set to 'flex' for 50% cheaper processing
+   * at the cost of increased latency. Only available for o3 and o4-mini models.
+   *
+   * @default 'auto'
+   */
+  serviceTier: z.enum(["auto", "flex"]).optional()
 });
 
 // src/openai-error.ts
@@ -443,6 +450,7 @@ var OpenAIChatLanguageModel = class {
       metadata: openaiOptions.metadata,
       prediction: openaiOptions.prediction,
       reasoning_effort: openaiOptions.reasoningEffort,
+      service_tier: openaiOptions.serviceTier,
       // messages:
       messages
     };
@@ -516,6 +524,14 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
+    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -887,6 +903,9 @@ var openaiChatChunkSchema = z3.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
+function supportsFlexProcessing(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -2039,6 +2058,7 @@ var OpenAIResponsesLanguageModel = class {
       store: openaiOptions == null ? void 0 : openaiOptions.store,
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2072,6 +2092,14 @@ var OpenAIResponsesLanguageModel = class {
         });
       }
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -2501,6 +2529,9 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
+function supportsFlexProcessing2(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = z12.object({
   metadata: z12.any().nullish(),
   parallelToolCalls: z12.boolean().nullish(),
@@ -2510,7 +2541,8 @@ var openaiResponsesProviderOptionsSchema = z12.object({
   reasoningEffort: z12.string().nullish(),
   strictSchemas: z12.boolean().nullish(),
   instructions: z12.string().nullish(),
-  reasoningSummary: z12.string().nullish()
+  reasoningSummary: z12.string().nullish(),
+  serviceTier: z12.enum(["auto", "flex"]).nullish()
 });
 
 // src/openai-speech-model.ts
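
Usage sketch (not part of the published diff): assuming the AI SDK generateText call shape and that OpenAI provider options are passed under providerOptions.openai, the new serviceTier option introduced in this version could be exercised as follows. The model id and prompt are illustrative.

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Request flex processing: ~50% cheaper, higher latency.
// Per this diff, only o3 and o4-mini model ids pass the check; for other
// models the provider pushes an "unsupported-setting" warning and strips
// service_tier from the request body.
const { text, warnings } = await generateText({
  model: openai('o4-mini'),
  prompt: 'Summarize this changelog entry.',
  providerOptions: {
    openai: { serviceTier: 'flex' } // 'auto' (default) | 'flex'
  }
});

console.log(warnings); // expected to be empty for o4-mini
console.log(text);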