@ai-sdk/openai 2.0.0-beta.13 → 2.0.0-beta.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/index.d.mts +1 -3
- package/dist/index.d.ts +1 -3
- package/dist/index.js +28 -4
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +28 -4
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +28 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +28 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,14 @@
 # @ai-sdk/openai
 
+## 2.0.0-beta.14
+
+### Patch Changes
+
+- eb173f1: chore (providers): remove model shorthand deprecation warnings
+- 7032dc5: feat(openai): add priority processing service tier support
+- Updated dependencies [dd5fd43]
+  - @ai-sdk/provider-utils@3.0.0-beta.8
+
 ## 2.0.0-beta.13
 
 ### Patch Changes
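As a usage note on the "priority processing service tier support" entry above: the option is passed through the OpenAI provider options, the same way the existing 'auto' and 'flex' tiers are. A minimal sketch, assuming the standard AI SDK generateText call shape; the model id and prompt are examples only, not taken from this diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize the release notes for 2.0.0-beta.14.',
  providerOptions: {
    openai: {
      // New in 2.0.0-beta.14: 'priority' joins 'auto' and 'flex'.
      // Per the schema below, it targets higher-speed processing with
      // predictably low latency and requires Enterprise access.
      serviceTier: 'priority',
    },
  },
});

console.log(result.text);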
package/dist/index.d.mts
CHANGED
@@ -81,6 +81,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         auto: "auto";
         flex: "flex";
+        priority: "priority";
     }>>>;
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
@@ -114,8 +115,6 @@ interface OpenAIProvider extends ProviderV2 {
     embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
-
-    @deprecated Use `textEmbeddingModel` instead.
     */
     textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
@@ -124,7 +123,6 @@ interface OpenAIProvider extends ProviderV2 {
     textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for image generation.
-    @deprecated Use `imageModel` instead.
     */
     image(modelId: OpenAIImageModelId): ImageModelV2;
     /**
package/dist/index.d.ts
CHANGED
@@ -81,6 +81,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         auto: "auto";
         flex: "flex";
+        priority: "priority";
     }>>>;
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
@@ -114,8 +115,6 @@ interface OpenAIProvider extends ProviderV2 {
     embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
-
-    @deprecated Use `textEmbeddingModel` instead.
     */
     textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
@@ -124,7 +123,6 @@ interface OpenAIProvider extends ProviderV2 {
     textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for image generation.
-    @deprecated Use `imageModel` instead.
     */
     image(modelId: OpenAIImageModelId): ImageModelV2;
     /**
package/dist/index.js
CHANGED
@@ -297,12 +297,14 @@ var openaiProviderOptions = import_v4.z.object({
   */
  structuredOutputs: import_v4.z.boolean().optional(),
  /**
-  * Service tier for the request.
-  *
+  * Service tier for the request.
+  * - 'auto': Default service tier
+  * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+  * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
   *
   * @default 'auto'
   */
- serviceTier: import_v4.z.enum(["auto", "flex"]).optional(),
+ serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
@@ -678,6 +680,14 @@ var OpenAIChatLanguageModel = class {
       });
       baseArgs.service_tier = void 0;
     }
+    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -1071,6 +1081,9 @@ function isReasoningModel(modelId) {
 function supportsFlexProcessing(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -2319,6 +2332,14 @@ var OpenAIResponsesLanguageModel = class {
       });
       delete baseArgs.service_tier;
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -3038,6 +3059,9 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing2(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = import_v414.z.object({
   metadata: import_v414.z.any().nullish(),
   parallelToolCalls: import_v414.z.boolean().nullish(),
@@ -3048,7 +3072,7 @@ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
   strictJsonSchema: import_v414.z.boolean().nullish(),
   instructions: import_v414.z.string().nullish(),
   reasoningSummary: import_v414.z.string().nullish(),
-  serviceTier: import_v414.z.enum(["auto", "flex"]).nullish(),
+  serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
   include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content"])).nullish()
 });
 
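Note on the gating shown above: both the chat model and the responses model check the model id with a prefix test (supportsPriorityProcessing / supportsPriorityProcessing2). If the id does not start with "gpt-4", "o3", or "o4-mini", the provider drops service_tier from the request and emits an "unsupported-setting" warning rather than failing the call. A minimal sketch of observing that behavior, assuming call warnings are surfaced on the generateText result as in current AI SDK versions; the model id is an example of an unsupported one, not taken from this diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'gpt-3.5-turbo' fails the gpt-4 / o3 / o4-mini prefix check, so the provider
// strips service_tier and reports an 'unsupported-setting' warning instead.
const { text, warnings } = await generateText({
  model: openai('gpt-3.5-turbo'),
  prompt: 'Hello!',
  providerOptions: { openai: { serviceTier: 'priority' } },
});

console.log(warnings);
// expected shape: [{ type: 'unsupported-setting', setting: 'serviceTier', details: '...' }]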