@ai-sdk/openai 2.0.0-beta.13 → 2.0.0-beta.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +3 -4
- package/dist/index.d.ts +3 -4
- package/dist/index.js +30 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -0
- package/dist/internal/index.d.ts +3 -0
- package/dist/internal/index.js +30 -6
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +30 -6
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
 # @ai-sdk/openai
 
+## 2.0.0-beta.15
+
+### Patch Changes
+
+- 9943464: feat(openai): add file_search_call.results support to include parameter
+- Updated dependencies [27deb4d]
+  - @ai-sdk/provider@2.0.0-beta.2
+  - @ai-sdk/provider-utils@3.0.0-beta.9
+
+## 2.0.0-beta.14
+
+### Patch Changes
+
+- eb173f1: chore (providers): remove model shorthand deprecation warnings
+- 7032dc5: feat(openai): add priority processing service tier support
+- Updated dependencies [dd5fd43]
+  - @ai-sdk/provider-utils@3.0.0-beta.8
+
 ## 2.0.0-beta.13
 
 ### Patch Changes
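A minimal usage sketch of the two features called out in the changelog above, not part of the diff: it assumes the AI SDK's standard `providerOptions.openai` pass-through, the `openai.responses(...)` model factory, and a tool factory exposed as `openai.tools.fileSearch` (suggested by the `openaiTools` declaration further down); the model id and vector store id are placeholders.

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const result = await generateText({
    // Placeholder model id; the priority tier also requires Enterprise access.
    model: openai.responses('gpt-4.1'),
    prompt: 'Summarize the indexed release notes.',
    tools: {
      // Tool factory name assumed from the openaiTools declaration below.
      file_search: openai.tools.fileSearch({
        vectorStoreIds: ['vs_placeholder'], // placeholder vector store id
        maxNumResults: 5,
      }),
    },
    providerOptions: {
      openai: {
        serviceTier: 'priority', // added in 2.0.0-beta.14
        include: ['file_search_call.results'], // added in 2.0.0-beta.15
      },
    },
  });

  console.log(result.text);
}

main().catch(console.error);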
package/dist/index.d.mts
CHANGED
@@ -18,7 +18,7 @@ declare const openaiTools: {
        vectorStoreIds?: string[];
        maxNumResults?: number;
        ranking?: {
-            ranker?: "auto" | "
+            ranker?: "auto" | "default-2024-08-21";
        };
        filters?: {
            key: string;
@@ -81,9 +81,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
        auto: "auto";
        flex: "flex";
+        priority: "priority";
    }>>>;
    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
        "reasoning.encrypted_content": "reasoning.encrypted_content";
+        "file_search_call.results": "file_search_call.results";
    }>>>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
@@ -114,8 +116,6 @@ interface OpenAIProvider extends ProviderV2 {
    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
  Creates a model for text embeddings.
-
-    @deprecated Use `textEmbeddingModel` instead.
      */
    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
@@ -124,7 +124,6 @@ interface OpenAIProvider extends ProviderV2 {
    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
  Creates a model for image generation.
-    @deprecated Use `imageModel` instead.
      */
    image(modelId: OpenAIImageModelId): ImageModelV2;
    /**
package/dist/index.d.ts
CHANGED
@@ -18,7 +18,7 @@ declare const openaiTools: {
        vectorStoreIds?: string[];
        maxNumResults?: number;
        ranking?: {
-            ranker?: "auto" | "
+            ranker?: "auto" | "default-2024-08-21";
        };
        filters?: {
            key: string;
@@ -81,9 +81,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
        auto: "auto";
        flex: "flex";
+        priority: "priority";
    }>>>;
    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
        "reasoning.encrypted_content": "reasoning.encrypted_content";
+        "file_search_call.results": "file_search_call.results";
    }>>>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
@@ -114,8 +116,6 @@ interface OpenAIProvider extends ProviderV2 {
    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
  Creates a model for text embeddings.
-
-    @deprecated Use `textEmbeddingModel` instead.
      */
    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
@@ -124,7 +124,6 @@ interface OpenAIProvider extends ProviderV2 {
    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
    /**
  Creates a model for image generation.
-    @deprecated Use `imageModel` instead.
      */
    image(modelId: OpenAIImageModelId): ImageModelV2;
    /**
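At the type level, the widened enums above mean the new values type-check without casts. A small sketch, assuming `OpenAIResponsesProviderOptions` is re-exported from the package entry point as these declarations suggest:

import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

// Both literals now satisfy the Zod-inferred type.
const options: OpenAIResponsesProviderOptions = {
  serviceTier: 'priority', // previously only 'auto' | 'flex'
  include: ['file_search_call.results'], // previously only 'reasoning.encrypted_content'
};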
package/dist/index.js
CHANGED
@@ -297,12 +297,14 @@ var openaiProviderOptions = import_v4.z.object({
      */
     structuredOutputs: import_v4.z.boolean().optional(),
     /**
-     * Service tier for the request.
-     *
+     * Service tier for the request.
+     * - 'auto': Default service tier
+     * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+     * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
      *
      * @default 'auto'
      */
-    serviceTier: import_v4.z.enum(["auto", "flex"]).optional(),
+    serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
     /**
      * Whether to use strict JSON schema validation.
      *
@@ -361,7 +363,7 @@ var fileSearchArgsSchema = import_v43.z.object({
      * Ranking options for the search.
      */
     ranking: import_v43.z.object({
-      ranker: import_v43.z.enum(["auto", "
+      ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
     }).optional(),
     /**
      * A filter to apply based on file attributes.
@@ -678,6 +680,14 @@ var OpenAIChatLanguageModel = class {
       });
       baseArgs.service_tier = void 0;
     }
+    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -1071,6 +1081,9 @@ function isReasoningModel(modelId) {
 function supportsFlexProcessing(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -2319,6 +2332,14 @@ var OpenAIResponsesLanguageModel = class {
       });
       delete baseArgs.service_tier;
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -3038,6 +3059,9 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
+function supportsPriorityProcessing2(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = import_v414.z.object({
   metadata: import_v414.z.any().nullish(),
   parallelToolCalls: import_v414.z.boolean().nullish(),
@@ -3048,8 +3072,8 @@ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
   strictJsonSchema: import_v414.z.boolean().nullish(),
   instructions: import_v414.z.string().nullish(),
   reasoningSummary: import_v414.z.string().nullish(),
-  serviceTier: import_v414.z.enum(["auto", "flex"]).nullish(),
-  include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content"])).nullish()
+  serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
+  include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
 });
 
 // src/openai-speech-model.ts
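The compiled output above gates the new tier behind a prefix check on the model id and downgrades unsupported requests with a warning instead of throwing. A condensed TypeScript sketch of that behavior; the helper name mirrors the emitted code, while the warning interface is simplified rather than the package's exported type:

type ServiceTier = 'auto' | 'flex' | 'priority';

// Mirrors supportsPriorityProcessing from the compiled output above.
function supportsPriorityProcessing(modelId: string): boolean {
  return (
    modelId.startsWith('gpt-4') ||
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini')
  );
}

// Simplified shape of the warning pushed when the tier is dropped.
interface UnsupportedSettingWarning {
  type: 'unsupported-setting';
  setting: string;
  details?: string;
}

function resolveServiceTier(
  modelId: string,
  requested: ServiceTier | undefined,
  warnings: UnsupportedSettingWarning[],
): ServiceTier | undefined {
  if (requested === 'priority' && !supportsPriorityProcessing(modelId)) {
    warnings.push({
      type: 'unsupported-setting',
      setting: 'serviceTier',
      details:
        'priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access',
    });
    return undefined; // the tier is dropped from the request, not rejected
  }
  return requested;
}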