@ai-sdk/openai 2.0.0-canary.13 → 2.0.0-canary.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +116 -25
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +116 -25
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -4
- package/dist/internal/index.d.ts +30 -4
- package/dist/internal/index.js +116 -25
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +116 -25
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,46 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.15
+
+### Patch Changes
+
+- 136819b: chore(providers/openai): re-introduce logprobs as providerMetadata
+- 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
+
+  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
+
+  ```js
+  const prompt = 'Santa Claus driving a Cadillac';
+
+  const { providerMetadata } = await experimental_generateImage({
+    model: openai.image('dall-e-3'),
+    prompt,
+  });
+
+  const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
+
+  console.log({
+    prompt,
+    revisedPrompt,
+  });
+  ```
+
+- 284353f: fix(providers/openai): zod parse error with function
+- Updated dependencies [957b739]
+- Updated dependencies [9bd5ab5]
+  - @ai-sdk/provider-utils@3.0.0-canary.14
+  - @ai-sdk/provider@2.0.0-canary.13
+
+## 2.0.0-canary.14
+
+### Patch Changes
+
+- fa758ea: feat(provider/openai): add o3 & o4-mini with developer systemMessageMode
+- Updated dependencies [7b3ae3f]
+- Updated dependencies [0ff02bb]
+  - @ai-sdk/provider@2.0.0-canary.12
+  - @ai-sdk/provider-utils@3.0.0-canary.13
+
 ## 2.0.0-canary.13
 
 ### Patch Changes
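Taken together, the canary.15 entries move OpenAI logprobs out of the standardized result shape and into provider metadata. A minimal consumption sketch, assuming the AI SDK canary `generateText` API (the model choice and prompt are illustrative, not from this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model choice
  prompt: 'Say hello.',
  providerOptions: {
    // true = logprobs of the generated tokens; a number = top-n per token
    openai: { logprobs: 2 },
  },
});

// Re-introduced in canary.15: logprobs travel via provider metadata,
// not the standardized result fields.
console.log(result.providerMetadata?.openai?.logprobs);
```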
package/dist/index.d.mts
CHANGED
@@ -23,7 +23,7 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     private get providerOptionsName();
     constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
-    getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+    readonly supportedUrls: Record<string, RegExp[]>;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
package/dist/index.d.ts
CHANGED
@@ -23,7 +23,7 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     private get providerOptionsName();
     constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
-    getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+    readonly supportedUrls: Record<string, RegExp[]>;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
package/dist/index.js
CHANGED
@@ -238,6 +238,16 @@ var openaiProviderOptions = import_zod.z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
@@ -355,17 +365,15 @@ function prepareTools({
 var OpenAIChatLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      "image/*": [/^https?:\/\/.*$/]
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
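This hunk replaces the async `getSupportedUrls()` method with a `supportedUrls` property initialized in the constructor. For consumers of the model object, the change looks roughly like this (a sketch; caller code is not part of this diff):

```ts
// Before (canary.13): URL support was exposed as an async method.
const before = await model.getSupportedUrls();

// After (canary.15): it is a plain property assigned in the constructor.
const after = model.supportedUrls; // e.g. { 'image/*': [/^https?:\/\/.*$/] }
```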
@@ -414,6 +422,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
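The two nested ternaries added above encode a small mapping from the `logprobs` provider option onto OpenAI's chat parameters. An equivalent, more readable formulation (a sketch, not code from the package):

```ts
// logprobs option   -> parameters sent to OpenAI
//   true            -> logprobs: true, top_logprobs: 0
//   n (number)      -> logprobs: true, top_logprobs: n
//   false/undefined -> both omitted
function toChatLogprobParams(logprobs: boolean | number | undefined) {
  const enabled = logprobs === true || typeof logprobs === 'number';
  return {
    logprobs: enabled ? true : undefined,
    top_logprobs:
      typeof logprobs === 'number' ? logprobs : logprobs === true ? 0 : undefined,
  };
}
```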
@@ -486,6 +496,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -521,7 +545,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -571,12 +595,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -811,6 +838,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
         ).nullish()
       }),
       index: import_zod3.z.number(),
+      logprobs: import_zod3.z.object({
+        content: import_zod3.z.array(
+          import_zod3.z.object({
+            token: import_zod3.z.string(),
+            logprob: import_zod3.z.number(),
+            top_logprobs: import_zod3.z.array(
+              import_zod3.z.object({
+                token: import_zod3.z.string(),
+                logprob: import_zod3.z.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: import_zod3.z.string().nullish()
     })
   ),
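Spelled out as a TypeScript type, each entry that this schema parses (and that doGenerate forwards as `providerMetadata.openai.logprobs`) has the following shape (derived from the zod schema above):

```ts
type ChatLogprobEntry = {
  token: string;
  logprob: number;
  // Alternatives per position, populated when `logprobs` is set to a number.
  top_logprobs: Array<{ token: string; logprob: number }>;
};
```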
@@ -830,7 +871,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
         import_zod3.z.object({
           index: import_zod3.z.number(),
           id: import_zod3.z.string().nullish(),
-          type: import_zod3.z.literal("function").
+          type: import_zod3.z.literal("function").nullish(),
           function: import_zod3.z.object({
             name: import_zod3.z.string().nullish(),
             arguments: import_zod3.z.string().nullish()
@@ -838,7 +879,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
           })
         ).nullish()
       }).nullish(),
-      finish_reason: import_zod3.z.string().
+      finish_reason: import_zod3.z.string().nullish(),
       index: import_zod3.z.number()
     })
   ),
@@ -869,11 +910,23 @@ var reasoningModels = {
   "o1-preview-2024-09-12": {
     systemMessageMode: "remove"
   },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
   "o3-mini": {
     systemMessageMode: "developer"
   },
   "o3-mini-2025-01-31": {
     systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
   }
 };
 
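With `systemMessageMode: "developer"`, system messages for the new o3 and o4-mini entries are forwarded to OpenAI as `developer`-role messages instead of `system` ones. A usage sketch, assuming the AI SDK canary `generateText` API (prompt text is illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('o4-mini'), // supported as of canary.14
  // Forwarded with the `developer` role per the mapping above.
  system: 'Answer in one sentence.',
  prompt: 'What does systemMessageMode control?',
});
```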
@@ -986,13 +1039,26 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: import_zod4.z.string().optional()
+  user: import_zod4.z.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
     this.config = config;
   }
@@ -1002,11 +1068,6 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      // no supported urls for completion models
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -1060,6 +1121,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
@@ -1098,6 +1160,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1111,6 +1177,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1137,6 +1204,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1175,6 +1243,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1186,6 +1257,7 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
+            providerMetadata,
             usage
           });
         }
@@ -1203,7 +1275,12 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
   choices: import_zod5.z.array(
     import_zod5.z.object({
       text: import_zod5.z.string(),
-      finish_reason: import_zod5.z.string()
+      finish_reason: import_zod5.z.string(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
   usage: import_zod5.z.object({
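Unlike the chat variant, the completion logprobs payload is column-oriented. As a TypeScript type (derived from the zod schema above):

```ts
type CompletionLogprobs = {
  tokens: string[];          // generated tokens, in order
  token_logprobs: number[];  // one logprob per generated token
  // Per-position map of top alternative tokens to their logprobs.
  top_logprobs?: Array<Record<string, number>> | null;
};
```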
@@ -1220,7 +1297,12 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
     import_zod5.z.object({
       text: import_zod5.z.string(),
       finish_reason: import_zod5.z.string().nullish(),
-      index: import_zod5.z.number()
+      index: import_zod5.z.number(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
   usage: import_zod5.z.object({
@@ -1337,7 +1419,7 @@ var OpenAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
     var _a, _b;
@@ -1397,12 +1479,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = import_zod8.z.object({
-  data: import_zod8.z.array(
+  data: import_zod8.z.array(
+    import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+  )
 });
 
 // src/openai-tools.ts
@@ -1826,13 +1919,11 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.
-    this.config = config;
-  }
-  async getSupportedUrls() {
-    return {
+    this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
+    this.modelId = modelId;
+    this.config = config;
   }
   get provider() {
     return this.config.provider;