@ai-sdk/openai 2.0.0-canary.13 → 2.0.0-canary.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +116 -25
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +116 -25
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -4
- package/dist/internal/index.d.ts +30 -4
- package/dist/internal/index.js +116 -25
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +116 -25
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -226,6 +226,16 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
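The option mirrors OpenAI's chat `logprobs`/`top_logprobs` request fields. A minimal usage sketch, assuming the AI SDK's `generateText` entry point; the model choice and prompt are illustrative, not part of this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model choice
  prompt: 'Say hello.',
  providerOptions: {
    // true: logprobs of the generated tokens only;
    // a number n: logprobs of the top n tokens as well.
    openai: { logprobs: 2 },
  },
});

// The raw OpenAI logprobs are passed through as provider metadata
// (see the doGenerate change further down in this diff).
console.log(result.providerMetadata?.openai?.logprobs);
```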
@@ -345,17 +355,15 @@ function prepareTools({
 var OpenAIChatLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      "image/*": [/^https?:\/\/.*$/]
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -404,6 +412,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
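Unminified, the two ternaries implement this mapping (a readable sketch of the logic above, not the shipped source):

```ts
// How the `logprobs` provider option maps onto the chat completions
// request fields `logprobs` and `top_logprobs`.
function mapChatLogprobs(option: boolean | number | undefined): {
  logprobs?: boolean;
  top_logprobs?: number;
} {
  if (typeof option === 'number') {
    return { logprobs: true, top_logprobs: option };
  }
  if (option === true) {
    return { logprobs: true, top_logprobs: 0 };
  }
  return {}; // false or undefined: neither field is sent
}
```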
@@ -476,6 +486,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
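As with `logitBias` directly above, reasoning models do not accept these fields, so the provider strips them from the request and surfaces a warning instead of failing the call.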
@@ -511,7 +535,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -561,12 +585,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -801,6 +828,20 @@ var openaiChatResponseSchema = z3.object({
       ).nullish()
     }),
     index: z3.number(),
+    logprobs: z3.object({
+      content: z3.array(
+        z3.object({
+          token: z3.string(),
+          logprob: z3.number(),
+          top_logprobs: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: z3.string().nullish()
   })
 ),
@@ -820,7 +861,7 @@ var openaiChatChunkSchema = z3.union([
         z3.object({
           index: z3.number(),
           id: z3.string().nullish(),
-          type: z3.literal("function").
+          type: z3.literal("function").nullish(),
           function: z3.object({
             name: z3.string().nullish(),
             arguments: z3.string().nullish()
@@ -828,7 +869,7 @@ var openaiChatChunkSchema = z3.union([
         })
       ).nullish()
     }).nullish(),
-    finish_reason: z3.string().
+    finish_reason: z3.string().nullish(),
     index: z3.number()
   })
 ),
@@ -859,11 +900,23 @@ var reasoningModels = {
   "o1-preview-2024-09-12": {
     systemMessageMode: "remove"
   },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
   "o3-mini": {
     systemMessageMode: "developer"
   },
   "o3-mini-2025-01-31": {
     systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
   }
 };
 
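This registers the `o3`, `o3-2025-04-16`, `o4-mini`, and `o4-mini-2025-04-16` model IDs as reasoning models whose system prompts are sent as `developer` messages, the same treatment the `o3-mini` entries already receive.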
@@ -985,13 +1038,26 @@ var openaiCompletionProviderOptions = z4.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: z4.string().optional()
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
     this.config = config;
   }
@@ -1001,11 +1067,6 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      // no supported urls for completion models
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -1059,6 +1120,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
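The legacy completions API expects `logprobs` to be an integer (the number of top alternatives to return per position), so the mapping differs from the chat model: `true` becomes `0` (token logprobs without alternatives) and `false` is dropped. A readable sketch of the minified ternary:

```ts
// Maps the provider option onto the completions API's integer field.
function mapCompletionLogprobs(
  option: boolean | number | undefined,
): number | undefined {
  if (option === true) return 0; // token logprobs only, no alternatives
  if (option === false) return undefined; // field is omitted
  return option; // a number n, or undefined
}
```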
@@ -1097,6 +1159,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
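On the response side, the completion model forwards the raw `logprobs` object (`tokens`, `token_logprobs`, `top_logprobs`) unchanged through `providerMetadata.openai.logprobs`; the streaming path below does the same on the finish event.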
@@ -1110,6 +1176,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1136,6 +1203,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1174,6 +1242,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1185,6 +1256,7 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
+            providerMetadata,
             usage
           });
         }
@@ -1202,7 +1274,12 @@ var openaiCompletionResponseSchema = z5.object({
   choices: z5.array(
     z5.object({
       text: z5.string(),
-      finish_reason: z5.string()
+      finish_reason: z5.string(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+      }).nullish()
     })
   ),
   usage: z5.object({
@@ -1219,7 +1296,12 @@ var openaiCompletionChunkSchema = z5.union([
     z5.object({
       text: z5.string(),
       finish_reason: z5.string().nullish(),
-      index: z5.number()
+      index: z5.number(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+      }).nullish()
     })
   ),
   usage: z5.object({
@@ -1347,7 +1429,7 @@ var OpenAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
     var _a, _b;
@@ -1407,12 +1489,23 @@
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = z8.object({
-  data: z8.array(
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });
 
 // src/openai-tools.ts
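With the widened response schema, image models that rewrite prompts (such as DALL-E 3) now surface the rewritten prompt per image. A hedged sketch, assuming the AI SDK's `experimental_generateImage` helper exposes the provider metadata returned above:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor lighthouse at dusk.',
});

// One entry per generated image; null when no revised prompt was returned.
for (const entry of providerMetadata?.openai?.images ?? []) {
  console.log(entry?.revisedPrompt);
}
```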
|
@@ -1853,13 +1946,11 @@ function prepareResponsesTools({
|
|
|
1853
1946
|
var OpenAIResponsesLanguageModel = class {
|
|
1854
1947
|
constructor(modelId, config) {
|
|
1855
1948
|
this.specificationVersion = "v2";
|
|
1856
|
-
this.
|
|
1857
|
-
this.config = config;
|
|
1858
|
-
}
|
|
1859
|
-
async getSupportedUrls() {
|
|
1860
|
-
return {
|
|
1949
|
+
this.supportedUrls = {
|
|
1861
1950
|
"image/*": [/^https?:\/\/.*$/]
|
|
1862
1951
|
};
|
|
1952
|
+
this.modelId = modelId;
|
|
1953
|
+
this.config = config;
|
|
1863
1954
|
}
|
|
1864
1955
|
get provider() {
|
|
1865
1956
|
return this.config.provider;
|