@ai-sdk/openai 2.0.0-canary.13 → 2.0.0-canary.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +116 -25
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +116 -25
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -4
- package/dist/internal/index.d.ts +30 -4
- package/dist/internal/index.js +116 -25
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +116 -25
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -220,6 +220,16 @@ var openaiProviderOptions = z.object({
      * the GPT tokenizer) to an associated bias value from -100 to 100.
      */
     logitBias: z.record(z.coerce.number(), z.number()).optional(),
+    /**
+     * Return the log probabilities of the tokens.
+     *
+     * Setting to true will return the log probabilities of the tokens that
+     * were generated.
+     *
+     * Setting to a number will return the log probabilities of the top n
+     * tokens that were generated.
+     */
+    logprobs: z.union([z.boolean(), z.number()]).optional(),
     /**
      * Whether to enable parallel function calling during tool use. Default to true.
      */
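The new `logprobs` option is read from `providerOptions.openai`. A minimal sketch of how a caller might set it, assuming the AI SDK's `generateText` entry point accepts provider options via `providerOptions` (model ID and prompt are illustrative):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Request log probabilities for the top 3 tokens at each position.
// Per the schema above, the value may also be `true` to get logprobs
// for the generated tokens only.
const result = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Say hello.',
  providerOptions: { openai: { logprobs: 3 } },
});

// Assumption: the result surfaces provider metadata as `providerMetadata`.
console.log(result.providerMetadata?.openai?.logprobs);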
@@ -339,17 +349,15 @@ function prepareTools({
 var OpenAIChatLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      "image/*": [/^https?:\/\/.*$/]
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
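The async `getSupportedUrls()` method becomes a plain `supportedUrls` instance property. A sketch of how downstream code might consult it; the helper is hypothetical, only the property shape comes from the constructor above:

// Hypothetical helper: checks whether a model accepts a URL for a media type.
function isUrlSupported(
  supportedUrls: Record<string, RegExp[]>,
  mediaType: string,
  url: string,
): boolean {
  return Object.entries(supportedUrls).some(
    ([pattern, regexps]) =>
      // Exact media type match or a wildcard like "image/*".
      (pattern === mediaType ||
        (pattern.endsWith('/*') &&
          mediaType.startsWith(pattern.slice(0, -1)))) &&
      regexps.some((re) => re.test(url)),
  );
}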
@@ -398,6 +406,8 @@ var OpenAIChatLanguageModel = class {
     model: this.modelId,
     // model specific settings:
     logit_bias: openaiOptions.logitBias,
+    logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+    top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
     user: openaiOptions.user,
     parallel_tool_calls: openaiOptions.parallelToolCalls,
     // standardized settings:
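The mapping from the unified `logprobs` option to the chat API's separate `logprobs`/`top_logprobs` parameters, restated as a standalone sketch for readability (same logic as the ternaries above, with explicit names):

function mapChatLogprobs(logprobs: boolean | number | undefined): {
  logprobs: true | undefined;
  top_logprobs: number | undefined;
} {
  return {
    // Any request (true or a number) turns logprobs on.
    logprobs:
      logprobs === true || typeof logprobs === 'number' ? true : undefined,
    // A number selects the top n alternatives; `true` maps to 0
    // (generated tokens only, no alternatives).
    top_logprobs:
      typeof logprobs === 'number'
        ? logprobs
        : logprobs === true
          ? 0
          : undefined,
  };
}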
@@ -470,6 +480,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -505,7 +529,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -555,12 +579,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -795,6 +822,20 @@ var openaiChatResponseSchema = z3.object({
       ).nullish()
     }),
     index: z3.number(),
+    logprobs: z3.object({
+      content: z3.array(
+        z3.object({
+          token: z3.string(),
+          logprob: z3.number(),
+          top_logprobs: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: z3.string().nullish()
   })
 ),
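For reference, the shape that ends up in `providerMetadata.openai.logprobs` for chat models, expressed as a TypeScript type mirroring the zod schema above:

type OpenAIChatLogprobs = Array<{
  token: string;
  logprob: number;
  top_logprobs: Array<{ token: string; logprob: number }>;
}>;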
@@ -814,7 +855,7 @@ var openaiChatChunkSchema = z3.union([
   z3.object({
     index: z3.number(),
     id: z3.string().nullish(),
-    type: z3.literal("function").
+    type: z3.literal("function").nullish(),
     function: z3.object({
       name: z3.string().nullish(),
       arguments: z3.string().nullish()
@@ -822,7 +863,7 @@ var openaiChatChunkSchema = z3.union([
         })
       ).nullish()
     }).nullish(),
-    finish_reason: z3.string().
+    finish_reason: z3.string().nullish(),
     index: z3.number()
   })
 ),
@@ -853,11 +894,23 @@ var reasoningModels = {
   "o1-preview-2024-09-12": {
     systemMessageMode: "remove"
   },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
   "o3-mini": {
     systemMessageMode: "developer"
   },
   "o3-mini-2025-01-31": {
     systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
   }
 };
 
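The new `o3`, `o3-2025-04-16`, `o4-mini`, and `o4-mini-2025-04-16` entries use `systemMessageMode: "developer"`. A sketch of what the three modes imply for message conversion; the function is illustrative, since the actual conversion logic lives elsewhere in the package and is not part of this diff:

// Illustrative only: how a system message might be handled per mode.
function applySystemMessageMode(
  mode: 'system' | 'developer' | 'remove',
  content: string,
) {
  switch (mode) {
    case 'system':
      return { role: 'system' as const, content };
    case 'developer':
      // Reasoning models expect the "developer" role instead of "system".
      return { role: 'developer' as const, content };
    case 'remove':
      // Older o1 preview models: the system message is dropped.
      return undefined;
  }
}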
@@ -979,13 +1032,26 @@ var openaiCompletionProviderOptions = z4.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: z4.string().optional()
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
     this.config = config;
   }
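As with chat models, the completion-model option is supplied through `providerOptions.openai`. A minimal sketch, assuming `generateText` and the provider's `.completion()` factory (model ID and prompt are illustrative):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Say hello.',
  // `true` for generated-token logprobs, or a number for the top n tokens.
  providerOptions: { openai: { logprobs: true } },
});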
@@ -995,11 +1061,6 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      // no supported urls for completion models
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -1053,6 +1114,7 @@ var OpenAICompletionLanguageModel = class {
     // model specific settings:
     echo: openaiOptions.echo,
     logit_bias: openaiOptions.logitBias,
+    logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
     suffix: openaiOptions.suffix,
     user: openaiOptions.user,
     // standardized settings:
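Unlike the chat API, the legacy completion API takes a single numeric `logprobs` parameter. The mapping above, restated as a standalone sketch:

// Same logic as the ternary above: `true` becomes 0 (generated tokens only),
// `false` is dropped, and a number passes through unchanged.
function mapCompletionLogprobs(
  logprobs: boolean | number | undefined,
): number | undefined {
  if (logprobs === true) return 0;
  if (logprobs === false) return undefined;
  return logprobs;
}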
@@ -1091,6 +1153,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1104,6 +1170,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1130,6 +1197,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1168,6 +1236,9 @@ var OpenAICompletionLanguageModel = class {
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if ((choice == null ? void 0 : choice.logprobs) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs;
+        }
         if ((choice == null ? void 0 : choice.text) != null) {
           controller.enqueue({
             type: "text",
@@ -1179,6 +1250,7 @@ var OpenAICompletionLanguageModel = class {
         controller.enqueue({
           type: "finish",
           finishReason,
+          providerMetadata,
           usage
         });
       }
@@ -1196,7 +1268,12 @@ var openaiCompletionResponseSchema = z5.object({
   choices: z5.array(
     z5.object({
       text: z5.string(),
-      finish_reason: z5.string()
+      finish_reason: z5.string(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+      }).nullish()
     })
   ),
   usage: z5.object({
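The corresponding shape stored in `providerMetadata.openai.logprobs` for completion models, as a TypeScript type mirroring the schema above:

type OpenAICompletionLogprobs = {
  tokens: string[];
  token_logprobs: number[];
  top_logprobs?: Array<Record<string, number>> | null;
};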
@@ -1213,7 +1290,12 @@ var openaiCompletionChunkSchema = z5.union([
   z5.object({
     text: z5.string(),
     finish_reason: z5.string().nullish(),
-    index: z5.number()
+    index: z5.number(),
+    logprobs: z5.object({
+      tokens: z5.array(z5.string()),
+      token_logprobs: z5.array(z5.number()),
+      top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+    }).nullish()
   })
 ),
 usage: z5.object({
@@ -1341,7 +1423,7 @@ var OpenAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
     var _a, _b;
@@ -1401,12 +1483,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = z8.object({
-  data: z8.array(
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });
 
 // src/openai-transcription-model.ts
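Revised prompts are now surfaced per image through provider metadata. A sketch of reading them, assuming the AI SDK's `experimental_generateImage` exposes `providerMetadata` on its result (model ID and prompt are illustrative):

import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor lighthouse at dusk',
});

// Per the mapping above, each entry is { revisedPrompt: string } or null,
// aligned index-by-index with the generated images.
console.log(providerMetadata?.openai?.images);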
@@ -1930,13 +2023,11 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.
-    this.config = config;
-  }
-  async getSupportedUrls() {
-    return {
+    this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
+    this.modelId = modelId;
+    this.config = config;
   }
   get provider() {
     return this.config.provider;