@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.15
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/CHANGELOG.md +31 -0
- package/dist/index.js +94 -9
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +94 -9
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +24 -0
- package/dist/internal/index.d.ts +24 -0
- package/dist/internal/index.js +94 -9
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +94 -9
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -220,6 +220,16 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
@@ -396,6 +406,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
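
These first two hunks wire a single new `logprobs` provider option into OpenAI's chat request parameters: `true` maps to `{ logprobs: true, top_logprobs: 0 }`, and a number n maps to `{ logprobs: true, top_logprobs: n }`. A minimal usage sketch (assuming the AI SDK v5 `generateText` API; the model id and prompt are illustrative, not part of this diff):

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const result = await generateText({
      model: openai('gpt-4o'), // illustrative model id
      prompt: 'Answer with one word: yes or no.',
      providerOptions: {
        // true -> sends { logprobs: true, top_logprobs: 0 }
        // 3    -> sends { logprobs: true, top_logprobs: 3 }
        openai: { logprobs: 3 },
      },
    });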
@@ -468,6 +480,20 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
+      if (baseArgs.logprobs != null) {
+        baseArgs.logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "logprobs is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_logprobs != null) {
+        baseArgs.top_logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "topLogprobs is not supported for reasoning models"
+        });
+      }
       if (baseArgs.max_tokens != null) {
         if (baseArgs.max_completion_tokens == null) {
           baseArgs.max_completion_tokens = baseArgs.max_tokens;
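
For reasoning models, the hunk above strips the new parameters instead of sending them and surfaces a call warning rather than letting the API reject the request. Sketched behavior (the reasoning model id is illustrative):

    const { warnings } = await generateText({
      model: openai('o3-mini'), // illustrative reasoning model id
      prompt: 'Is 7 prime?',
      providerOptions: { openai: { logprobs: true } },
    });
    // Per the hunk above, warnings now contains:
    // { type: 'other', message: 'logprobs is not supported for reasoning models' }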
@@ -503,7 +529,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -553,12 +579,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -793,6 +822,20 @@ var openaiChatResponseSchema = z3.object({
         ).nullish()
       }),
       index: z3.number(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: z3.string().nullish()
     })
   ),
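
On the response side, the parsed logprobs are passed through unchanged as provider metadata. Reading them back might look like this (a sketch; provider metadata is untyped JSON, so real code may need type narrowing):

    // Shape per the schema above:
    // Array<{ token: string; logprob: number; top_logprobs: Array<{ token: string; logprob: number }> }>
    const tokenLogprobs = result.providerMetadata?.openai?.logprobs;
    for (const entry of tokenLogprobs ?? []) {
      console.log(entry.token, entry.logprob, entry.top_logprobs);
    }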
@@ -812,7 +855,7 @@ var openaiChatChunkSchema = z3.union([
             z3.object({
               index: z3.number(),
               id: z3.string().nullish(),
-              type: z3.literal("function").
+              type: z3.literal("function").nullish(),
               function: z3.object({
                 name: z3.string().nullish(),
                 arguments: z3.string().nullish()
@@ -820,7 +863,7 @@ var openaiChatChunkSchema = z3.union([
             })
           ).nullish()
         }).nullish(),
-        finish_reason: z3.string().
+        finish_reason: z3.string().nullish(),
         index: z3.number()
       })
     ),
@@ -989,7 +1032,17 @@ var openaiCompletionProviderOptions = z4.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: z4.string().optional()
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
@@ -1061,6 +1114,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
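
The legacy completions endpoint expects `logprobs` as a number of top alternatives rather than a boolean, so the mapping in this hunk differs from the chat model: `true` becomes `0` (logprobs for the sampled tokens only), `false` becomes undefined, and a number passes through as-is. A sketch against the completion model (assuming the provider's `.completion()` factory; the model id is illustrative):

    const completionResult = await generateText({
      model: openai.completion('gpt-3.5-turbo-instruct'), // illustrative model id
      prompt: 'The capital of France is',
      providerOptions: { openai: { logprobs: 2 } }, // sent to the API as logprobs: 2
    });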
@@ -1099,6 +1153,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1112,6 +1170,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1138,6 +1197,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1176,6 +1236,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1187,6 +1250,7 @@ var OpenAICompletionLanguageModel = class {
         controller.enqueue({
           type: "finish",
           finishReason,
+          providerMetadata,
           usage
         });
       }
@@ -1204,7 +1268,12 @@ var openaiCompletionResponseSchema = z5.object({
   choices: z5.array(
     z5.object({
       text: z5.string(),
-      finish_reason: z5.string()
+      finish_reason: z5.string(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+      }).nullish()
     })
   ),
   usage: z5.object({
@@ -1221,7 +1290,12 @@ var openaiCompletionChunkSchema = z5.union([
       z5.object({
         text: z5.string(),
         finish_reason: z5.string().nullish(),
-        index: z5.number()
+        index: z5.number(),
+        logprobs: z5.object({
+          tokens: z5.array(z5.string()),
+          token_logprobs: z5.array(z5.number()),
+          top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+        }).nullish()
       })
     ),
     usage: z5.object({
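
The completion endpoint reports logprobs in a column-oriented shape (parallel `tokens` and `token_logprobs` arrays) rather than the chat endpoint's per-token objects, and both the one-shot and streaming paths above forward it via provider metadata. Reading it back (a sketch, continuing the completion example above):

    // Shape per the schemas above:
    // { tokens: string[]; token_logprobs: number[]; top_logprobs?: Array<Record<string, number>> }
    const completionLogprobs = completionResult.providerMetadata?.openai?.logprobs;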
@@ -1409,12 +1483,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = z8.object({
-  data: z8.array(
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });
 
 // src/openai-transcription-model.ts