@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.15
This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +31 -0
- package/dist/index.js +94 -9
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +94 -9
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +24 -0
- package/dist/internal/index.d.ts +24 -0
- package/dist/internal/index.js +94 -9
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +94 -9
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -226,6 +226,16 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
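The new logprobs provider option accepts either a boolean or a number. A minimal usage sketch, assuming the AI SDK's generateText and the openai provider entry point (the model id and option plumbing here are illustrative, not taken from this diff):

// Hypothetical usage: request logprobs via provider options.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'Hello!',
  providerOptions: {
    openai: {
      logprobs: 5, // true => logprobs of generated tokens; number => top-n
    },
  },
});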
@@ -402,6 +412,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
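Unbundled for readability, the two ternaries above implement this mapping from the provider option to the chat API's logprobs/top_logprobs request parameters (a behaviorally equivalent sketch):

// Equivalent mapping, unbundled for clarity.
function mapChatLogprobs(logprobs?: boolean | number) {
  if (typeof logprobs === 'number') {
    return { logprobs: true, top_logprobs: logprobs }; // top-n per position
  }
  if (logprobs === true) {
    return { logprobs: true, top_logprobs: 0 }; // sampled tokens only
  }
  return { logprobs: undefined, top_logprobs: undefined }; // omit both
}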
@@ -474,6 +486,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
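For reasoning models, the new parameters are stripped from the request and surfaced as call warnings instead of failing the request. A hedged sketch of what a caller would observe (the warning shape comes from the diff; the model id is illustrative):

// Hypothetical: logprobs requested on a reasoning model.
const { warnings } = await generateText({
  model: openai('o3-mini'), // illustrative reasoning model id
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: true } },
});
// warnings would include:
// { type: 'other', message: 'logprobs is not supported for reasoning models' }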
@@ -509,7 +535,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -559,12 +585,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
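On the way back out, chat logprobs land in providerMetadata.openai.logprobs. A read-back sketch (the entry shape follows the response schema added below; the type assertion is an assumption, since provider metadata is loosely typed):

// Hypothetical: inspecting logprobs on a chat result.
const logprobs = result.providerMetadata?.openai?.logprobs as
  | Array<{ token: string; logprob: number; top_logprobs: Array<{ token: string; logprob: number }> }>
  | undefined;
for (const entry of logprobs ?? []) {
  console.log(entry.token, entry.logprob);
}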
@@ -799,6 +828,20 @@ var openaiChatResponseSchema = z3.object({
       ).nullish()
     }),
     index: z3.number(),
+    logprobs: z3.object({
+      content: z3.array(
+        z3.object({
+          token: z3.string(),
+          logprob: z3.number(),
+          top_logprobs: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: z3.string().nullish()
   })
 ),
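For reference, the logprobs portion of this schema parses into the following TypeScript shape:

// Shape produced by the logprobs field of openaiChatResponseSchema.
type ChatChoiceLogprobs = {
  content:
    | Array<{
        token: string;
        logprob: number;
        top_logprobs: Array<{ token: string; logprob: number }>;
      }>
    | null
    | undefined;
} | null | undefined;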
@@ -818,7 +861,7 @@ var openaiChatChunkSchema = z3.union([
   z3.object({
     index: z3.number(),
     id: z3.string().nullish(),
-    type: z3.literal("function").
+    type: z3.literal("function").nullish(),
     function: z3.object({
       name: z3.string().nullish(),
       arguments: z3.string().nullish()
@@ -826,7 +869,7 @@ var openaiChatChunkSchema = z3.union([
       })
     ).nullish()
   }).nullish(),
-  finish_reason: z3.string().
+  finish_reason: z3.string().nullish(),
   index: z3.number()
 })
),
@@ -995,7 +1038,17 @@ var openaiCompletionProviderOptions = z4.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: z4.string().optional()
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
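The legacy completions API gains the same option. A usage sketch, assuming openai.completion(...) remains the completion-model factory as in prior releases:

// Hypothetical: logprobs with a completion model.
const completionResult = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'), // illustrative id
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: 2 } },
});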
@@ -1067,6 +1120,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
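The completions endpoint takes logprobs as a single integer rather than chat's boolean/number pair, which is why this mapping differs. Unbundled, the ternary above is equivalent to:

// Equivalent mapping for the completions API.
function mapCompletionLogprobs(logprobs?: boolean | number) {
  if (logprobs === true) return 0;          // sampled-token logprobs only
  if (logprobs === false) return undefined; // omit the parameter
  return logprobs;                          // number n => top-n alternatives
}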
@@ -1105,6 +1159,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1118,6 +1176,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1144,6 +1203,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1182,6 +1242,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1193,6 +1256,7 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
+            providerMetadata,
             usage
           });
         }
@@ -1210,7 +1274,12 @@ var openaiCompletionResponseSchema = z5.object({
   choices: z5.array(
     z5.object({
       text: z5.string(),
-      finish_reason: z5.string()
+      finish_reason: z5.string(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+      }).nullish()
     })
   ),
   usage: z5.object({
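Note that the completions API reports logprobs in a columnar shape, unlike chat's per-token objects:

// Shape parsed from the completions logprobs schema above.
type CompletionLogprobs = {
  tokens: string[];                                    // sampled tokens, in order
  token_logprobs: number[];                            // logprob per sampled token
  top_logprobs?: Array<Record<string, number>> | null; // per-position top-n map
} | null | undefined;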
@@ -1227,7 +1296,12 @@ var openaiCompletionChunkSchema = z5.union([
   z5.object({
     text: z5.string(),
     finish_reason: z5.string().nullish(),
-    index: z5.number()
+    index: z5.number(),
+    logprobs: z5.object({
+      tokens: z5.array(z5.string()),
+      token_logprobs: z5.array(z5.number()),
+      top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+    }).nullish()
   })
 ),
 usage: z5.object({
@@ -1415,12 +1489,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = z8.object({
-  data: z8.array(
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });
 
 // src/openai-tools.ts
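The image model now forwards DALL-E's revised prompt through provider metadata, one entry per generated image (null when the API returned none). A read-back sketch, assuming the AI SDK's experimental_generateImage helper and an openai.image(...) factory (both assumptions, not part of this diff):

// Hypothetical: reading the revised prompt after image generation.
import { experimental_generateImage as generateImage } from 'ai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'), // illustrative model id
  prompt: 'A watercolor fox',
});
console.log(providerMetadata?.openai?.images?.[0]?.revisedPrompt);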