@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +31 -0
- package/dist/index.js +94 -9
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +94 -9
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +24 -0
- package/dist/internal/index.d.ts +24 -0
- package/dist/internal/index.js +94 -9
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +94 -9
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED

@@ -11,6 +11,16 @@ declare const openaiProviderOptions: z.ZodObject<{
      * the GPT tokenizer) to an associated bias value from -100 to 100.
      */
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
+    /**
+     * Return the log probabilities of the tokens.
+     *
+     * Setting to true will return the log probabilities of the tokens that
+     * were generated.
+     *
+     * Setting to a number will return the log probabilities of the top n
+     * tokens that were generated.
+     */
+    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
     /**
      * Whether to enable parallel function calling during tool use. Default to true.
      */
@@ -49,6 +59,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     reasoningEffort?: "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
@@ -59,6 +70,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     reasoningEffort?: "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
@@ -123,14 +135,26 @@ declare const openaiCompletionProviderOptions: z.ZodObject<{
     monitor and detect abuse. Learn more.
     */
     user: z.ZodOptional<z.ZodString>;
+    /**
+    Return the log probabilities of the tokens. Including logprobs will increase
+    the response size and can slow down response times. However, it can
+    be useful to better understand how the model is behaving.
+    Setting to true will return the log probabilities of the tokens that
+    were generated.
+    Setting to a number will return the log probabilities of the top n
+    tokens that were generated.
+    */
+    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<string, number> | undefined;
+    logprobs?: number | boolean | undefined;
     echo?: boolean | undefined;
     suffix?: string | undefined;
 }, {
     user?: string | undefined;
     logitBias?: Record<string, number> | undefined;
+    logprobs?: number | boolean | undefined;
     echo?: boolean | undefined;
     suffix?: string | undefined;
 }>;
package/dist/internal/index.d.ts
CHANGED

@@ -11,6 +11,16 @@ declare const openaiProviderOptions: z.ZodObject<{
      * the GPT tokenizer) to an associated bias value from -100 to 100.
      */
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
+    /**
+     * Return the log probabilities of the tokens.
+     *
+     * Setting to true will return the log probabilities of the tokens that
+     * were generated.
+     *
+     * Setting to a number will return the log probabilities of the top n
+     * tokens that were generated.
+     */
+    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
     /**
      * Whether to enable parallel function calling during tool use. Default to true.
      */
@@ -49,6 +59,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     reasoningEffort?: "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
@@ -59,6 +70,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     reasoningEffort?: "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
@@ -123,14 +135,26 @@ declare const openaiCompletionProviderOptions: z.ZodObject<{
     monitor and detect abuse. Learn more.
     */
     user: z.ZodOptional<z.ZodString>;
+    /**
+    Return the log probabilities of the tokens. Including logprobs will increase
+    the response size and can slow down response times. However, it can
+    be useful to better understand how the model is behaving.
+    Setting to true will return the log probabilities of the tokens that
+    were generated.
+    Setting to a number will return the log probabilities of the top n
+    tokens that were generated.
+    */
+    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<string, number> | undefined;
+    logprobs?: number | boolean | undefined;
     echo?: boolean | undefined;
     suffix?: string | undefined;
 }, {
     user?: string | undefined;
     logitBias?: Record<string, number> | undefined;
+    logprobs?: number | boolean | undefined;
     echo?: boolean | undefined;
     suffix?: string | undefined;
 }>;
package/dist/internal/index.js
CHANGED

@@ -246,6 +246,16 @@ var openaiProviderOptions = import_zod.z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
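For context on how this new option is consumed, here is a minimal sketch of requesting logprobs through provider options. It assumes the AI SDK v5 generateText API and an illustrative model id; it is not code from this package:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Request the log probabilities of the top 2 candidate tokens per position;
// `logprobs: true` would request only the generated tokens' logprobs.
const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'Hello!',
  providerOptions: {
    openai: { logprobs: 2 },
  },
});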
@@ -420,6 +430,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
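The two ternaries above compress the mapping from the single logprobs option onto the chat API's logprobs/top_logprobs parameter pair. Spelled out as a standalone helper (an equivalent illustrative rewrite, not code from the package):

// boolean | number option -> { logprobs, top_logprobs } request parameters
function mapChatLogprobs(option: boolean | number | undefined): {
  logprobs: true | undefined;
  top_logprobs: number | undefined;
} {
  // true or any number means "request logprobs at all"
  const logprobs =
    option === true || typeof option === 'number' ? true : undefined;
  // a number selects the top-n alternatives; true maps to 0 (generated tokens only)
  const top_logprobs =
    typeof option === 'number' ? option : option === true ? 0 : undefined;
  return { logprobs, top_logprobs };
}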
@@ -492,6 +504,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
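For reasoning models the option is stripped from the request rather than causing an error, so callers find out through the returned warnings. A hedged sketch of detecting that, assuming warnings are surfaced on the generateText result as in the AI SDK:

// 'o3-mini' is an illustrative reasoning-model id
const { warnings } = await generateText({
  model: openai('o3-mini'),
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: true } },
});

// e.g. { type: 'other', message: 'logprobs is not supported for reasoning models' }
const dropped = warnings?.find(
  (w) => w.type === 'other' && w.message.includes('logprobs'),
);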
@@ -527,7 +553,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -577,12 +603,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
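On the consuming side, the captured values surface under the openai provider-metadata key. A minimal sketch, assuming the AI SDK v5 result shape (the entry shape comes from the response schema below):

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: 2 } },
});

// Array of { token, logprob, top_logprobs } entries, one per generated token.
const logprobs = result.providerMetadata?.openai?.logprobs;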
@@ -817,6 +846,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
         ).nullish()
       }),
       index: import_zod3.z.number(),
+      logprobs: import_zod3.z.object({
+        content: import_zod3.z.array(
+          import_zod3.z.object({
+            token: import_zod3.z.string(),
+            logprob: import_zod3.z.number(),
+            top_logprobs: import_zod3.z.array(
+              import_zod3.z.object({
+                token: import_zod3.z.string(),
+                logprob: import_zod3.z.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: import_zod3.z.string().nullish()
     })
   ),
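The schema above accepts the chat API's per-choice logprobs payload, which looks roughly like this (illustrative values, not captured output):

// Shape accepted by choice.logprobs in openaiChatResponseSchema:
const chatLogprobsExample = {
  content: [
    {
      token: 'Hello',
      logprob: -0.02,
      top_logprobs: [
        { token: 'Hello', logprob: -0.02 },
        { token: 'Hi', logprob: -4.1 },
      ],
    },
  ],
};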
@@ -836,7 +879,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
       import_zod3.z.object({
         index: import_zod3.z.number(),
         id: import_zod3.z.string().nullish(),
-        type: import_zod3.z.literal("function").
+        type: import_zod3.z.literal("function").nullish(),
         function: import_zod3.z.object({
           name: import_zod3.z.string().nullish(),
           arguments: import_zod3.z.string().nullish()
@@ -844,7 +887,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
         })
       ).nullish()
     }).nullish(),
-    finish_reason: import_zod3.z.string().
+    finish_reason: import_zod3.z.string().nullish(),
     index: import_zod3.z.number()
   })
 ),
@@ -1004,7 +1047,17 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: import_zod4.z.string().optional()
+  user: import_zod4.z.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
@@ -1076,6 +1129,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
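Note the different mapping for the legacy completions API, whose logprobs request parameter is a single integer rather than a boolean/integer pair. The inlined ternary above is equivalent to this illustrative helper:

// boolean | number option -> integer `logprobs` request parameter
function mapCompletionLogprobs(
  option: boolean | number | undefined,
): number | undefined {
  if (option === true) return 0; // logprobs of the generated tokens only
  if (option === false) return undefined; // omit the parameter entirely
  return option; // a number passes through as the top-n count
}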
@@ -1114,6 +1168,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1127,6 +1185,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1153,6 +1212,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1191,6 +1251,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1202,6 +1265,7 @@ var OpenAICompletionLanguageModel = class {
             controller.enqueue({
               type: "finish",
               finishReason,
+              providerMetadata,
               usage
             });
           }
@@ -1219,7 +1283,12 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
   choices: import_zod5.z.array(
     import_zod5.z.object({
       text: import_zod5.z.string(),
-      finish_reason: import_zod5.z.string()
+      finish_reason: import_zod5.z.string(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
   usage: import_zod5.z.object({
@@ -1236,7 +1305,12 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
     import_zod5.z.object({
       text: import_zod5.z.string(),
       finish_reason: import_zod5.z.string().nullish(),
-      index: import_zod5.z.number()
+      index: import_zod5.z.number(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
   usage: import_zod5.z.object({
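Unlike the chat API's array of per-token objects, the completions API reports logprobs in parallel arrays, as the two schemas above reflect (illustrative values, not captured output):

// Shape accepted by choice.logprobs in the completion schemas:
const completionLogprobsExample = {
  tokens: ['Hello', '!'],
  token_logprobs: [-0.02, -0.6],
  top_logprobs: [{ Hello: -0.02, Hi: -4.1 }, { '!': -0.6 }],
};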
@@ -1413,12 +1487,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = import_zod8.z.object({
-  data: import_zod8.z.array(
+  data: import_zod8.z.array(
+    import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+  )
 });
 
 // src/openai-transcription-model.ts