@ai-sdk/openai 2.0.11 → 2.0.13
This diff shows the changes between publicly released versions of this package, as published to their public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +106 -28
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +106 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +106 -28
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +106 -28
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
@@ -233,6 +233,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
        "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
     }>>>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         low: "low";
@@ -241,6 +242,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     }>>>;
     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

package/dist/internal/index.d.ts CHANGED
@@ -233,6 +233,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
         "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
     }>>>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         low: "low";
@@ -241,6 +242,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     }>>>;
     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

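The two type-level additions above surface a new `logprobs` provider option and a matching `include` literal. A minimal usage sketch (assuming the AI SDK's `generateText` entry point and a default `openai` provider instance; the prompt and logging are illustrative only):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-5"),
  prompt: "Answer in one word: capital of France?",
  providerOptions: {
    // logprobs: true requests the maximum top_logprobs;
    // logprobs: n (1-20) requests the top n alternatives per token.
    openai: { logprobs: 4 },
  },
});

// Token log probabilities are reported via provider metadata
// (see the index.js changes below).
console.log(result.providerMetadata?.openai?.logprobs);
```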
package/dist/internal/index.js CHANGED
@@ -1143,13 +1143,13 @@ var openaiChatChunkSchema = import_v45.z.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
+  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
 function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
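In effect, `gpt-5-chat` models are now carved out of the reasoning-model check and the flex/priority service-tier checks. A few hypothetical calls against the updated guards:

```ts
// Hypothetical results with the updated guards:
isReasoningModel("gpt-5");                    // true
isReasoningModel("gpt-5-chat-latest");        // false - excluded by the new guard
supportsFlexProcessing("gpt-5-chat-latest");  // false
supportsPriorityProcessing("gpt-5-mini");     // true (unchanged)
```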
@@ -1920,6 +1920,8 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
+      response_format: "verbose_json",
+      // always use verbose_json to get segments
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
@@ -1935,7 +1937,7 @@ var OpenAITranscriptionModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
     const { formData, warnings } = await this.getArgs(options);
     const {
@@ -1959,13 +1961,17 @@ var OpenAITranscriptionModel = class {
     const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
     return {
       text: response.text,
-      segments: (
+      segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+        text: segment.text,
+        startSecond: segment.start,
+        endSecond: segment.end
+      }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
         text: word.word,
         startSecond: word.start,
         endSecond: word.end
-      }))) != null ?
+      }))) != null ? _g : [],
       language,
-      durationInSeconds: (
+      durationInSeconds: (_h = response.duration) != null ? _h : void 0,
       warnings,
       response: {
         timestamp: currentDate,
@@ -1986,6 +1992,20 @@ var openaiTranscriptionResponseSchema = import_v412.z.object({
       start: import_v412.z.number(),
       end: import_v412.z.number()
     })
+  ).nullish(),
+  segments: import_v412.z.array(
+    import_v412.z.object({
+      id: import_v412.z.number(),
+      seek: import_v412.z.number(),
+      start: import_v412.z.number(),
+      end: import_v412.z.number(),
+      text: import_v412.z.string(),
+      tokens: import_v412.z.array(import_v412.z.number()),
+      temperature: import_v412.z.number(),
+      avg_logprob: import_v412.z.number(),
+      compression_ratio: import_v412.z.number(),
+      no_speech_prob: import_v412.z.number()
+    })
   ).nullish()
 });

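Taken together, the transcription changes force `verbose_json`, parse the returned `segments`, and prefer them over word-level timestamps. A standalone sketch of the fallback logic (illustrative reimplementation, not the package's exported API):

```ts
type Span = { text: string; startSecond: number; endSecond: number };

// Prefer verbose_json segments, fall back to word timestamps, else [].
function mapSegments(response: {
  segments?: Array<{ text: string; start: number; end: number }> | null;
  words?: Array<{ word: string; start: number; end: number }> | null;
}): Span[] {
  return (
    response.segments?.map((s) => ({
      text: s.text,
      startSecond: s.start,
      endSecond: s.end,
    })) ??
    response.words?.map((w) => ({
      text: w.word,
      startSecond: w.start,
      endSecond: w.end,
    })) ??
    []
  );
}
```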
@@ -2426,6 +2446,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = import_v416.z.array(
+  import_v416.z.object({
+    token: import_v416.z.string(),
+    logprob: import_v416.z.number(),
+    top_logprobs: import_v416.z.array(
+      import_v416.z.object({
+        token: import_v416.z.string(),
+        logprob: import_v416.z.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
@@ -2489,6 +2522,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -2519,9 +2554,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
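Desugared, the request-building additions map the user-facing `logprobs` option onto the Responses API's `top_logprobs` parameter and append the required `include` entry. A readable sketch of the same mapping (names are local to this example):

```ts
const TOP_LOGPROBS_MAX = 20;

function deriveLogprobArgs(opts: {
  logprobs?: boolean | number;
  include?: string[] | null;
}) {
  // true -> request the maximum (20); a number -> top n; otherwise omit.
  const topLogprobs =
    typeof opts.logprobs === "number"
      ? opts.logprobs
      : opts.logprobs === true
        ? TOP_LOGPROBS_MAX
        : undefined;

  // Requesting logprobs also requires the matching include entry.
  const include = topLogprobs
    ? [...(opts.include ?? []), "message.output_text.logprobs"]
    : opts.include;

  return { top_logprobs: topLogprobs, include };
}
```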
@@ -2605,7 +2641,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2639,6 +2675,7 @@ var OpenAIResponsesLanguageModel = class {
       import_v416.z.object({
         type: import_v416.z.literal("output_text"),
         text: import_v416.z.string(),
+        logprobs: LOGPROBS_SCHEMA.nullish(),
         annotations: import_v416.z.array(
           import_v416.z.object({
             type: import_v416.z.literal("url_citation"),
@@ -2716,6 +2753,7 @@ var OpenAIResponsesLanguageModel = class {
     });
   }
   const content = [];
+  const logprobs = [];
   for (const part of response.output) {
     switch (part.type) {
       case "reasoning": {
@@ -2738,6 +2776,9 @@ var OpenAIResponsesLanguageModel = class {
       }
       case "message": {
         for (const contentPart of part.content) {
+          if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+            logprobs.push(contentPart.logprobs);
+          }
           content.push({
             type: "text",
             text: contentPart.text,
@@ -2751,7 +2792,7 @@ var OpenAIResponsesLanguageModel = class {
           content.push({
             type: "source",
             sourceType: "url",
-            id: (
+            id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils14.generateId)(),
             url: annotation.url,
             title: annotation.title
           });
@@ -2834,18 +2875,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
@@ -2855,11 +2902,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
@@ -2889,6 +2932,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -3114,6 +3158,9 @@ var OpenAIResponsesLanguageModel = class {
               id: value.item_id,
               delta: value.delta
             });
+            if (value.logprobs) {
+              logprobs.push(value.logprobs);
+            }
           } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
             if (value.summary_index > 0) {
               (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -3164,15 +3211,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
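In streaming mode, per-delta logprobs are accumulated and attached to the final `finish` event's provider metadata. A hedged end-to-end sketch (assuming the AI SDK's `streamText`, where `result.providerMetadata` resolves once the stream finishes):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("gpt-5"),
  prompt: "One short sentence about logprobs.",
  providerOptions: { openai: { logprobs: true } },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// Collected across all text deltas, then attached in flush().
console.log((await result.providerMetadata)?.openai?.logprobs);
```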
@@ -3191,7 +3242,8 @@ var usageSchema2 = import_v416.z.object({
 var textDeltaChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("response.output_text.delta"),
   item_id: import_v416.z.string(),
-  delta: import_v416.z.string()
+  delta: import_v416.z.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("error"),
@@ -3388,6 +3440,13 @@ function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
 function getResponsesModelConfig(modelId) {
+  if (modelId.startsWith("gpt-5-chat")) {
+    return {
+      isReasoningModel: false,
+      systemMessageMode: "system",
+      requiredAutoTruncation: false
+    };
+  }
   if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
@@ -3409,10 +3468,10 @@ function getResponsesModelConfig(modelId) {
   };
 }
 function supportsFlexProcessing2(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing2(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 var openaiResponsesProviderOptionsSchema = import_v416.z.object({
   metadata: import_v416.z.any().nullish(),
@@ -3425,10 +3484,29 @@ var openaiResponsesProviderOptionsSchema = import_v416.z.object({
   instructions: import_v416.z.string().nullish(),
   reasoningSummary: import_v416.z.string().nullish(),
   serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
-  include: import_v416.z.array(
+  include: import_v416.z.array(
+    import_v416.z.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: import_v416.z.string().nullish(),
-  safetyIdentifier: import_v416.z.string().nullish()
+  safetyIdentifier: import_v416.z.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {