@ai-sdk/openai 2.0.11 → 2.0.13
This diff shows the published contents of these package versions as they appear in their public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +106 -28
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +106 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +106 -28
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +106 -28
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/internal/index.mjs (CHANGED)
```diff
@@ -1119,13 +1119,13 @@ var openaiChatChunkSchema = z5.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
+  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
 function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
```
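The practical effect of this hunk: `gpt-5-chat` models are no longer classified as reasoning models and no longer advertise flex processing. Note that the added `!modelId.startsWith("gpt-5-chat")` guard binds only to the `gpt-5` clause in the `||` chains (`&&` binds tighter than `||`), which is sufficient because the other prefixes cannot also match `gpt-5-chat`. A minimal sketch of the resulting behavior (illustrative, mirrors the predicate above; not part of the package's public API):

```ts
// Mirrors isReasoningModel above; the model IDs are illustrative.
const isReasoningModel = (modelId: string): boolean =>
  (modelId.startsWith("o") || modelId.startsWith("gpt-5")) &&
  !modelId.startsWith("gpt-5-chat");

console.log(isReasoningModel("o3-mini"));           // true
console.log(isReasoningModel("gpt-5"));             // true
console.log(isReasoningModel("gpt-5-chat-latest")); // false: chat variant excluded
```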
```diff
@@ -1922,6 +1922,8 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
+      response_format: "verbose_json",
+      // always use verbose_json to get segments
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
```
```diff
@@ -1937,7 +1939,7 @@ var OpenAITranscriptionModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
     const { formData, warnings } = await this.getArgs(options);
     const {
```
```diff
@@ -1961,13 +1963,17 @@ var OpenAITranscriptionModel = class {
     const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
     return {
       text: response.text,
-      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+      segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+        text: segment.text,
+        startSecond: segment.start,
+        endSecond: segment.end
+      }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
         text: word.word,
         startSecond: word.start,
         endSecond: word.end
-      }))) != null ? _e : [],
+      }))) != null ? _g : [],
       language,
-      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+      durationInSeconds: (_h = response.duration) != null ? _h : void 0,
       warnings,
       response: {
         timestamp: currentDate,
```
```diff
@@ -1988,6 +1994,20 @@ var openaiTranscriptionResponseSchema = z12.object({
       start: z12.number(),
       end: z12.number()
     })
+  ).nullish(),
+  segments: z12.array(
+    z12.object({
+      id: z12.number(),
+      seek: z12.number(),
+      start: z12.number(),
+      end: z12.number(),
+      text: z12.string(),
+      tokens: z12.array(z12.number()),
+      temperature: z12.number(),
+      avg_logprob: z12.number(),
+      compression_ratio: z12.number(),
+      no_speech_prob: z12.number()
+    })
   ).nullish()
 });
 
```
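Taken together, the transcription hunks force `response_format: "verbose_json"` so the API returns `segments`, extend the response schema accordingly, and map segments to the SDK's `{ text, startSecond, endSecond }` shape, falling back to word-level timestamps when segments are absent. A de-minified sketch of that fallback (the types here are illustrative, not the package's own):

```ts
// Prefer segment-level timestamps, fall back to word-level, else empty.
type Span = { text: string; startSecond: number; endSecond: number };

function toSpans(response: {
  segments?: { text: string; start: number; end: number }[] | null;
  words?: { word: string; start: number; end: number }[] | null;
}): Span[] {
  return (
    response.segments?.map((s) => ({ text: s.text, startSecond: s.start, endSecond: s.end })) ??
    response.words?.map((w) => ({ text: w.word, startSecond: w.start, endSecond: w.end })) ??
    []
  );
}
```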
```diff
@@ -2446,6 +2466,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z16.array(
+  z16.object({
+    token: z16.string(),
+    logprob: z16.number(),
+    top_logprobs: z16.array(
+      z16.object({
+        token: z16.string(),
+        logprob: z16.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
```
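`TOP_LOGPROBS_MAX = 20` matches the upper bound OpenAI documents for `top_logprobs`, and `LOGPROBS_SCHEMA` validates the per-token shape the Responses API returns. An illustrative value that parses against it (sample data, not real model output):

```ts
// Hypothetical sample in the shape LOGPROBS_SCHEMA accepts:
// one entry per generated token, each with top alternatives.
const sampleLogprobs = [
  {
    token: "Hello",
    logprob: -0.013,
    top_logprobs: [
      { token: "Hello", logprob: -0.013 },
      { token: "Hi", logprob: -4.37 },
    ],
  },
];
```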
```diff
@@ -2509,6 +2542,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
```
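De-minified, the two new derivations read roughly as follows: `logprobs: true` is normalized to the documented maximum of 20, a number passes through unchanged, and anything else disables the feature; when enabled, `"message.output_text.logprobs"` is appended to (or becomes) the request's `include` list. A sketch under those assumptions, with illustrative local names:

```ts
// `TOP_LOGPROBS_MAX` is the constant introduced above; `options` stands in
// for the parsed provider options.
declare const options:
  | { logprobs?: boolean | number; include?: string[] }
  | undefined;
const TOP_LOGPROBS_MAX = 20;

const topLogprobs =
  typeof options?.logprobs === "number"
    ? options.logprobs
    : options?.logprobs === true
      ? TOP_LOGPROBS_MAX
      : undefined;

// Enabling logprobs forces the matching `include` entry onto the request.
const include = topLogprobs
  ? [...(options?.include ?? []), "message.output_text.logprobs"]
  : options?.include;
```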
```diff
@@ -2539,9 +2574,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include: openaiOptions == null ? void 0 : openaiOptions.include,
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
```
```diff
@@ -2625,7 +2661,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
```
```diff
@@ -2659,6 +2695,7 @@ var OpenAIResponsesLanguageModel = class {
           z16.object({
             type: z16.literal("output_text"),
             text: z16.string(),
+            logprobs: LOGPROBS_SCHEMA.nullish(),
             annotations: z16.array(
               z16.object({
                 type: z16.literal("url_citation"),
```
```diff
@@ -2736,6 +2773,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
```
```diff
@@ -2758,6 +2796,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
```
```diff
@@ -2771,7 +2812,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
               url: annotation.url,
               title: annotation.title
             });
```
```diff
@@ -2854,18 +2895,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
-        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
```
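From the caller's side, the collected logprobs surface on `providerMetadata.openai.logprobs` only when at least one content part carried them. A hypothetical end-to-end usage sketch, assuming the AI SDK's `generateText` and this provider's Responses API entry point (the model ID is illustrative):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4.1"), // illustrative model ID
  prompt: "Say hello.",
  providerOptions: { openai: { logprobs: 5 } }, // top 5 alternatives per token
});

// Present only when the response actually carried logprobs.
console.log(result.providerMetadata?.openai?.logprobs);
```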
```diff
@@ -2875,11 +2922,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
```
```diff
@@ -2909,6 +2952,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
```
```diff
@@ -3134,6 +3178,9 @@ var OpenAIResponsesLanguageModel = class {
               id: value.item_id,
               delta: value.delta
             });
+            if (value.logprobs) {
+              logprobs.push(value.logprobs);
+            }
           } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
             if (value.summary_index > 0) {
               (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
```
```diff
@@ -3184,15 +3231,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata: {
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
```
```diff
@@ -3211,7 +3262,8 @@ var usageSchema2 = z16.object({
 var textDeltaChunkSchema = z16.object({
   type: z16.literal("response.output_text.delta"),
   item_id: z16.string(),
-  delta: z16.string()
+  delta: z16.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z16.object({
   type: z16.literal("error"),
```
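During streaming, logprobs arrive on `response.output_text.delta` chunks (per the extended `textDeltaChunkSchema` above), are accumulated per chunk, and are attached to the final `finish` event's provider metadata in `flush`. An illustrative chunk in that shape (sample data only):

```ts
// Hypothetical streaming chunk; shape follows textDeltaChunkSchema above.
const chunk = {
  type: "response.output_text.delta",
  item_id: "msg_abc123", // illustrative ID
  delta: "Hel",
  logprobs: [{ token: "Hel", logprob: -0.02, top_logprobs: [] }],
};
```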
```diff
@@ -3408,6 +3460,13 @@ function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
 function getResponsesModelConfig(modelId) {
+  if (modelId.startsWith("gpt-5-chat")) {
+    return {
+      isReasoningModel: false,
+      systemMessageMode: "system",
+      requiredAutoTruncation: false
+    };
+  }
   if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
```
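The early return gives `gpt-5-chat` its own Responses-API profile before the generic `gpt-5` branch can classify it as a reasoning model. The resolved values, read directly off the hunk above:

```ts
// What the new branch resolves to for a chat-variant model ID:
getResponsesModelConfig("gpt-5-chat-latest");
// → { isReasoningModel: false, systemMessageMode: "system", requiredAutoTruncation: false }
```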
```diff
@@ -3429,10 +3488,10 @@ function getResponsesModelConfig(modelId) {
   };
 }
 function supportsFlexProcessing2(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing2(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 var openaiResponsesProviderOptionsSchema = z16.object({
   metadata: z16.any().nullish(),
```
```diff
@@ -3445,10 +3504,29 @@ var openaiResponsesProviderOptionsSchema = z16.object({
   instructions: z16.string().nullish(),
   reasoningSummary: z16.string().nullish(),
   serviceTier: z16.enum(["auto", "flex", "priority"]).nullish(),
-  include: z16.array(z16.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  include: z16.array(
+    z16.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z16.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z16.string().nullish(),
-  safetyIdentifier: z16.string().nullish()
+  safetyIdentifier: z16.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z16.union([z16.boolean(), z16.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 export {
   OpenAIChatLanguageModel,
```
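The new `logprobs` provider option accepts either a boolean or an integer in [1, 20]. Illustrative values under the schema above (assumes the schema is in scope; `parse` throws on out-of-range numbers):

```ts
openaiResponsesProviderOptionsSchema.parse({ logprobs: true }); // normalized to top 20 at request time
openaiResponsesProviderOptionsSchema.parse({ logprobs: 5 });    // top 5 alternatives per token
// openaiResponsesProviderOptionsSchema.parse({ logprobs: 0 });  // would throw: below min(1)
// openaiResponsesProviderOptionsSchema.parse({ logprobs: 42 }); // would throw: above max(20)
```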