@ai-sdk/openai 2.0.11 → 2.0.13
This diff shows the publicly available contents of these package versions as published to their registry, and is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +106 -28
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +106 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +106 -28
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +106 -28
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.mjs
CHANGED
@@ -1125,13 +1125,13 @@ var openaiChatChunkSchema = z5.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
+  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
 function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
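The hunk above carves `gpt-5-chat` out of the reasoning, flex-processing, and priority-processing gates. A minimal sketch of the new behavior (predicate copied from the diff; the sample model IDs are illustrative):

```ts
// Gating predicate as it appears in the diff above.
function isReasoningModel(modelId: string): boolean {
  return (
    (modelId.startsWith("o") || modelId.startsWith("gpt-5")) &&
    !modelId.startsWith("gpt-5-chat")
  );
}

console.log(isReasoningModel("o4-mini"));    // true
console.log(isReasoningModel("gpt-5"));      // true
console.log(isReasoningModel("gpt-5-chat")); // false after this change
```

Note that in `supportsFlexProcessing` and `supportsPriorityProcessing` the new `!modelId.startsWith("gpt-5-chat")` guard binds only to the `gpt-5` alternative (`&&` binds tighter than `||`), which is sufficient: the other branches cannot match a `gpt-5-chat` ID.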
@@ -2150,6 +2150,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z13.array(
+  z13.object({
+    token: z13.string(),
+    logprob: z13.number(),
+    top_logprobs: z13.array(
+      z13.object({
+        token: z13.string(),
+        logprob: z13.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
@@ -2213,6 +2226,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
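In `getArgs`, the new `logprobs` provider option is normalized before the request body is built: `true` is promoted to `TOP_LOGPROBS_MAX`, a number passes through unchanged, and the `"message.output_text.logprobs"` include flag is appended to any user-supplied `include` list. A de-minified sketch of that normalization (function name and types are illustrative):

```ts
const TOP_LOGPROBS_MAX = 20;

// De-minified sketch of the normalization in the hunk above.
function normalizeLogprobs(
  logprobs: boolean | number | undefined,
  include: string[] | undefined,
): { topLogprobs: number | undefined; include: string[] | undefined } {
  const topLogprobs =
    typeof logprobs === "number"
      ? logprobs
      : logprobs === true
        ? TOP_LOGPROBS_MAX
        : undefined;

  return {
    topLogprobs,
    // The logprobs include flag rides along with any user-supplied includes.
    include: topLogprobs
      ? [...(include ?? []), "message.output_text.logprobs"]
      : include,
  };
}
```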
@@ -2243,9 +2258,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include: openaiOptions == null ? void 0 : openaiOptions.include,
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2329,7 +2345,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2363,6 +2379,7 @@ var OpenAIResponsesLanguageModel = class {
         z13.object({
           type: z13.literal("output_text"),
           text: z13.string(),
+          logprobs: LOGPROBS_SCHEMA.nullish(),
           annotations: z13.array(
             z13.object({
               type: z13.literal("url_citation"),
@@ -2440,6 +2457,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
@@ -2462,6 +2480,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
@@ -2475,7 +2496,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
               url: annotation.url,
               title: annotation.title
             });
@@ -2558,18 +2579,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
@@ -2579,11 +2606,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
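With the pieces above, `doGenerate` now collects the logprobs of every `output_text` part and attaches them to the provider metadata next to `responseId`. A usage sketch, assuming the usual AI SDK v5 call shape (model ID and prompt are illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-4.1"),
  prompt: "Answer with one word: what color is the sky?",
  providerOptions: { openai: { logprobs: 5 } }, // top 5 alternatives per token
});

// responseId is always present; logprobs only when requested and returned.
console.log(result.providerMetadata?.openai?.responseId);
console.log(result.providerMetadata?.openai?.logprobs);
```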
@@ -2613,6 +2636,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
    let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2838,6 +2862,9 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
+          if (value.logprobs) {
+            logprobs.push(value.logprobs);
+          }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
             (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -2888,15 +2915,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata: {
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
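The streaming path mirrors this: text deltas carry optional logprobs (see the `textDeltaChunkSchema` change below), each chunk's logprobs are accumulated, and `flush` attaches them to the finish event's provider metadata. A reading sketch, assuming `streamText` exposes the resolved provider metadata as a promise (prompt is illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const result = streamText({
  model: openai.responses("gpt-4.1"),
  prompt: "Name three primary colors.",
  providerOptions: { openai: { logprobs: true } }, // true requests the max top_logprobs (20)
});

// Drain the stream; provider metadata resolves when the finish event flushes.
for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
console.log((await result.providerMetadata)?.openai?.logprobs);
```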
@@ -2915,7 +2946,8 @@ var usageSchema2 = z13.object({
 var textDeltaChunkSchema = z13.object({
   type: z13.literal("response.output_text.delta"),
   item_id: z13.string(),
-  delta: z13.string()
+  delta: z13.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z13.object({
   type: z13.literal("error"),
@@ -3112,6 +3144,13 @@ function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
 function getResponsesModelConfig(modelId) {
+  if (modelId.startsWith("gpt-5-chat")) {
+    return {
+      isReasoningModel: false,
+      systemMessageMode: "system",
+      requiredAutoTruncation: false
+    };
+  }
   if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
@@ -3133,10 +3172,10 @@ function getResponsesModelConfig(modelId) {
   };
 }
 function supportsFlexProcessing2(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
 function supportsPriorityProcessing2(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 var openaiResponsesProviderOptionsSchema = z13.object({
   metadata: z13.any().nullish(),
@@ -3149,10 +3188,29 @@ var openaiResponsesProviderOptionsSchema = z13.object({
   instructions: z13.string().nullish(),
   reasoningSummary: z13.string().nullish(),
   serviceTier: z13.enum(["auto", "flex", "priority"]).nullish(),
-  include: z13.array(z13.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  include: z13.array(
+    z13.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z13.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z13.string().nullish(),
-  safetyIdentifier: z13.string().nullish()
+  safetyIdentifier: z13.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z13.union([z13.boolean(), z13.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 
 // src/speech/openai-speech-model.ts
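The option itself accepts either a boolean or an integer from 1 to `TOP_LOGPROBS_MAX` (20). A quick sketch of what the schema admits, mirrored in plain zod:

```ts
import { z } from "zod";

// Mirror of the logprobs option schema above.
const TOP_LOGPROBS_MAX = 20;
const logprobsOption = z
  .union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)])
  .optional();

console.log(logprobsOption.safeParse(true).success); // true
console.log(logprobsOption.safeParse(5).success);    // true
console.log(logprobsOption.safeParse(25).success);   // false: above the cap
```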
@@ -3393,6 +3451,8 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
+      response_format: "verbose_json",
+      // always use verbose_json to get segments
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
@@ -3408,7 +3468,7 @@ var OpenAITranscriptionModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
     const { formData, warnings } = await this.getArgs(options);
     const {
@@ -3432,13 +3492,17 @@ var OpenAITranscriptionModel = class {
     const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
     return {
       text: response.text,
-      segments: (
+      segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+        text: segment.text,
+        startSecond: segment.start,
+        endSecond: segment.end
+      }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
         text: word.word,
         startSecond: word.start,
         endSecond: word.end
-      }))) != null ?
+      }))) != null ? _g : [],
       language,
-      durationInSeconds: (
+      durationInSeconds: (_h = response.duration) != null ? _h : void 0,
       warnings,
       response: {
         timestamp: currentDate,
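The segment mapping now prefers `verbose_json` segments and falls back to word-level timestamps. A de-minified sketch of the same logic (names and types are illustrative):

```ts
// De-minified sketch of the segment mapping in the hunk above.
interface Span {
  text: string;
  startSecond: number;
  endSecond: number;
}

function mapSegments(response: {
  segments?: Array<{ text: string; start: number; end: number }>;
  words?: Array<{ word: string; start: number; end: number }>;
}): Span[] {
  return (
    // Prefer verbose_json segments when present...
    response.segments?.map((s) => ({
      text: s.text,
      startSecond: s.start,
      endSecond: s.end,
    })) ??
    // ...otherwise fall back to word-level timestamps...
    response.words?.map((w) => ({
      text: w.word,
      startSecond: w.start,
      endSecond: w.end,
    })) ??
    // ...or an empty list.
    []
  );
}
```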
@@ -3459,6 +3523,20 @@ var openaiTranscriptionResponseSchema = z16.object({
       start: z16.number(),
       end: z16.number()
     })
+  ).nullish(),
+  segments: z16.array(
+    z16.object({
+      id: z16.number(),
+      seek: z16.number(),
+      start: z16.number(),
+      end: z16.number(),
+      text: z16.string(),
+      tokens: z16.array(z16.number()),
+      temperature: z16.number(),
+      avg_logprob: z16.number(),
+      compression_ratio: z16.number(),
+      no_speech_prob: z16.number()
+    })
   ).nullish()
 });
 
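End to end, transcription results should now carry segment-level timestamps. A usage sketch, assuming the AI SDK's `experimental_transcribe` helper (model ID and file path are illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("audio.mp3"), // illustrative path
});

// Segment timestamps now come from the forced verbose_json response.
for (const segment of result.segments) {
  console.log(`${segment.startSecond}-${segment.endSecond}: ${segment.text}`);
}
```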