@ai-sdk/openai 2.0.12 → 2.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +70 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +70 -19
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +70 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +70 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -2150,6 +2150,19 @@ function prepareResponsesTools({
 }

 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z13.array(
+  z13.object({
+    token: z13.string(),
+    logprob: z13.number(),
+    top_logprobs: z13.array(
+      z13.object({
+        token: z13.string(),
+        logprob: z13.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
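For reference, a value accepted by the new LOGPROBS_SCHEMA looks like the following hypothetical sample (not taken from the package): each generated token carries its log probability plus the top alternative tokens considered at that position.

// Hypothetical sample accepted by LOGPROBS_SCHEMA (illustrative values).
const sample = [
  {
    token: "Hello",
    logprob: -0.012,
    top_logprobs: [
      { token: "Hello", logprob: -0.012 },
      { token: "Hi", logprob: -4.53 }
    ]
  }
];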
@@ -2213,6 +2226,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -2243,9 +2258,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
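The minified ternaries above normalize the new logprobs option before it reaches the request body. A readable sketch of the same logic, with illustrative helper names that do not exist in the package:

// Sketch of the normalization above (helper names are hypothetical).
// `true` requests the maximum of 20 top tokens (TOP_LOGPROBS_MAX),
// a number requests that many, anything else disables the feature.
function resolveTopLogprobs(logprobs?: boolean | number): number | undefined {
  if (typeof logprobs === "number") return logprobs;
  return logprobs === true ? 20 : undefined;
}

// When logprobs are requested, "message.output_text.logprobs" is appended
// to the `include` list so the Responses API actually returns them.
function resolveInclude(
  include: string[] | undefined,
  topLogprobs?: number
): string[] | undefined {
  if (topLogprobs === undefined) return include;
  return [...(include ?? []), "message.output_text.logprobs"];
}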
@@ -2329,7 +2345,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2363,6 +2379,7 @@ var OpenAIResponsesLanguageModel = class {
         z13.object({
           type: z13.literal("output_text"),
           text: z13.string(),
+          logprobs: LOGPROBS_SCHEMA.nullish(),
           annotations: z13.array(
             z13.object({
               type: z13.literal("url_citation"),
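With logprobs added to the output_text schema, a content part in the parsed response can carry token-level data alongside its text. A trimmed, hypothetical payload:

// Trimmed, hypothetical output_text part; only relevant fields shown.
const outputTextPart = {
  type: "output_text",
  text: "Hello",
  logprobs: [{ token: "Hello", logprob: -0.01, top_logprobs: [] }],
  annotations: []
};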
@@ -2440,6 +2457,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
@@ -2462,6 +2480,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
@@ -2475,7 +2496,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
               url: annotation.url,
               title: annotation.title
             });
@@ -2558,18 +2579,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
@@ -2579,11 +2606,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
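On the consumer side, the collected logprobs now surface through providerMetadata.openai.logprobs. A minimal sketch, assuming the ai package's generateText and this provider's Responses API models (model id and option values are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Request the top 5 logprobs per token via provider options and read
// them back from the provider metadata (values are illustrative).
const result = await generateText({
  model: openai.responses("gpt-4o-mini"),
  prompt: "Say hello.",
  providerOptions: { openai: { logprobs: 5 } },
});
console.log(result.providerMetadata?.openai?.logprobs);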
@@ -2613,6 +2636,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2838,6 +2862,9 @@ var OpenAIResponsesLanguageModel = class {
               id: value.item_id,
               delta: value.delta
             });
+            if (value.logprobs) {
+              logprobs.push(value.logprobs);
+            }
           } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
             if (value.summary_index > 0) {
               (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -2888,15 +2915,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata: {
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
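Streaming attaches the same metadata to the terminal finish event once the accumulated logprobs are non-empty. A hedged sketch using streamText (assuming the ai package's standard streaming API):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: when streaming, accumulated logprobs arrive with the finish
// event's provider metadata (option values are illustrative).
const result = streamText({
  model: openai.responses("gpt-4o-mini"),
  prompt: "Say hello.",
  providerOptions: { openai: { logprobs: true } },
  onFinish: ({ providerMetadata }) => {
    console.log(providerMetadata?.openai?.logprobs);
  },
});
for await (const _ of result.textStream) {
  // consume the stream so onFinish fires
}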
@@ -2915,7 +2946,8 @@ var usageSchema2 = z13.object({
 var textDeltaChunkSchema = z13.object({
   type: z13.literal("response.output_text.delta"),
   item_id: z13.string(),
-  delta: z13.string()
+  delta: z13.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z13.object({
   type: z13.literal("error"),
@@ -3156,10 +3188,29 @@ var openaiResponsesProviderOptionsSchema = z13.object({
   instructions: z13.string().nullish(),
   reasoningSummary: z13.string().nullish(),
   serviceTier: z13.enum(["auto", "flex", "priority"]).nullish(),
-  include: z13.array(
+  include: z13.array(
+    z13.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z13.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z13.string().nullish(),
-  safetyIdentifier: z13.string().nullish()
+  safetyIdentifier: z13.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z13.union([z13.boolean(), z13.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });

 // src/speech/openai-speech-model.ts
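Taken together, the options schema accepts either a boolean or a number in the 1–20 range for logprobs. A standalone restatement in plain zod ("z13" is simply the bundle's alias for the zod import):

import { z } from "zod";

// Standalone restatement of the new option type (illustrative).
const TOP_LOGPROBS_MAX = 20;
const logprobsOption = z.union([
  z.boolean(),
  z.number().min(1).max(TOP_LOGPROBS_MAX),
]);

logprobsOption.parse(true); // ok: logprobs for each generated token
logprobsOption.parse(5);    // ok: top 5 alternatives per position
// logprobsOption.parse(50)  // would throw: above TOP_LOGPROBS_MAX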