@ai-sdk/openai 2.0.12 → 2.0.13
- package/CHANGELOG.md +8 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +70 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +70 -19
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +70 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +70 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
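
The substantive change in this version is log-probability support for the OpenAI Responses API provider. A minimal usage sketch, assuming the AI SDK's `generateText` entry point from the `ai` package; the model ID and prompt are placeholders:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Request the top 5 log probabilities per generated token.
// `logprobs: true` requests the maximum instead (TOP_LOGPROBS_MAX = 20).
const result = await generateText({
  model: openai.responses('gpt-4o-mini'), // placeholder model ID
  prompt: 'Answer with a single word: yes or no.',
  providerOptions: {
    openai: { logprobs: 5 },
  },
});

// Per the diff below, collected logprobs surface on provider metadata.
console.log(result.providerMetadata?.openai?.logprobs);
```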
package/dist/internal/index.mjs
CHANGED
@@ -2466,6 +2466,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z16.array(
+  z16.object({
+    token: z16.string(),
+    logprob: z16.number(),
+    top_logprobs: z16.array(
+      z16.object({
+        token: z16.string(),
+        logprob: z16.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
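
For reference, a value matching the new `LOGPROBS_SCHEMA` would look like this (illustrative tokens and numbers, not real API output):

```ts
// Illustrative only: the shape LOGPROBS_SCHEMA validates.
const sample = [
  {
    token: 'yes',
    logprob: -0.01,
    top_logprobs: [
      { token: 'yes', logprob: -0.01 },
      { token: 'no', logprob: -4.6 },
    ],
  },
];
```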
@@ -2529,6 +2542,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -2559,9 +2574,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
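
The minified ternaries in the two added `const` lines implement a small mapping. The same logic written out as a readability sketch (the function names are mine, not part of the package):

```ts
const TOP_LOGPROBS_MAX = 20;

// logprobs: true -> request the maximum (20); logprobs: n -> top n;
// anything else  -> omit top_logprobs from the request body.
function resolveTopLogprobs(logprobs?: boolean | number): number | undefined {
  if (typeof logprobs === 'number') return logprobs;
  if (logprobs === true) return TOP_LOGPROBS_MAX;
  return undefined;
}

// When logprobs are requested, "message.output_text.logprobs" is appended to
// (or becomes) the `include` list so the API returns them.
function resolveInclude(
  topLogprobs: number | undefined,
  include?: string[],
): string[] | undefined {
  if (topLogprobs == null) return include;
  return [...(include ?? []), 'message.output_text.logprobs'];
}
```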
@@ -2645,7 +2661,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2679,6 +2695,7 @@ var OpenAIResponsesLanguageModel = class {
         z16.object({
           type: z16.literal("output_text"),
           text: z16.string(),
+          logprobs: LOGPROBS_SCHEMA.nullish(),
           annotations: z16.array(
             z16.object({
               type: z16.literal("url_citation"),
@@ -2756,6 +2773,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
@@ -2778,6 +2796,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
@@ -2791,7 +2812,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
               url: annotation.url,
               title: annotation.title
             });
@@ -2874,18 +2895,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
@@ -2895,11 +2922,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
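
With these changes, non-streaming generations attach logprobs to the provider metadata only when any were collected. The resulting shape, as a type sketch derived from the diff:

```ts
// Sketch of the providerMetadata returned by doGenerate after this change.
type OpenAIResponsesProviderMetadata = {
  openai: {
    responseId: string;
    // Present only when logprobs were requested and the response included them.
    logprobs?: Array<{
      token: string;
      logprob: number;
      top_logprobs: Array<{ token: string; logprob: number }>;
    }>;
  };
};
```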
@@ -2929,6 +2952,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -3154,6 +3178,9 @@ var OpenAIResponsesLanguageModel = class {
               id: value.item_id,
               delta: value.delta
             });
+            if (value.logprobs) {
+              logprobs.push(value.logprobs);
+            }
           } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
             if (value.summary_index > 0) {
               (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -3204,15 +3231,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata: {
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
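
Streaming mirrors the non-streaming path: logprobs accumulate per text-delta chunk and are attached in `flush()` to the finish event's metadata. A streaming sketch, assuming `streamText` from the `ai` package; model ID and prompt are placeholders:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o-mini'), // placeholder model ID
  prompt: 'Say hello.',
  providerOptions: { openai: { logprobs: true } },
});

// Drain the stream; provider metadata resolves once the finish event arrives.
for await (const _ of result.textStream) {
  // consume deltas
}
console.log((await result.providerMetadata)?.openai?.logprobs);
```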
@@ -3231,7 +3262,8 @@ var usageSchema2 = z16.object({
 var textDeltaChunkSchema = z16.object({
   type: z16.literal("response.output_text.delta"),
   item_id: z16.string(),
-  delta: z16.string()
+  delta: z16.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z16.object({
   type: z16.literal("error"),
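
A delta chunk accepted by the extended `textDeltaChunkSchema` would look like this (illustrative values; the item ID is a placeholder):

```ts
// Illustrative only: a streaming chunk the extended schema validates.
const chunk = {
  type: 'response.output_text.delta',
  item_id: 'msg_abc123', // placeholder ID
  delta: 'Hel',
  logprobs: [{ token: 'Hel', logprob: -0.2, top_logprobs: [] }],
};
```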
@@ -3472,10 +3504,29 @@ var openaiResponsesProviderOptionsSchema = z16.object({
   instructions: z16.string().nullish(),
   reasoningSummary: z16.string().nullish(),
   serviceTier: z16.enum(["auto", "flex", "priority"]).nullish(),
-  include: z16.array(
+  include: z16.array(
+    z16.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z16.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z16.string().nullish(),
-  safetyIdentifier: z16.string().nullish()
+  safetyIdentifier: z16.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z16.union([z16.boolean(), z16.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 export {
   OpenAIChatLanguageModel,
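
The new `logprobs` provider option is validated as either a boolean or a number between 1 and `TOP_LOGPROBS_MAX`. A standalone restatement of that rule using plain zod:

```ts
import { z } from 'zod';

const TOP_LOGPROBS_MAX = 20;
const logprobsOption = z
  .union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)])
  .optional();

logprobsOption.parse(true); // ok: request the maximum (20)
logprobsOption.parse(5);    // ok: request the top 5
logprobsOption.parse(21);   // throws: exceeds TOP_LOGPROBS_MAX
```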