@ai-sdk/openai 2.0.12 → 2.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +134 -45
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +134 -45
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +134 -45
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +134 -45
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs CHANGED
```diff
@@ -2466,6 +2466,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z16.array(
+  z16.object({
+    token: z16.string(),
+    logprob: z16.number(),
+    top_logprobs: z16.array(
+      z16.object({
+        token: z16.string(),
+        logprob: z16.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
```
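The new `LOGPROBS_SCHEMA` validates the per-token log probabilities that the Responses API returns on `output_text` parts. As a rough TypeScript sketch of the accepted shape (type names are mine, not exported by the package):

```ts
// Sketch of the shape LOGPROBS_SCHEMA accepts: one entry per generated
// token, each with up to TOP_LOGPROBS_MAX (20) ranked alternatives.
type TokenLogprob = {
  token: string;
  logprob: number; // natural log of the token's probability
  top_logprobs: Array<{ token: string; logprob: number }>;
};
type OutputTextLogprobs = TokenLogprob[];
```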
```diff
@@ -2529,6 +2542,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
```
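The compiled derivation above is dense; here is a readable restatement as a sketch (helper names are illustrative, not part of the package): `logprobs: true` requests the `TOP_LOGPROBS_MAX` ceiling, a number passes through, and any enabled value forces `message.output_text.logprobs` into `include`.

```ts
const TOP_LOGPROBS_MAX = 20;

// `true` requests the maximum; a number passes through; anything else
// leaves the feature off. (Illustrative helper, not in the package.)
function resolveTopLogprobs(logprobs?: boolean | number): number | undefined {
  if (typeof logprobs === "number") return logprobs;
  return logprobs === true ? TOP_LOGPROBS_MAX : undefined;
}

// An enabled value appends the required include entry to the request.
function resolveInclude(
  topLogprobs: number | undefined,
  include?: string[],
): string[] | undefined {
  if (!topLogprobs) return include;
  return [...(include ?? []), "message.output_text.logprobs"];
}
```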
```diff
@@ -2559,9 +2574,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
```
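With the option enabled, the derived values land on the Responses API request body as `include` and `top_logprobs`. For example, `providerOptions: { openai: { logprobs: 5 } }` with no user-supplied `include` would roughly produce:

```ts
// Illustrative request-body fields derived from `logprobs: 5`:
const requestFields = {
  include: ["message.output_text.logprobs"], // injected automatically
  top_logprobs: 5,                           // from the provider option
};
```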
```diff
@@ -2645,7 +2661,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
```
```diff
@@ -2679,14 +2695,24 @@ var OpenAIResponsesLanguageModel = class {
           z16.object({
             type: z16.literal("output_text"),
             text: z16.string(),
+            logprobs: LOGPROBS_SCHEMA.nullish(),
             annotations: z16.array(
-              z16.
-
-
-
-
-
-
+              z16.discriminatedUnion("type", [
+                z16.object({
+                  type: z16.literal("url_citation"),
+                  start_index: z16.number(),
+                  end_index: z16.number(),
+                  url: z16.string(),
+                  title: z16.string()
+                }),
+                z16.object({
+                  type: z16.literal("file_citation"),
+                  start_index: z16.number(),
+                  end_index: z16.number(),
+                  file_id: z16.string(),
+                  quote: z16.string()
+                })
+              ])
             )
           })
         )
```
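For reference, these are annotation payloads that the new discriminated union accepts (all values illustrative):

```ts
// Payloads matching the url_citation / file_citation union above.
const urlCitation = {
  type: "url_citation" as const,
  start_index: 0,
  end_index: 42,
  url: "https://example.com/article",
  title: "Example article",
};

const fileCitation = {
  type: "file_citation" as const,
  start_index: 0,
  end_index: 42,
  file_id: "file_abc123",
  quote: "the quoted passage",
};
```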
```diff
@@ -2756,6 +2782,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
```
```diff
@@ -2778,6 +2805,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
```
```diff
@@ -2788,13 +2818,24 @@ var OpenAIResponsesLanguageModel = class {
               }
             });
             for (const annotation of contentPart.annotations) {
-
-
-
-
-
-
-
+              if (annotation.type === "url_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "url",
+                  id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
+                  url: annotation.url,
+                  title: annotation.title
+                });
+              } else if (annotation.type === "file_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "document",
+                  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
+                  mediaType: "text/plain",
+                  title: annotation.quote,
+                  filename: annotation.file_id
+                });
+              }
             }
           }
           break;
```
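Each citation annotation is mapped onto a `source` content part. A sketch of the part produced for a `file_citation` (the id comes from `config.generateId()` when configured, otherwise the SDK's `generateId()`; values illustrative):

```ts
// Sketch of the `source` part emitted for a file_citation annotation.
const documentSource = {
  type: "source" as const,
  sourceType: "document" as const,
  id: "id_abc123",             // generated id (illustrative)
  mediaType: "text/plain",     // hardcoded for file citations
  title: "the quoted passage", // from annotation.quote
  filename: "file_abc123",     // from annotation.file_id
};
```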
```diff
@@ -2874,18 +2915,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
```
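Taken together, `doGenerate` now surfaces collected logprobs on provider metadata. A hedged usage sketch against the public AI SDK API (model id illustrative):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "Say hello.",
  providerOptions: { openai: { logprobs: 5 } },
});

// Only present when at least one output_text part carried logprobs.
console.log(result.providerMetadata?.openai?.logprobs);
```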
```diff
@@ -2895,11 +2942,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
```
```diff
@@ -2929,6 +2972,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
```
```diff
@@ -2940,7 +2984,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
```
```diff
@@ -3154,6 +3198,9 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
+          if (value.logprobs) {
+            logprobs.push(value.logprobs);
+          }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
             (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
```
```diff
@@ -3192,27 +3239,42 @@ var OpenAIResponsesLanguageModel = class {
           usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
           usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
-
-
-
-
-
-
-
+          if (value.annotation.type === "url_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
+              url: value.annotation.url,
+              title: value.annotation.title
+            });
+          } else if (value.annotation.type === "file_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "document",
+              id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId2(),
+              mediaType: "text/plain",
+              title: value.annotation.quote,
+              filename: value.annotation.file_id
+            });
+          }
         } else if (isErrorChunk(value)) {
           controller.enqueue({ type: "error", error: value });
         }
       },
       flush(controller) {
+        const providerMetadata = {
+          openai: {
+            responseId
+          }
+        };
+        if (logprobs.length > 0) {
+          providerMetadata.openai.logprobs = logprobs;
+        }
         controller.enqueue({
           type: "finish",
           finishReason,
           usage,
-          providerMetadata
-            openai: {
-              responseId
-            }
-          }
+          providerMetadata
         });
       }
     })
```
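The streaming path mirrors `doGenerate`: logprobs accumulate as text deltas arrive and are attached once to the finish event in `flush`. A usage sketch, assuming the v5 `streamText` API (model id illustrative):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "Say hello.",
  providerOptions: { openai: { logprobs: true } },
});

// Drain the stream, then read the metadata attached in flush().
for await (const _ of result.textStream) {
  // text deltas arrive here
}
console.log((await result.providerMetadata)?.openai?.logprobs);
```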
```diff
@@ -3231,7 +3293,8 @@ var usageSchema2 = z16.object({
 var textDeltaChunkSchema = z16.object({
   type: z16.literal("response.output_text.delta"),
   item_id: z16.string(),
-  delta: z16.string()
+  delta: z16.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z16.object({
   type: z16.literal("error"),
```
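An example `response.output_text.delta` event that the extended `textDeltaChunkSchema` now accepts (values illustrative):

```ts
// Illustrative event; `logprobs` rides along with each text delta.
const deltaChunk = {
  type: "response.output_text.delta",
  item_id: "msg_abc123",
  delta: "Hello",
  logprobs: [
    {
      token: "Hello",
      logprob: -0.012,
      top_logprobs: [{ token: "Hello", logprob: -0.012 }],
    },
  ],
};
```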
```diff
@@ -3360,11 +3423,18 @@ var responseFunctionCallArgumentsDeltaSchema = z16.object({
 });
 var responseAnnotationAddedSchema = z16.object({
   type: z16.literal("response.output_text.annotation.added"),
-  annotation: z16.
-
-
-
-
+  annotation: z16.discriminatedUnion("type", [
+    z16.object({
+      type: z16.literal("url_citation"),
+      url: z16.string(),
+      title: z16.string()
+    }),
+    z16.object({
+      type: z16.literal("file_citation"),
+      file_id: z16.string(),
+      quote: z16.string()
+    })
+  ])
 });
 var responseReasoningSummaryPartAddedSchema = z16.object({
   type: z16.literal("response.reasoning_summary_part.added"),
```
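The final hunk below adds the `logprobs` provider option itself. Its validation rule, restated standalone with plain zod (a sketch; the real schema is internal to the package as shown in the diff):

```ts
import { z } from "zod";

// Same rule as the schema below: a boolean, or a number in 1..20
// (TOP_LOGPROBS_MAX), and optional overall.
const logprobsOption = z
  .union([z.boolean(), z.number().min(1).max(20)])
  .optional();

logprobsOption.parse(true); // ok: request the top 20 alternatives
logprobsOption.parse(5);    // ok: request the top 5
// logprobsOption.parse(25); // would throw: above the supported maximum
```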
```diff
@@ -3472,10 +3542,29 @@ var openaiResponsesProviderOptionsSchema = z16.object({
   instructions: z16.string().nullish(),
   reasoningSummary: z16.string().nullish(),
   serviceTier: z16.enum(["auto", "flex", "priority"]).nullish(),
-  include: z16.array(
+  include: z16.array(
+    z16.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z16.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z16.string().nullish(),
-  safetyIdentifier: z16.string().nullish()
+  safetyIdentifier: z16.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z16.union([z16.boolean(), z16.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 export {
   OpenAIChatLanguageModel,
```