@ai-sdk/openai 2.0.12 → 2.0.13
- package/CHANGELOG.md +8 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +70 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +70 -19
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +70 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +70 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
@@ -233,6 +233,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
         "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
     }>>>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         low: "low";
@@ -241,6 +242,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     }>>>;
     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
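The provider options schema gains two fields: a new `include` entry and a `logprobs` option typed as boolean-or-number. A minimal usage sketch, assuming the AI SDK v5 `generateText` API and an illustrative model id and prompt (the option and metadata key names come from this diff):

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Request the top 5 alternatives per generated token.
const { text, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Answer with one word: is the sky blue?',
  providerOptions: {
    openai: { logprobs: 5 }, // or `true` for the provider maximum
  },
});

// When requested, logprobs ride on provider metadata (see the index.js hunks below).
console.log(text, providerMetadata?.openai?.logprobs);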
package/dist/internal/index.d.ts CHANGED

@@ -233,6 +233,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
         "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
     }>>>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         low: "low";
@@ -241,6 +242,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     }>>>;
     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
package/dist/internal/index.js CHANGED

@@ -2446,6 +2446,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = import_v416.z.array(
+  import_v416.z.object({
+    token: import_v416.z.string(),
+    logprob: import_v416.z.number(),
+    top_logprobs: import_v416.z.array(
+      import_v416.z.object({
+        token: import_v416.z.string(),
+        logprob: import_v416.z.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
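De-minified, the new LOGPROBS_SCHEMA validates one entry per generated token. A sketch of the equivalent TypeScript shape:

// Equivalent shape to LOGPROBS_SCHEMA above: each generated token carries its
// own log probability plus the requested top-N alternative tokens.
type TokenLogProb = {
  token: string;
  logprob: number;
  top_logprobs: Array<{ token: string; logprob: number }>;
};
type LogProbs = TokenLogProb[];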
@@ -2509,6 +2522,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
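The two minified lines above normalize the option: `true` maps to the provider maximum, a number passes through, and any logprobs request forces the "message.output_text.logprobs" include flag. A readable sketch of the same logic (the helper name is hypothetical):

const TOP_LOGPROBS_MAX = 20;

// Hypothetical helper mirroring the minified normalization above.
function resolveLogprobs(options?: {
  logprobs?: boolean | number;
  include?: string[] | null;
}): { topLogprobs: number | undefined; include: string[] | null | undefined } {
  const topLogprobs =
    typeof options?.logprobs === 'number'
      ? options.logprobs
      : options?.logprobs === true
        ? TOP_LOGPROBS_MAX
        : undefined;
  const include = topLogprobs
    ? [...(options?.include ?? []), 'message.output_text.logprobs']
    : options?.include;
  return { topLogprobs, include };
}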
@@ -2539,9 +2554,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
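With `include` and `top_logprobs` wired into the request, a call with `logprobs: 2` would produce a /responses body along these lines (illustrative values; only the field names come from the hunk above):

// Illustrative request body; only the logprobs-related fields are shown.
const body = {
  model: 'gpt-4o-mini',
  input: [{ role: 'user', content: 'Say hello.' }],
  include: ['message.output_text.logprobs'],
  top_logprobs: 2,
};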
@@ -2625,7 +2641,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2659,6 +2675,7 @@ var OpenAIResponsesLanguageModel = class {
           import_v416.z.object({
             type: import_v416.z.literal("output_text"),
             text: import_v416.z.string(),
+            logprobs: LOGPROBS_SCHEMA.nullish(),
             annotations: import_v416.z.array(
               import_v416.z.object({
                 type: import_v416.z.literal("url_citation"),
@@ -2736,6 +2753,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
@@ -2758,6 +2776,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
@@ -2771,7 +2792,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils14.generateId)(),
               url: annotation.url,
               title: annotation.title
             });
@@ -2854,18 +2875,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+        cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
       },
       request: { body },
       response: {
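On the non-streaming path, collected logprobs are attached to provider metadata only when present. A consumer-side sketch (assumes the AI SDK v5 `generateText` API and an illustrative model id):

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Pick a color.',
  providerOptions: { openai: { logprobs: true } },
});

// One LOGPROBS_SCHEMA-shaped array per output_text part that carried logprobs;
// absent entirely when logprobs were not requested or returned.
const logprobs = result.providerMetadata?.openai?.logprobs;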
@@ -2875,11 +2902,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
@@ -2909,6 +2932,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
    let responseId = null;
    const ongoingToolCalls = {};
    let hasToolCalls = false;
@@ -3134,6 +3158,9 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
+          if (value.logprobs) {
+            logprobs.push(value.logprobs);
+          }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
             (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -3184,15 +3211,19 @@ var OpenAIResponsesLanguageModel = class {
           }
         },
         flush(controller) {
+          const providerMetadata = {
+            openai: {
+              responseId
+            }
+          };
+          if (logprobs.length > 0) {
+            providerMetadata.openai.logprobs = logprobs;
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
             usage,
-            providerMetadata: {
-              openai: {
-                responseId
-              }
-            }
+            providerMetadata
           });
         }
       })
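The streaming path mirrors the non-streaming one: per-delta logprobs accumulate and only attach to the final finish event's metadata. A consumer-side sketch (assumes the AI SDK v5 `streamText` API, whose result exposes `providerMetadata` as a promise):

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Say hello.',
  providerOptions: { openai: { logprobs: true } },
});

// Drain the stream first; metadata resolves once the stream finishes.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
console.log((await result.providerMetadata)?.openai?.logprobs);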
@@ -3211,7 +3242,8 @@ var usageSchema2 = import_v416.z.object({
 var textDeltaChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("response.output_text.delta"),
   item_id: import_v416.z.string(),
-  delta: import_v416.z.string()
+  delta: import_v416.z.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("error"),
@@ -3452,10 +3484,29 @@ var openaiResponsesProviderOptionsSchema = import_v416.z.object({
   instructions: import_v416.z.string().nullish(),
   reasoningSummary: import_v416.z.string().nullish(),
   serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
-  include: import_v416.z.array(
+  include: import_v416.z.array(
+    import_v416.z.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: import_v416.z.string().nullish(),
-  safetyIdentifier: import_v416.z.string().nullish()
+  safetyIdentifier: import_v416.z.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
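The runtime schema pins the numeric form to 1–20 (TOP_LOGPROBS_MAX). A standalone zod sketch of the accepted values (the plain `zod` import path is an assumption; the dist code uses its bundled v4 import):

import { z } from 'zod';

const TOP_LOGPROBS_MAX = 20;
const logprobsOption = z
  .union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)])
  .optional();

console.log(logprobsOption.safeParse(true).success); // true: provider max (20)
console.log(logprobsOption.safeParse(5).success);    // true: top 5 per token
console.log(logprobsOption.safeParse(0).success);    // false: below min of 1
console.log(logprobsOption.safeParse(21).success);   // false: above max of 20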