@ai-sdk/openai 2.0.12 → 2.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +134 -45
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +134 -45
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -0
- package/dist/internal/index.d.ts +2 -0
- package/dist/internal/index.js +134 -45
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +134 -45
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -2150,6 +2150,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = z13.array(
+  z13.object({
+    token: z13.string(),
+    logprob: z13.number(),
+    top_logprobs: z13.array(
+      z13.object({
+        token: z13.string(),
+        logprob: z13.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
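
The new `LOGPROBS_SCHEMA` validates the logprobs payload that the Responses API attaches to `output_text` content parts and to `response.output_text.delta` stream chunks. Spelled out as a TypeScript type (illustrative only; the package validates with zod at runtime):

```ts
// Illustrative type for the payload LOGPROBS_SCHEMA accepts:
// one entry per generated token, each with up to TOP_LOGPROBS_MAX alternatives.
type LogProbEntry = {
  token: string;
  logprob: number;
  top_logprobs: Array<{ token: string; logprob: number }>;
};
type LogProbs = LogProbEntry[];
```
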
@@ -2213,6 +2226,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
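
De-minified, the two constants added above implement this mapping (a readable sketch, equivalent up to the `Array.isArray` guard in the dist code): `logprobs: true` requests the maximum of 20 top tokens, a number requests that many, and any logprobs request forces `"message.output_text.logprobs"` into the `include` list.

```ts
// Readable equivalent of the minified ternaries (sketch).
const topLogprobs =
  typeof openaiOptions?.logprobs === "number"
    ? openaiOptions.logprobs
    : openaiOptions?.logprobs === true
      ? TOP_LOGPROBS_MAX // 20
      : undefined;

const openaiOptionsInclude = topLogprobs
  ? [...(openaiOptions?.include ?? []), "message.output_text.logprobs"]
  : openaiOptions?.include;
```
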
@@ -2243,9 +2258,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include:
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2329,7 +2345,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2363,14 +2379,24 @@ var OpenAIResponsesLanguageModel = class {
           z13.object({
             type: z13.literal("output_text"),
             text: z13.string(),
+            logprobs: LOGPROBS_SCHEMA.nullish(),
             annotations: z13.array(
-              z13.
-
-
-
-
-
-
+              z13.discriminatedUnion("type", [
+                z13.object({
+                  type: z13.literal("url_citation"),
+                  start_index: z13.number(),
+                  end_index: z13.number(),
+                  url: z13.string(),
+                  title: z13.string()
+                }),
+                z13.object({
+                  type: z13.literal("file_citation"),
+                  start_index: z13.number(),
+                  end_index: z13.number(),
+                  file_id: z13.string(),
+                  quote: z13.string()
+                })
+              ])
             )
           })
         )
@@ -2440,6 +2466,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
@@ -2462,6 +2489,9 @@ var OpenAIResponsesLanguageModel = class {
        }
        case "message": {
          for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
            content.push({
              type: "text",
              text: contentPart.text,
@@ -2472,13 +2502,24 @@ var OpenAIResponsesLanguageModel = class {
              }
            });
            for (const annotation of contentPart.annotations) {
-
-
-
-
-
-
-
+              if (annotation.type === "url_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "url",
+                  id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
+                  url: annotation.url,
+                  title: annotation.title
+                });
+              } else if (annotation.type === "file_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "document",
+                  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
+                  mediaType: "text/plain",
+                  title: annotation.quote,
+                  filename: annotation.file_id
+                });
+              }
            }
          }
          break;
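
With the annotation handling above, `url_citation` annotations become `source` content parts with `sourceType: "url"`, and `file_citation` annotations become `sourceType: "document"` parts that carry the quote as the title and the file id as the filename. A minimal consumer sketch, assuming the AI SDK's `generateText` result shape (`result.sources` and the model id are not part of this diff):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "What does the attached document say?",
});

for (const source of result.sources) {
  if (source.sourceType === "url") {
    console.log(source.url, source.title);
  } else {
    // file citations: mediaType "text/plain", title = quote, filename = file_id
    console.log(source.filename, source.title);
  }
}
```
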
@@ -2558,18 +2599,24 @@ var OpenAIResponsesLanguageModel = class {
        }
      }
    }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
    return {
      content,
      finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
      },
      request: { body },
      response: {
@@ -2579,11 +2626,7 @@ var OpenAIResponsesLanguageModel = class {
        headers: responseHeaders,
        body: rawResponse
      },
-      providerMetadata
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
      warnings
    };
  }
@@ -2613,6 +2656,7 @@ var OpenAIResponsesLanguageModel = class {
      outputTokens: void 0,
      totalTokens: void 0
    };
+    const logprobs = [];
    let responseId = null;
    const ongoingToolCalls = {};
    let hasToolCalls = false;
@@ -2624,7 +2668,7 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({ type: "stream-start", warnings });
      },
      transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
        if (options.includeRawChunks) {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
@@ -2838,6 +2882,9 @@ var OpenAIResponsesLanguageModel = class {
            id: value.item_id,
            delta: value.delta
          });
+          if (value.logprobs) {
+            logprobs.push(value.logprobs);
+          }
        } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
          if (value.summary_index > 0) {
            (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -2876,27 +2923,42 @@ var OpenAIResponsesLanguageModel = class {
          usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
          usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
        } else if (isResponseAnnotationAddedChunk(value)) {
-
-
-
-
-
-
-
+          if (value.annotation.type === "url_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
+              url: value.annotation.url,
+              title: value.annotation.title
+            });
+          } else if (value.annotation.type === "file_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "document",
+              id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId2(),
+              mediaType: "text/plain",
+              title: value.annotation.quote,
+              filename: value.annotation.file_id
+            });
+          }
        } else if (isErrorChunk(value)) {
          controller.enqueue({ type: "error", error: value });
        }
      },
      flush(controller) {
+        const providerMetadata = {
+          openai: {
+            responseId
+          }
+        };
+        if (logprobs.length > 0) {
+          providerMetadata.openai.logprobs = logprobs;
+        }
        controller.enqueue({
          type: "finish",
          finishReason,
          usage,
-          providerMetadata
-            openai: {
-              responseId
-            }
-          }
+          providerMetadata
        });
      }
    })
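
In the streaming path, logprobs arriving on each `response.output_text.delta` chunk are collected and attached to the `finish` part's provider metadata in `flush`. A usage sketch, assuming the AI SDK's `streamText` API (the call shape and model id are not part of this diff):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "Write a haiku.",
  providerOptions: { openai: { logprobs: true } }, // true => top 20
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}

// Resolves when the stream finishes; per this diff, accumulated
// logprobs ride on the OpenAI provider metadata.
const metadata = await result.providerMetadata;
console.log(metadata?.openai?.logprobs);
```
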
@@ -2915,7 +2977,8 @@ var usageSchema2 = z13.object({
 var textDeltaChunkSchema = z13.object({
   type: z13.literal("response.output_text.delta"),
   item_id: z13.string(),
-  delta: z13.string()
+  delta: z13.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = z13.object({
   type: z13.literal("error"),
@@ -3044,11 +3107,18 @@ var responseFunctionCallArgumentsDeltaSchema = z13.object({
 });
 var responseAnnotationAddedSchema = z13.object({
   type: z13.literal("response.output_text.annotation.added"),
-  annotation: z13.
-
-
-
-
+  annotation: z13.discriminatedUnion("type", [
+    z13.object({
+      type: z13.literal("url_citation"),
+      url: z13.string(),
+      title: z13.string()
+    }),
+    z13.object({
+      type: z13.literal("file_citation"),
+      file_id: z13.string(),
+      quote: z13.string()
+    })
+  ])
 });
 var responseReasoningSummaryPartAddedSchema = z13.object({
   type: z13.literal("response.reasoning_summary_part.added"),
@@ -3156,10 +3226,29 @@ var openaiResponsesProviderOptionsSchema = z13.object({
   instructions: z13.string().nullish(),
   reasoningSummary: z13.string().nullish(),
   serviceTier: z13.enum(["auto", "flex", "priority"]).nullish(),
-  include: z13.array(
+  include: z13.array(
+    z13.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: z13.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: z13.string().nullish(),
-  safetyIdentifier: z13.string().nullish()
+  safetyIdentifier: z13.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z13.union([z13.boolean(), z13.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 
 // src/speech/openai-speech-model.ts
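
Taken together, the provider options schema now accepts `logprobs: true` or `logprobs: 1..20` on the Responses models. A minimal sketch of opting in and reading the result, assuming the AI SDK's `generateText` API (the model id is illustrative):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "Hello!",
  providerOptions: {
    openai: { logprobs: 3 }, // 1..TOP_LOGPROBS_MAX (20); true maps to 20
  },
});

// Per this diff, collected logprobs surface on the provider metadata.
console.log(result.providerMetadata?.openai?.logprobs);
```
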