@ai-sdk/openai 1.3.15 → 1.3.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +4 -1
- package/dist/index.d.ts +4 -1
- package/dist/index.js +44 -9
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +44 -9
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.d.mts +4 -1
- package/internal/dist/index.d.ts +4 -1
- package/internal/dist/index.js +44 -9
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +44 -9
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/internal/dist/index.mjs
CHANGED
@@ -1037,7 +1037,7 @@ var openaiChatChunkSchema = z2.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId
+  return modelId.startsWith("o");
 }
 function isAudioModel(modelId) {
   return modelId.startsWith("gpt-4o-audio-preview");
@@ -2225,8 +2225,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: {
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
@@ -2308,7 +2315,7 @@ var OpenAIResponsesLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g;
     const { args: body, warnings } = this.getArgs(options);
     const {
       responseHeaders,
@@ -2361,7 +2368,13 @@ var OpenAIResponsesLanguageModel = class {
             type: z8.literal("computer_call")
           }),
           z8.object({
-            type: z8.literal("reasoning")
+            type: z8.literal("reasoning"),
+            summary: z8.array(
+              z8.object({
+                type: z8.literal("summary_text"),
+                text: z8.string()
+              })
+            )
           })
         ])
       ),
@@ -2379,6 +2392,7 @@ var OpenAIResponsesLanguageModel = class {
       toolName: output.name,
       args: output.arguments
     }));
+    const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
     return {
       text: outputTextElements.map((content) => content.text).join("\n"),
       sources: outputTextElements.flatMap(
@@ -2393,10 +2407,14 @@ var OpenAIResponsesLanguageModel = class {
         })
       ),
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
         hasToolCalls: toolCalls.length > 0
       }),
       toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+      reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
+        type: "text",
+        text: summary.text
+      })) : void 0,
       usage: {
         promptTokens: response.usage.input_tokens,
         completionTokens: response.usage.output_tokens
@@ -2420,8 +2438,8 @@ var OpenAIResponsesLanguageModel = class {
       providerMetadata: {
         openai: {
           responseId: response.id,
-          cachedPromptTokens: (
-          reasoningTokens: (
+          cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
+          reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
         }
       },
       warnings
@@ -2504,6 +2522,11 @@ var OpenAIResponsesLanguageModel = class {
              type: "text-delta",
              textDelta: value.delta
            });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              textDelta: value.delta
+            });
          } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
            ongoingToolCalls[value.output_index] = void 0;
            hasToolCalls = true;
@@ -2635,6 +2658,13 @@ var responseAnnotationAddedSchema = z8.object({
     title: z8.string()
   })
 });
+var responseReasoningSummaryTextDeltaSchema = z8.object({
+  type: z8.literal("response.reasoning_summary_text.delta"),
+  item_id: z8.string(),
+  output_index: z8.number(),
+  summary_index: z8.number(),
+  delta: z8.string()
+});
 var openaiResponsesChunkSchema = z8.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
@@ -2643,6 +2673,7 @@ var openaiResponsesChunkSchema = z8.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
   z8.object({ type: z8.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2667,6 +2698,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2696,7 +2730,8 @@ var openaiResponsesProviderOptionsSchema = z8.object({
   user: z8.string().nullish(),
   reasoningEffort: z8.string().nullish(),
   strictSchemas: z8.boolean().nullish(),
-  instructions: z8.string().nullish()
+  instructions: z8.string().nullish(),
+  reasoningSummary: z8.string().nullish()
 });
 export {
   OpenAIChatLanguageModel,