@ai-sdk/openai 2.0.0-beta.3 → 2.0.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/index.d.mts +5 -1
- package/dist/index.d.ts +5 -1
- package/dist/index.js +289 -190
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +277 -178
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +5 -1
- package/dist/internal/index.d.ts +5 -1
- package/dist/internal/index.js +277 -178
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +271 -172
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -1988,19 +1988,22 @@ import {
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as parseProviderOptions6,
+  parseProviderOptions as parseProviderOptions7,
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as z14 } from "zod/v4";
+import { z as z15 } from "zod/v4";
 
 // src/responses/convert-to-openai-responses-messages.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-function convertToOpenAIResponsesMessages({
+import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+import { z as z14 } from "zod/v4";
+async function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
 }) {
+  var _a, _b;
   const messages = [];
   const warnings = [];
   for (const { role, content } of prompt) {
@@ -2035,7 +2038,7 @@ function convertToOpenAIResponsesMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var _a, _b, _c;
+            var _a2, _b2, _c;
             switch (part.type) {
               case "text": {
                 return { type: "input_text", text: part.text };
@@ -2047,7 +2050,7 @@ function convertToOpenAIResponsesMessages({
                     type: "input_image",
                     image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
                     // OpenAI specific extension: image detail
-                    detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                   };
                 } else if (part.mediaType === "application/pdf") {
                   if (part.data instanceof URL) {
@@ -2072,6 +2075,7 @@ function convertToOpenAIResponsesMessages({
         break;
       }
       case "assistant": {
+        const reasoningMessages = {};
         for (const part of content) {
           switch (part.type) {
             case "text": {
@@ -2100,6 +2104,43 @@ function convertToOpenAIResponsesMessages({
               });
               break;
             }
+            case "reasoning": {
+              const providerOptions = await parseProviderOptions6({
+                provider: "openai",
+                providerOptions: part.providerOptions,
+                schema: openaiResponsesReasoningProviderOptionsSchema
+              });
+              const reasoningId = (_a = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _a.id;
+              if (reasoningId != null) {
+                const existingReasoningMessage = reasoningMessages[reasoningId];
+                const summaryParts = [];
+                if (part.text.length > 0) {
+                  summaryParts.push({ type: "summary_text", text: part.text });
+                } else {
+                  warnings.push({
+                    type: "other",
+                    message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+                  });
+                }
+                if (existingReasoningMessage === void 0) {
+                  reasoningMessages[reasoningId] = {
+                    type: "reasoning",
+                    id: reasoningId,
+                    encrypted_content: (_b = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _b.encryptedContent,
+                    summary: summaryParts
+                  };
+                  messages.push(reasoningMessages[reasoningId]);
+                } else {
+                  existingReasoningMessage.summary.push(...summaryParts);
+                }
+              } else {
+                warnings.push({
+                  type: "other",
+                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+                });
+              }
+              break;
+            }
           }
         }
         break;
@@ -2135,6 +2176,12 @@ function convertToOpenAIResponsesMessages({
   }
   return { messages, warnings };
 }
+var openaiResponsesReasoningProviderOptionsSchema = z14.object({
+  reasoning: z14.object({
+    id: z14.string().nullish(),
+    encryptedContent: z14.string().nullish()
+  }).nullish()
+});
 
 // src/responses/map-openai-responses-finish-reason.ts
 function mapOpenAIResponseFinishReason({
@@ -2275,12 +2322,12 @@ var OpenAIResponsesLanguageModel = class {
     if (stopSequences != null) {
       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
-    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-    const openaiOptions = await parseProviderOptions6({
+    const openaiOptions = await parseProviderOptions7({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
@@ -2311,6 +2358,7 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+      include: openaiOptions == null ? void 0 : openaiOptions.include,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2343,6 +2391,21 @@ var OpenAIResponsesLanguageModel = class {
           details: "topP is not supported for reasoning models"
         });
       }
+    } else {
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningEffort",
+          details: "reasoningEffort is not supported for non-reasoning models"
+        });
+      }
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningSummary",
+          details: "reasoningSummary is not supported for non-reasoning models"
+        });
+      }
     }
     if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
       warnings.push({
@@ -2371,7 +2434,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -2386,59 +2449,61 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler6(
-        z14.object({
-          id: z14.string(),
-          created_at: z14.number(),
-          model: z14.string(),
-          output: z14.array(
-            z14.discriminatedUnion("type", [
-              z14.object({
-                type: z14.literal("message"),
-                role: z14.literal("assistant"),
-                content: z14.array(
-                  z14.object({
-                    type: z14.literal("output_text"),
-                    text: z14.string(),
-                    annotations: z14.array(
-                      z14.object({
-                        type: z14.literal("url_citation"),
-                        start_index: z14.number(),
-                        end_index: z14.number(),
-                        url: z14.string(),
-                        title: z14.string()
+        z15.object({
+          id: z15.string(),
+          created_at: z15.number(),
+          model: z15.string(),
+          output: z15.array(
+            z15.discriminatedUnion("type", [
+              z15.object({
+                type: z15.literal("message"),
+                role: z15.literal("assistant"),
+                content: z15.array(
+                  z15.object({
+                    type: z15.literal("output_text"),
+                    text: z15.string(),
+                    annotations: z15.array(
+                      z15.object({
+                        type: z15.literal("url_citation"),
+                        start_index: z15.number(),
+                        end_index: z15.number(),
+                        url: z15.string(),
+                        title: z15.string()
                       })
                     )
                   })
                 )
              }),
-              z14.object({
-                type: z14.literal("function_call"),
-                call_id: z14.string(),
-                name: z14.string(),
-                arguments: z14.string()
+              z15.object({
+                type: z15.literal("function_call"),
+                call_id: z15.string(),
+                name: z15.string(),
+                arguments: z15.string()
              }),
-              z14.object({
-                type: z14.literal("web_search_call"),
-                id: z14.string(),
-                status: z14.string().optional()
+              z15.object({
+                type: z15.literal("web_search_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-              z14.object({
-                type: z14.literal("computer_call"),
-                id: z14.string(),
-                status: z14.string().optional()
+              z15.object({
+                type: z15.literal("computer_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-              z14.object({
-                type: z14.literal("reasoning"),
-                summary: z14.array(
-                  z14.object({
-                    type: z14.literal("summary_text"),
-                    text: z14.string()
+              z15.object({
+                type: z15.literal("reasoning"),
+                id: z15.string(),
+                encrypted_content: z15.string().nullish(),
+                summary: z15.array(
+                  z15.object({
+                    type: z15.literal("summary_text"),
+                    text: z15.string()
                   })
                 )
              })
            ])
          ),
-          incomplete_details: z14.object({ reason: z14.string() }).nullable(),
+          incomplete_details: z15.object({ reason: z15.string() }).nullable(),
          usage: usageSchema2
        })
      ),
@@ -2449,10 +2514,23 @@ var OpenAIResponsesLanguageModel = class {
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
-          content.push({
-            type: "reasoning",
-
-          });
+          if (part.summary.length === 0) {
+            part.summary.push({ type: "summary_text", text: "" });
+          }
+          for (const summary of part.summary) {
+            content.push({
+              type: "reasoning",
+              text: summary.text,
+              providerMetadata: {
+                openai: {
+                  reasoning: {
+                    id: part.id,
+                    encryptedContent: (_a = part.encrypted_content) != null ? _a : null
+                  }
+                }
+              }
+            });
+          }
           break;
         }
         case "message": {
@@ -2465,7 +2543,7 @@ var OpenAIResponsesLanguageModel = class {
              content.push({
                type: "source",
                sourceType: "url",
-                id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+                id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
                url: annotation.url,
                title: annotation.title
              });
@@ -2524,15 +2602,15 @@ var OpenAIResponsesLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
-        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
+        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -2586,7 +2664,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -2635,7 +2713,15 @@ var OpenAIResponsesLanguageModel = class {
            } else if (value.item.type === "reasoning") {
              controller.enqueue({
                type: "reasoning-start",
-                id: value.item.id
+                id: value.item.id,
+                providerMetadata: {
+                  openai: {
+                    reasoning: {
+                      id: value.item.id,
+                      encryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+                    }
+                  }
+                }
              });
            }
          } else if (isResponseOutputItemDoneChunk(value)) {
@@ -2708,7 +2794,15 @@ var OpenAIResponsesLanguageModel = class {
            } else if (value.item.type === "reasoning") {
              controller.enqueue({
                type: "reasoning-end",
-                id: value.item.id
+                id: value.item.id,
+                providerMetadata: {
+                  openai: {
+                    reasoning: {
+                      id: value.item.id,
+                      encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+                    }
+                  }
+                }
              });
            }
          } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
@@ -2737,24 +2831,24 @@ var OpenAIResponsesLanguageModel = class {
          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
            controller.enqueue({
              type: "reasoning-delta",
-
-
+              id: value.item_id,
+              delta: value.delta
            });
          } else if (isResponseFinishedChunk(value)) {
            finishReason = mapOpenAIResponseFinishReason({
-              finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+              finishReason: (_c = value.response.incomplete_details) == null ? void 0 : _c.reason,
              hasToolCalls
            });
            usage.inputTokens = value.response.usage.input_tokens;
            usage.outputTokens = value.response.usage.output_tokens;
            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
-            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
+            usage.reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+            usage.cachedInputTokens = (_g = (_f = value.response.usage.input_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
          } else if (isResponseAnnotationAddedChunk(value)) {
            controller.enqueue({
              type: "source",
              sourceType: "url",
-              id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+              id: (_j = (_i = (_h = self.config).generateId) == null ? void 0 : _i.call(_h)) != null ? _j : generateId2(),
              url: value.annotation.url,
              title: value.annotation.title
            });
@@ -2779,124 +2873,129 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema2 = z14.object({
-  input_tokens: z14.number(),
-  input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
-  output_tokens: z14.number(),
-  output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
+var usageSchema2 = z15.object({
+  input_tokens: z15.number(),
+  input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
+  output_tokens: z15.number(),
+  output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = z14.object({
-  type: z14.literal("response.output_text.delta"),
-  item_id: z14.string(),
-  delta: z14.string()
+var textDeltaChunkSchema = z15.object({
+  type: z15.literal("response.output_text.delta"),
+  item_id: z15.string(),
+  delta: z15.string()
 });
-var responseFinishedChunkSchema = z14.object({
-  type: z14.enum(["response.completed", "response.incomplete"]),
-  response: z14.object({
-    incomplete_details: z14.object({ reason: z14.string() }).nullish(),
+var responseFinishedChunkSchema = z15.object({
+  type: z15.enum(["response.completed", "response.incomplete"]),
+  response: z15.object({
+    incomplete_details: z15.object({ reason: z15.string() }).nullish(),
    usage: usageSchema2
  })
 });
-var responseCreatedChunkSchema = z14.object({
-  type: z14.literal("response.created"),
-  response: z14.object({
-    id: z14.string(),
-    created_at: z14.number(),
-    model: z14.string()
+var responseCreatedChunkSchema = z15.object({
+  type: z15.literal("response.created"),
+  response: z15.object({
+    id: z15.string(),
+    created_at: z15.number(),
+    model: z15.string()
  })
 });
-var responseOutputItemAddedSchema = z14.object({
-  type: z14.literal("response.output_item.added"),
-  output_index: z14.number(),
-  item: z14.discriminatedUnion("type", [
-    z14.object({
-      type: z14.literal("message"),
-      id: z14.string()
+var responseOutputItemAddedSchema = z15.object({
+  type: z15.literal("response.output_item.added"),
+  output_index: z15.number(),
+  item: z15.discriminatedUnion("type", [
+    z15.object({
+      type: z15.literal("message"),
+      id: z15.string()
    }),
-    z14.object({
-      type: z14.literal("reasoning"),
-      id: z14.string()
+    z15.object({
+      type: z15.literal("reasoning"),
+      id: z15.string(),
+      encrypted_content: z15.string().nullish(),
+      summary: z15.array(
+        z15.object({
+          type: z15.literal("summary_text"),
+          text: z15.string()
+        })
+      )
    }),
-    z14.object({
-      type: z14.literal("function_call"),
-      id: z14.string(),
-      call_id: z14.string(),
-      name: z14.string(),
-      arguments: z14.string()
+    z15.object({
+      type: z15.literal("function_call"),
+      id: z15.string(),
+      call_id: z15.string(),
+      name: z15.string(),
+      arguments: z15.string()
    }),
-    z14.object({
-      type: z14.literal("web_search_call"),
-      id: z14.string(),
-      status: z14.string()
+    z15.object({
+      type: z15.literal("web_search_call"),
+      id: z15.string(),
+      status: z15.string()
    }),
-    z14.object({
-      type: z14.literal("computer_call"),
-      id: z14.string(),
-      status: z14.string()
+    z15.object({
+      type: z15.literal("computer_call"),
+      id: z15.string(),
+      status: z15.string()
    })
  ])
 });
-var responseOutputItemDoneSchema = z14.object({
-  type: z14.literal("response.output_item.done"),
-  output_index: z14.number(),
-  item: z14.discriminatedUnion("type", [
-    z14.object({
-      type: z14.literal("message"),
-      id: z14.string()
+var responseOutputItemDoneSchema = z15.object({
+  type: z15.literal("response.output_item.done"),
+  output_index: z15.number(),
+  item: z15.discriminatedUnion("type", [
+    z15.object({
+      type: z15.literal("message"),
+      id: z15.string()
    }),
-    z14.object({
-      type: z14.literal("reasoning"),
-      id: z14.string()
+    z15.object({
+      type: z15.literal("reasoning"),
+      id: z15.string(),
+      encrypted_content: z15.string().nullish(),
+      summary: z15.array(
+        z15.object({
+          type: z15.literal("summary_text"),
+          text: z15.string()
+        })
+      )
    }),
-    z14.object({
-      type: z14.literal("function_call"),
-      id: z14.string(),
-      call_id: z14.string(),
-      name: z14.string(),
-      arguments: z14.string(),
-      status: z14.literal("completed")
+    z15.object({
+      type: z15.literal("function_call"),
+      id: z15.string(),
+      call_id: z15.string(),
+      name: z15.string(),
+      arguments: z15.string(),
+      status: z15.literal("completed")
    }),
-    z14.object({
-      type: z14.literal("web_search_call"),
-      id: z14.string(),
-      status: z14.literal("completed")
+    z15.object({
+      type: z15.literal("web_search_call"),
+      id: z15.string(),
+      status: z15.literal("completed")
    }),
-    z14.object({
-      type: z14.literal("computer_call"),
-      id: z14.string(),
-      status: z14.literal("completed")
+    z15.object({
+      type: z15.literal("computer_call"),
+      id: z15.string(),
+      status: z15.literal("completed")
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema = z14.object({
-  type: z14.literal("response.function_call_arguments.delta"),
-  item_id: z14.string(),
-  output_index: z14.number(),
-  delta: z14.string()
+var responseFunctionCallArgumentsDeltaSchema = z15.object({
+  type: z15.literal("response.function_call_arguments.delta"),
+  item_id: z15.string(),
+  output_index: z15.number(),
+  delta: z15.string()
 });
-var responseAnnotationAddedSchema = z14.object({
-  type: z14.literal("response.output_text.annotation.added"),
-  annotation: z14.object({
-    type: z14.literal("url_citation"),
-    url: z14.string(),
-    title: z14.string()
+var responseAnnotationAddedSchema = z15.object({
+  type: z15.literal("response.output_text.annotation.added"),
+  annotation: z15.object({
+    type: z15.literal("url_citation"),
+    url: z15.string(),
+    title: z15.string()
  })
 });
-var responseReasoningSummaryTextDeltaSchema = z14.object({
-  type: z14.literal("response.reasoning_summary_text.delta"),
-  item_id: z14.string(),
-  output_index: z14.number(),
-  summary_index: z14.number(),
-  delta: z14.string()
-});
-var responseReasoningSummaryPartDoneSchema = z14.object({
-  type: z14.literal("response.reasoning_summary_part.done"),
-  item_id: z14.string(),
-  output_index: z14.number(),
-  summary_index: z14.number(),
-  part: z14.unknown().nullish()
+var responseReasoningSummaryTextDeltaSchema = z15.object({
+  type: z15.literal("response.reasoning_summary_text.delta"),
+  item_id: z15.string(),
+  delta: z15.string()
 });
-var openaiResponsesChunkSchema = z14.union([
+var openaiResponsesChunkSchema = z15.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2905,8 +3004,7 @@ var openaiResponsesChunkSchema = z14.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
-  responseReasoningSummaryPartDoneSchema,
-  z14.object({ type: z14.string() }).passthrough()
+  z15.object({ type: z15.string() }).passthrough()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2934,7 +3032,7 @@ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
 function getResponsesModelConfig(modelId) {
-  if (modelId.startsWith("o")) {
+  if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
         isReasoningModel: true,
@@ -2957,17 +3055,18 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
-var openaiResponsesProviderOptionsSchema = z14.object({
-  metadata: z14.any().nullish(),
-  parallelToolCalls: z14.boolean().nullish(),
-  previousResponseId: z14.string().nullish(),
-  store: z14.boolean().nullish(),
-  user: z14.string().nullish(),
-  reasoningEffort: z14.string().nullish(),
-  strictJsonSchema: z14.boolean().nullish(),
-  instructions: z14.string().nullish(),
-  reasoningSummary: z14.string().nullish(),
-  serviceTier: z14.enum(["auto", "flex"]).nullish()
+var openaiResponsesProviderOptionsSchema = z15.object({
+  metadata: z15.any().nullish(),
+  parallelToolCalls: z15.boolean().nullish(),
+  previousResponseId: z15.string().nullish(),
+  store: z15.boolean().nullish(),
+  user: z15.string().nullish(),
+  reasoningEffort: z15.string().nullish(),
+  strictJsonSchema: z15.boolean().nullish(),
+  instructions: z15.string().nullish(),
+  reasoningSummary: z15.string().nullish(),
+  serviceTier: z15.enum(["auto", "flex"]).nullish(),
+  include: z15.array(z15.enum(["reasoning.encrypted_content"])).nullish()
 });
 export {
   OpenAIChatLanguageModel,