@ai-sdk/openai 2.0.0-beta.3 → 2.0.0-beta.5
This diff reflects the publicly available contents of the two package versions as published to their registry, and is provided for informational purposes only.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +5 -1
- package/dist/index.d.ts +5 -1
- package/dist/index.js +323 -194
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +313 -182
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +5 -1
- package/dist/internal/index.d.ts +5 -1
- package/dist/internal/index.js +311 -182
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +307 -176
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -1983,24 +1983,30 @@ var OpenAISpeechModel = class {
 };
 
 // src/responses/openai-responses-language-model.ts
+import {
+  APICallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders7,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as
+  parseProviderOptions as parseProviderOptions7,
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z15 } from "zod/v4";
 
 // src/responses/convert-to-openai-responses-messages.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-function convertToOpenAIResponsesMessages({
+import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+import { z as z14 } from "zod/v4";
+async function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
 }) {
+  var _a, _b;
   const messages = [];
   const warnings = [];
   for (const { role, content } of prompt) {
@@ -2035,7 +2041,7 @@ function convertToOpenAIResponsesMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var
+            var _a2, _b2, _c;
             switch (part.type) {
               case "text": {
                 return { type: "input_text", text: part.text };
@@ -2047,7 +2053,7 @@ function convertToOpenAIResponsesMessages({
                     type: "input_image",
                     image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
                     // OpenAI specific extension: image detail
-                    detail: (
+                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                   };
                 } else if (part.mediaType === "application/pdf") {
                   if (part.data instanceof URL) {
@@ -2072,6 +2078,7 @@ function convertToOpenAIResponsesMessages({
           break;
         }
         case "assistant": {
+          const reasoningMessages = {};
           for (const part of content) {
             switch (part.type) {
               case "text": {
@@ -2100,6 +2107,43 @@ function convertToOpenAIResponsesMessages({
                 });
                 break;
               }
+              case "reasoning": {
+                const providerOptions = await parseProviderOptions6({
+                  provider: "openai",
+                  providerOptions: part.providerOptions,
+                  schema: openaiResponsesReasoningProviderOptionsSchema
+                });
+                const reasoningId = (_a = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _a.id;
+                if (reasoningId != null) {
+                  const existingReasoningMessage = reasoningMessages[reasoningId];
+                  const summaryParts = [];
+                  if (part.text.length > 0) {
+                    summaryParts.push({ type: "summary_text", text: part.text });
+                  } else if (existingReasoningMessage !== void 0) {
+                    warnings.push({
+                      type: "other",
+                      message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+                    });
+                  }
+                  if (existingReasoningMessage === void 0) {
+                    reasoningMessages[reasoningId] = {
+                      type: "reasoning",
+                      id: reasoningId,
+                      encrypted_content: (_b = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _b.encryptedContent,
+                      summary: summaryParts
+                    };
+                    messages.push(reasoningMessages[reasoningId]);
+                  } else {
+                    existingReasoningMessage.summary.push(...summaryParts);
+                  }
+                } else {
+                  warnings.push({
+                    type: "other",
+                    message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+                  });
+                }
+                break;
+              }
             }
           }
           break;
@@ -2135,6 +2179,12 @@ function convertToOpenAIResponsesMessages({
   }
   return { messages, warnings };
 }
+var openaiResponsesReasoningProviderOptionsSchema = z14.object({
+  reasoning: z14.object({
+    id: z14.string().nullish(),
+    encryptedContent: z14.string().nullish()
+  }).nullish()
+});
 
 // src/responses/map-openai-responses-finish-reason.ts
 function mapOpenAIResponseFinishReason({
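
The new reasoning branch above, together with openaiResponsesReasoningProviderOptionsSchema, groups assistant reasoning parts by their OpenAI reasoning id and folds them into a single reasoning input item whose summary array collects the individual texts. A rough sketch of the part shape the converter now reads (TypeScript; the id and text values are invented for illustration):

    // Hypothetical assistant reasoning part in the prompt; the converter only
    // reads providerOptions.openai.reasoning.id and .encryptedContent from it.
    const reasoningPart = {
      type: "reasoning",
      text: "Compared both approaches before answering.",
      providerOptions: {
        openai: {
          reasoning: {
            id: "rs_example_123", // invented reasoning item id
            encryptedContent: null, // or an encrypted payload string
          },
        },
      },
    } as const;

Parts that share the same id are appended to the existing item's summary; parts without an OpenAI reasoning id are skipped with a warning.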
@@ -2275,12 +2325,12 @@ var OpenAIResponsesLanguageModel = class {
     if (stopSequences != null) {
       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
-    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = await
+    const openaiOptions = await parseProviderOptions7({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -2311,6 +2361,7 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+      include: openaiOptions == null ? void 0 : openaiOptions.include,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2343,6 +2394,21 @@ var OpenAIResponsesLanguageModel = class {
           details: "topP is not supported for reasoning models"
         });
       }
+    } else {
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningEffort",
+          details: "reasoningEffort is not supported for non-reasoning models"
+        });
+      }
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningSummary",
+          details: "reasoningSummary is not supported for non-reasoning models"
+        });
+      }
     }
     if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
       warnings.push({
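
Taken together, the new include field and the warnings above mean reasoning-related options are only forwarded for reasoning-capable models. A minimal usage sketch, assuming the AI SDK v5 beta generateText API and an illustrative model id; the option names mirror the provider options schema later in this diff:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai.responses("o4-mini"), // assumed reasoning-capable model id
      prompt: "Summarize the trade-offs of optimistic locking.",
      providerOptions: {
        openai: {
          reasoningEffort: "low",
          reasoningSummary: "auto",
          include: ["reasoning.encrypted_content"],
        },
      },
    });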
@@ -2371,88 +2437,119 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
+    const url = this.config.url({
+      path: "/responses",
+      modelId: this.modelId
+    });
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
     } = await postJsonToApi6({
-      url
-      path: "/responses",
-      modelId: this.modelId
-      }),
+      url,
       headers: combineHeaders7(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler6(
-
-          id:
-          created_at:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        z15.object({
+          id: z15.string(),
+          created_at: z15.number(),
+          error: z15.object({
+            code: z15.string(),
+            message: z15.string()
+          }).nullish(),
+          model: z15.string(),
+          output: z15.array(
+            z15.discriminatedUnion("type", [
+              z15.object({
+                type: z15.literal("message"),
+                role: z15.literal("assistant"),
+                content: z15.array(
+                  z15.object({
+                    type: z15.literal("output_text"),
+                    text: z15.string(),
+                    annotations: z15.array(
+                      z15.object({
+                        type: z15.literal("url_citation"),
+                        start_index: z15.number(),
+                        end_index: z15.number(),
+                        url: z15.string(),
+                        title: z15.string()
                       })
                     )
                   })
                 )
              }),
-
-                type:
-                call_id:
-                name:
-                arguments:
+              z15.object({
+                type: z15.literal("function_call"),
+                call_id: z15.string(),
+                name: z15.string(),
+                arguments: z15.string()
              }),
-
-                type:
-                id:
-                status:
+              z15.object({
+                type: z15.literal("web_search_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-
-                type:
-                id:
-                status:
+              z15.object({
+                type: z15.literal("computer_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-
-                type:
-
-
-
-
+              z15.object({
+                type: z15.literal("reasoning"),
+                id: z15.string(),
+                encrypted_content: z15.string().nullish(),
+                summary: z15.array(
+                  z15.object({
+                    type: z15.literal("summary_text"),
+                    text: z15.string()
                   })
                 )
              })
            ])
          ),
-          incomplete_details:
+          incomplete_details: z15.object({ reason: z15.string() }).nullable(),
           usage: usageSchema2
         })
       ),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
+    if (response.error) {
+      throw new APICallError({
+        message: response.error.message,
+        url,
+        requestBodyValues: body,
+        statusCode: 400,
+        responseHeaders,
+        responseBody: rawResponse,
+        isRetryable: false
+      });
+    }
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
-
-          type: "
-
-
+          if (part.summary.length === 0) {
+            part.summary.push({ type: "summary_text", text: "" });
+          }
+          for (const summary of part.summary) {
+            content.push({
+              type: "reasoning",
+              text: summary.text,
+              providerMetadata: {
+                openai: {
+                  reasoning: {
+                    id: part.id,
+                    encryptedContent: (_a = part.encrypted_content) != null ? _a : null
+                  }
+                }
+              }
+            });
+          }
          break;
        }
        case "message": {
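
doGenerate now builds the request URL up front, validates a response-level error object and surfaces it as an APICallError, and maps each reasoning summary entry to a reasoning content part carrying providerMetadata.openai.reasoning. A sketch of how a caller might handle the new error path, assuming the APICallError export from @ai-sdk/provider and the same generateText call shape as above:

    import { APICallError } from "@ai-sdk/provider";
    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    try {
      await generateText({
        model: openai.responses("o4-mini"), // assumed model id
        prompt: "…",
      });
    } catch (error) {
      if (APICallError.isInstance(error)) {
        // message comes from response.error.message; statusCode is fixed at 400 here
        console.error(error.statusCode, error.message);
      } else {
        throw error;
      }
    }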
@@ -2465,7 +2562,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
               url: annotation.url,
               title: annotation.title
             });
@@ -2524,15 +2621,15 @@ var OpenAIResponsesLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
+        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (
-        cachedInputTokens: (
+        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -2586,7 +2683,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -2635,7 +2732,15 @@ var OpenAIResponsesLanguageModel = class {
           } else if (value.item.type === "reasoning") {
             controller.enqueue({
               type: "reasoning-start",
-              id: value.item.id
+              id: value.item.id,
+              providerMetadata: {
+                openai: {
+                  reasoning: {
+                    id: value.item.id,
+                    encryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+                  }
+                }
+              }
             });
           }
         } else if (isResponseOutputItemDoneChunk(value)) {
@@ -2708,7 +2813,15 @@ var OpenAIResponsesLanguageModel = class {
           } else if (value.item.type === "reasoning") {
             controller.enqueue({
               type: "reasoning-end",
-              id: value.item.id
+              id: value.item.id,
+              providerMetadata: {
+                openai: {
+                  reasoning: {
+                    id: value.item.id,
+                    encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+                  }
+                }
+              }
             });
           }
         } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
@@ -2737,27 +2850,29 @@ var OpenAIResponsesLanguageModel = class {
           } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
             controller.enqueue({
               type: "reasoning-delta",
-
-
+              id: value.item_id,
+              delta: value.delta
             });
           } else if (isResponseFinishedChunk(value)) {
             finishReason = mapOpenAIResponseFinishReason({
-              finishReason: (
+              finishReason: (_c = value.response.incomplete_details) == null ? void 0 : _c.reason,
               hasToolCalls
             });
             usage.inputTokens = value.response.usage.input_tokens;
             usage.outputTokens = value.response.usage.output_tokens;
             usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-            usage.reasoningTokens = (
-            usage.cachedInputTokens = (
+            usage.reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+            usage.cachedInputTokens = (_g = (_f = value.response.usage.input_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
           } else if (isResponseAnnotationAddedChunk(value)) {
             controller.enqueue({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_j = (_i = (_h = self.config).generateId) == null ? void 0 : _i.call(_h)) != null ? _j : generateId2(),
               url: value.annotation.url,
               title: value.annotation.title
             });
+          } else if (isErrorChunk(value)) {
+            controller.enqueue({ type: "error", error: value });
           }
         },
         flush(controller) {
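
In streaming mode the reasoning id and encrypted content now ride along as providerMetadata.openai.reasoning on the reasoning-start and reasoning-end parts, and provider error chunks are forwarded as stream error parts. A consumer-side sketch, assuming the AI SDK v5 beta streamText API; the part type names follow the chunks enqueued in this diff:

    import { streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const { fullStream } = streamText({
      model: openai.responses("o4-mini"), // assumed model id
      prompt: "Explain CRDTs briefly.",
      providerOptions: { openai: { reasoningSummary: "auto" } },
    });

    for await (const part of fullStream) {
      switch (part.type) {
        case "reasoning-start":
        case "reasoning-delta":
        case "reasoning-end":
          console.log(part.type, JSON.stringify(part));
          break;
        case "error":
          console.error("provider error chunk:", part.error);
          break;
      }
    }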
@@ -2779,124 +2894,136 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema2 =
-  input_tokens:
-  input_tokens_details:
-  output_tokens:
-  output_tokens_details:
+var usageSchema2 = z15.object({
+  input_tokens: z15.number(),
+  input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
+  output_tokens: z15.number(),
+  output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema =
-  type:
-  item_id:
-  delta:
+var textDeltaChunkSchema = z15.object({
+  type: z15.literal("response.output_text.delta"),
+  item_id: z15.string(),
+  delta: z15.string()
 });
-var
-  type:
-
-
+var errorChunkSchema = z15.object({
+  type: z15.literal("error"),
+  code: z15.string(),
+  message: z15.string(),
+  param: z15.string().nullish(),
+  sequence_number: z15.number()
+});
+var responseFinishedChunkSchema = z15.object({
+  type: z15.enum(["response.completed", "response.incomplete"]),
+  response: z15.object({
+    incomplete_details: z15.object({ reason: z15.string() }).nullish(),
     usage: usageSchema2
   })
 });
-var responseCreatedChunkSchema =
-  type:
-  response:
-    id:
-    created_at:
-    model:
+var responseCreatedChunkSchema = z15.object({
+  type: z15.literal("response.created"),
+  response: z15.object({
+    id: z15.string(),
+    created_at: z15.number(),
+    model: z15.string()
   })
 });
-var responseOutputItemAddedSchema =
-  type:
-  output_index:
-  item:
-
-    type:
-    id:
+var responseOutputItemAddedSchema = z15.object({
+  type: z15.literal("response.output_item.added"),
+  output_index: z15.number(),
+  item: z15.discriminatedUnion("type", [
+    z15.object({
+      type: z15.literal("message"),
+      id: z15.string()
     }),
-
-    type:
-    id:
+    z15.object({
+      type: z15.literal("reasoning"),
+      id: z15.string(),
+      encrypted_content: z15.string().nullish(),
+      summary: z15.array(
+        z15.object({
+          type: z15.literal("summary_text"),
+          text: z15.string()
+        })
+      )
     }),
-
-    type:
-    id:
-    call_id:
-    name:
-    arguments:
+    z15.object({
+      type: z15.literal("function_call"),
+      id: z15.string(),
+      call_id: z15.string(),
+      name: z15.string(),
+      arguments: z15.string()
    }),
-
-    type:
-    id:
-    status:
+    z15.object({
+      type: z15.literal("web_search_call"),
+      id: z15.string(),
+      status: z15.string()
    }),
-
-    type:
-    id:
-    status:
+    z15.object({
+      type: z15.literal("computer_call"),
+      id: z15.string(),
+      status: z15.string()
    })
  ])
 });
-var responseOutputItemDoneSchema =
-  type:
-  output_index:
-  item:
-
-    type:
-    id:
+var responseOutputItemDoneSchema = z15.object({
+  type: z15.literal("response.output_item.done"),
+  output_index: z15.number(),
+  item: z15.discriminatedUnion("type", [
+    z15.object({
+      type: z15.literal("message"),
+      id: z15.string()
    }),
-
-    type:
-    id:
+    z15.object({
+      type: z15.literal("reasoning"),
+      id: z15.string(),
+      encrypted_content: z15.string().nullish(),
+      summary: z15.array(
+        z15.object({
+          type: z15.literal("summary_text"),
+          text: z15.string()
+        })
+      )
    }),
-
-    type:
-    id:
-    call_id:
-    name:
-    arguments:
-    status:
+    z15.object({
+      type: z15.literal("function_call"),
+      id: z15.string(),
+      call_id: z15.string(),
+      name: z15.string(),
+      arguments: z15.string(),
+      status: z15.literal("completed")
    }),
-
-    type:
-    id:
-    status:
+    z15.object({
+      type: z15.literal("web_search_call"),
+      id: z15.string(),
+      status: z15.literal("completed")
    }),
-
-    type:
-    id:
-    status:
+    z15.object({
+      type: z15.literal("computer_call"),
+      id: z15.string(),
+      status: z15.literal("completed")
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema =
-  type:
-  item_id:
-  output_index:
-  delta:
+var responseFunctionCallArgumentsDeltaSchema = z15.object({
+  type: z15.literal("response.function_call_arguments.delta"),
+  item_id: z15.string(),
+  output_index: z15.number(),
+  delta: z15.string()
 });
-var responseAnnotationAddedSchema =
-  type:
-  annotation:
-    type:
-    url:
-    title:
+var responseAnnotationAddedSchema = z15.object({
+  type: z15.literal("response.output_text.annotation.added"),
+  annotation: z15.object({
+    type: z15.literal("url_citation"),
+    url: z15.string(),
+    title: z15.string()
  })
 });
-var responseReasoningSummaryTextDeltaSchema =
-  type:
-  item_id:
-
-  summary_index: z14.number(),
-  delta: z14.string()
-});
-var responseReasoningSummaryPartDoneSchema = z14.object({
-  type: z14.literal("response.reasoning_summary_part.done"),
-  item_id: z14.string(),
-  output_index: z14.number(),
-  summary_index: z14.number(),
-  part: z14.unknown().nullish()
+var responseReasoningSummaryTextDeltaSchema = z15.object({
+  type: z15.literal("response.reasoning_summary_text.delta"),
+  item_id: z15.string(),
+  delta: z15.string()
 });
-var openaiResponsesChunkSchema =
+var openaiResponsesChunkSchema = z15.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
@@ -2905,8 +3032,8 @@ var openaiResponsesChunkSchema = z14.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
-
-
+  errorChunkSchema,
+  z15.object({ type: z15.string() }).loose()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
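
The chunk union now recognizes a dedicated error chunk and keeps a loose object as the fallback for unknown chunk types. A hypothetical server-sent chunk that errorChunkSchema accepts and that the stream forwards as an error part; all field values are invented:

    const exampleErrorChunk = {
      type: "error",
      code: "rate_limit_exceeded", // invented error code
      message: "Rate limit reached for requests.",
      param: null,
      sequence_number: 42,
    } as const;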
@@ -2933,8 +3060,11 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isErrorChunk(chunk) {
+  return chunk.type === "error";
+}
 function getResponsesModelConfig(modelId) {
-  if (modelId.startsWith("o")) {
+  if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
         isReasoningModel: true,
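
With the widened prefix check, codex- and computer-use model ids are now routed through the reasoning-model configuration, so the reasoning options shown earlier apply to them as well. A short sketch; the model id is an assumption, not taken from this package:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    await generateText({
      model: openai.responses("codex-mini-latest"), // assumed codex- prefixed model id
      prompt: "Write a one-line shell command that counts TODO comments.",
      providerOptions: { openai: { reasoningEffort: "medium" } },
    });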
@@ -2957,17 +3087,18 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
-var openaiResponsesProviderOptionsSchema =
-  metadata:
-  parallelToolCalls:
-  previousResponseId:
-  store:
-  user:
-  reasoningEffort:
-  strictJsonSchema:
-  instructions:
-  reasoningSummary:
-  serviceTier:
+var openaiResponsesProviderOptionsSchema = z15.object({
+  metadata: z15.any().nullish(),
+  parallelToolCalls: z15.boolean().nullish(),
+  previousResponseId: z15.string().nullish(),
+  store: z15.boolean().nullish(),
+  user: z15.string().nullish(),
+  reasoningEffort: z15.string().nullish(),
+  strictJsonSchema: z15.boolean().nullish(),
+  instructions: z15.string().nullish(),
+  reasoningSummary: z15.string().nullish(),
+  serviceTier: z15.enum(["auto", "flex"]).nullish(),
+  include: z15.array(z15.enum(["reasoning.encrypted_content"])).nullish()
 });
 export {
   OpenAIChatLanguageModel,