@ai-sdk/openai 2.0.0-beta.3 → 2.0.0-beta.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +5 -1
- package/dist/index.d.ts +5 -1
- package/dist/index.js +323 -194
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +313 -182
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +5 -1
- package/dist/internal/index.d.ts +5 -1
- package/dist/internal/index.js +311 -182
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +307 -176
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
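The bulk of this diff is the Responses API language model learning to round-trip OpenAI reasoning items: a new `include` provider option (`reasoning.encrypted_content`), `openai.reasoning` provider metadata (item `id` and `encryptedContent`) on generated and streamed reasoning parts, warnings for reasoning options on non-reasoning models, and handling of streamed `error` chunks. As a rough consumer-side sketch (not part of this package diff), assuming the AI SDK `generateText` entry point and the `openai.responses()` model factory, the new options would be passed roughly like this:

// Hypothetical usage sketch; the option names come from
// openaiResponsesProviderOptionsSchema in the diff below, the call shape is assumed.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Explain the Monty Hall problem in two sentences.',
  providerOptions: {
    openai: {
      reasoningSummary: 'auto',
      // New in this release: ask the Responses API to return encrypted reasoning
      // content so it can be replayed on a follow-up request.
      include: ['reasoning.encrypted_content'],
    },
  },
});

// Reasoning parts now carry `openai.reasoning` provider metadata (item id and,
// when requested, the encrypted content), per the converted output below.
for (const part of result.content) {
  if (part.type === 'reasoning') {
    console.log(part.providerMetadata?.openai);
  }
}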
package/dist/internal/index.js
CHANGED
@@ -1976,15 +1976,19 @@ var OpenAISpeechModel = class {
 };
 
 // src/responses/openai-responses-language-model.ts
-var
-var
+var import_provider8 = require("@ai-sdk/provider");
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var import_v415 = require("zod/v4");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
-
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
+var import_v414 = require("zod/v4");
+async function convertToOpenAIResponsesMessages({
 prompt,
 systemMessageMode
 }) {
+var _a, _b;
 const messages = [];
 const warnings = [];
 for (const { role, content } of prompt) {
@@ -2019,7 +2023,7 @@ function convertToOpenAIResponsesMessages({
 messages.push({
 role: "user",
 content: content.map((part, index) => {
-var
+var _a2, _b2, _c;
 switch (part.type) {
 case "text": {
 return { type: "input_text", text: part.text };
@@ -2031,7 +2035,7 @@ function convertToOpenAIResponsesMessages({
 type: "input_image",
 image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
 // OpenAI specific extension: image detail
-detail: (
+detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
 };
 } else if (part.mediaType === "application/pdf") {
 if (part.data instanceof URL) {
@@ -2056,6 +2060,7 @@ function convertToOpenAIResponsesMessages({
 break;
 }
 case "assistant": {
+const reasoningMessages = {};
 for (const part of content) {
 switch (part.type) {
 case "text": {
@@ -2084,6 +2089,43 @@ function convertToOpenAIResponsesMessages({
 });
 break;
 }
+case "reasoning": {
+const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
+provider: "openai",
+providerOptions: part.providerOptions,
+schema: openaiResponsesReasoningProviderOptionsSchema
+});
+const reasoningId = (_a = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _a.id;
+if (reasoningId != null) {
+const existingReasoningMessage = reasoningMessages[reasoningId];
+const summaryParts = [];
+if (part.text.length > 0) {
+summaryParts.push({ type: "summary_text", text: part.text });
+} else if (existingReasoningMessage !== void 0) {
+warnings.push({
+type: "other",
+message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+});
+}
+if (existingReasoningMessage === void 0) {
+reasoningMessages[reasoningId] = {
+type: "reasoning",
+id: reasoningId,
+encrypted_content: (_b = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _b.encryptedContent,
+summary: summaryParts
+};
+messages.push(reasoningMessages[reasoningId]);
+} else {
+existingReasoningMessage.summary.push(...summaryParts);
+}
+} else {
+warnings.push({
+type: "other",
+message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+});
+}
+break;
+}
 }
 }
 break;
@@ -2119,6 +2161,12 @@ function convertToOpenAIResponsesMessages({
 }
 return { messages, warnings };
 }
+var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
+reasoning: import_v414.z.object({
+id: import_v414.z.string().nullish(),
+encryptedContent: import_v414.z.string().nullish()
+}).nullish()
+});
 
 // src/responses/map-openai-responses-finish-reason.ts
 function mapOpenAIResponseFinishReason({
@@ -2257,12 +2305,12 @@ var OpenAIResponsesLanguageModel = class {
 if (stopSequences != null) {
 warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
 }
-const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
 prompt,
 systemMessageMode: modelConfig.systemMessageMode
 });
 warnings.push(...messageWarnings);
-const openaiOptions = await (0,
+const openaiOptions = await (0, import_provider_utils12.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiResponsesProviderOptionsSchema
@@ -2293,6 +2341,7 @@ var OpenAIResponsesLanguageModel = class {
 user: openaiOptions == null ? void 0 : openaiOptions.user,
 instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
 service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+include: openaiOptions == null ? void 0 : openaiOptions.include,
 // model-specific settings:
 ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
 reasoning: {
@@ -2325,6 +2374,21 @@ var OpenAIResponsesLanguageModel = class {
 details: "topP is not supported for reasoning models"
 });
 }
+} else {
+if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+warnings.push({
+type: "unsupported-setting",
+setting: "reasoningEffort",
+details: "reasoningEffort is not supported for non-reasoning models"
+});
+}
+if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+warnings.push({
+type: "unsupported-setting",
+setting: "reasoningSummary",
+details: "reasoningSummary is not supported for non-reasoning models"
+});
+}
 }
 if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
 warnings.push({
@@ -2353,88 +2417,119 @@ var OpenAIResponsesLanguageModel = class {
 };
 }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e, _f, _g, _h;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i;
 const { args: body, warnings } = await this.getArgs(options);
+const url = this.config.url({
+path: "/responses",
+modelId: this.modelId
+});
 const {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
-url
-
-modelId: this.modelId
-}),
-headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
+} = await (0, import_provider_utils12.postJsonToApi)({
+url,
+headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
-
-id:
-created_at:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+successfulResponseHandler: (0, import_provider_utils12.createJsonResponseHandler)(
+import_v415.z.object({
+id: import_v415.z.string(),
+created_at: import_v415.z.number(),
+error: import_v415.z.object({
+code: import_v415.z.string(),
+message: import_v415.z.string()
+}).nullish(),
+model: import_v415.z.string(),
+output: import_v415.z.array(
+import_v415.z.discriminatedUnion("type", [
+import_v415.z.object({
+type: import_v415.z.literal("message"),
+role: import_v415.z.literal("assistant"),
+content: import_v415.z.array(
+import_v415.z.object({
+type: import_v415.z.literal("output_text"),
+text: import_v415.z.string(),
+annotations: import_v415.z.array(
+import_v415.z.object({
+type: import_v415.z.literal("url_citation"),
+start_index: import_v415.z.number(),
+end_index: import_v415.z.number(),
+url: import_v415.z.string(),
+title: import_v415.z.string()
 })
 )
 })
 )
 }),
-
-type:
-call_id:
-name:
-arguments:
+import_v415.z.object({
+type: import_v415.z.literal("function_call"),
+call_id: import_v415.z.string(),
+name: import_v415.z.string(),
+arguments: import_v415.z.string()
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("web_search_call"),
+id: import_v415.z.string(),
+status: import_v415.z.string().optional()
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("computer_call"),
+id: import_v415.z.string(),
+status: import_v415.z.string().optional()
 }),
-
-type:
-
-
-
-
+import_v415.z.object({
+type: import_v415.z.literal("reasoning"),
+id: import_v415.z.string(),
+encrypted_content: import_v415.z.string().nullish(),
+summary: import_v415.z.array(
+import_v415.z.object({
+type: import_v415.z.literal("summary_text"),
+text: import_v415.z.string()
 })
 )
 })
 ])
 ),
-incomplete_details:
+incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
 usage: usageSchema2
 })
 ),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
+if (response.error) {
+throw new import_provider8.APICallError({
+message: response.error.message,
+url,
+requestBodyValues: body,
+statusCode: 400,
+responseHeaders,
+responseBody: rawResponse,
+isRetryable: false
+});
+}
 const content = [];
 for (const part of response.output) {
 switch (part.type) {
 case "reasoning": {
-
-type: "
-
-
+if (part.summary.length === 0) {
+part.summary.push({ type: "summary_text", text: "" });
+}
+for (const summary of part.summary) {
+content.push({
+type: "reasoning",
+text: summary.text,
+providerMetadata: {
+openai: {
+reasoning: {
+id: part.id,
+encryptedContent: (_a = part.encrypted_content) != null ? _a : null
+}
+}
+}
+});
+}
 break;
 }
 case "message": {
@@ -2447,7 +2542,7 @@ var OpenAIResponsesLanguageModel = class {
 content.push({
 type: "source",
 sourceType: "url",
-id: (
+id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
 url: annotation.url,
 title: annotation.title
 });
@@ -2506,15 +2601,15 @@ var OpenAIResponsesLanguageModel = class {
 return {
 content,
 finishReason: mapOpenAIResponseFinishReason({
-finishReason: (
+finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
 hasToolCalls: content.some((part) => part.type === "tool-call")
 }),
 usage: {
 inputTokens: response.usage.input_tokens,
 outputTokens: response.usage.output_tokens,
 totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-reasoningTokens: (
-cachedInputTokens: (
+reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
 },
 request: { body },
 response: {
@@ -2534,18 +2629,18 @@ var OpenAIResponsesLanguageModel = class {
 }
 async doStream(options) {
 const { args: body, warnings } = await this.getArgs(options);
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils12.postJsonToApi)({
 url: this.config.url({
 path: "/responses",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
 body: {
 ...body,
 stream: true
 },
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils12.createEventSourceResponseHandler)(
 openaiResponsesChunkSchema
 ),
 abortSignal: options.abortSignal,
@@ -2568,7 +2663,7 @@ var OpenAIResponsesLanguageModel = class {
 controller.enqueue({ type: "stream-start", warnings });
 },
 transform(chunk, controller) {
-var _a, _b, _c, _d, _e, _f, _g, _h;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
 if (options.includeRawChunks) {
 controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
 }
@@ -2617,7 +2712,15 @@ var OpenAIResponsesLanguageModel = class {
 } else if (value.item.type === "reasoning") {
 controller.enqueue({
 type: "reasoning-start",
-id: value.item.id
+id: value.item.id,
+providerMetadata: {
+openai: {
+reasoning: {
+id: value.item.id,
+encryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+}
+}
+}
 });
 }
 } else if (isResponseOutputItemDoneChunk(value)) {
@@ -2690,7 +2793,15 @@ var OpenAIResponsesLanguageModel = class {
 } else if (value.item.type === "reasoning") {
 controller.enqueue({
 type: "reasoning-end",
-id: value.item.id
+id: value.item.id,
+providerMetadata: {
+openai: {
+reasoning: {
+id: value.item.id,
+encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+}
+}
+}
 });
 }
 } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
@@ -2719,27 +2830,29 @@ var OpenAIResponsesLanguageModel = class {
 } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
 controller.enqueue({
 type: "reasoning-delta",
-
-
+id: value.item_id,
+delta: value.delta
 });
 } else if (isResponseFinishedChunk(value)) {
 finishReason = mapOpenAIResponseFinishReason({
-finishReason: (
+finishReason: (_c = value.response.incomplete_details) == null ? void 0 : _c.reason,
 hasToolCalls
 });
 usage.inputTokens = value.response.usage.input_tokens;
 usage.outputTokens = value.response.usage.output_tokens;
 usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-usage.reasoningTokens = (
-usage.cachedInputTokens = (
+usage.reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+usage.cachedInputTokens = (_g = (_f = value.response.usage.input_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
 } else if (isResponseAnnotationAddedChunk(value)) {
 controller.enqueue({
 type: "source",
 sourceType: "url",
-id: (
+id: (_j = (_i = (_h = self.config).generateId) == null ? void 0 : _i.call(_h)) != null ? _j : (0, import_provider_utils12.generateId)(),
 url: value.annotation.url,
 title: value.annotation.title
 });
+} else if (isErrorChunk(value)) {
+controller.enqueue({ type: "error", error: value });
 }
 },
 flush(controller) {
@@ -2761,124 +2874,136 @@ var OpenAIResponsesLanguageModel = class {
 };
 }
 };
-var usageSchema2 =
-input_tokens:
-input_tokens_details:
-output_tokens:
-output_tokens_details:
+var usageSchema2 = import_v415.z.object({
+input_tokens: import_v415.z.number(),
+input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
+output_tokens: import_v415.z.number(),
+output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema =
-type:
-item_id:
-delta:
+var textDeltaChunkSchema = import_v415.z.object({
+type: import_v415.z.literal("response.output_text.delta"),
+item_id: import_v415.z.string(),
+delta: import_v415.z.string()
 });
-var
-type:
-
-
+var errorChunkSchema = import_v415.z.object({
+type: import_v415.z.literal("error"),
+code: import_v415.z.string(),
+message: import_v415.z.string(),
+param: import_v415.z.string().nullish(),
+sequence_number: import_v415.z.number()
+});
+var responseFinishedChunkSchema = import_v415.z.object({
+type: import_v415.z.enum(["response.completed", "response.incomplete"]),
+response: import_v415.z.object({
+incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
 usage: usageSchema2
 })
 });
-var responseCreatedChunkSchema =
-type:
-response:
-id:
-created_at:
-model:
+var responseCreatedChunkSchema = import_v415.z.object({
+type: import_v415.z.literal("response.created"),
+response: import_v415.z.object({
+id: import_v415.z.string(),
+created_at: import_v415.z.number(),
+model: import_v415.z.string()
 })
 });
-var responseOutputItemAddedSchema =
-type:
-output_index:
-item:
-
-type:
-id:
+var responseOutputItemAddedSchema = import_v415.z.object({
+type: import_v415.z.literal("response.output_item.added"),
+output_index: import_v415.z.number(),
+item: import_v415.z.discriminatedUnion("type", [
+import_v415.z.object({
+type: import_v415.z.literal("message"),
+id: import_v415.z.string()
 }),
-
-type:
-id:
+import_v415.z.object({
+type: import_v415.z.literal("reasoning"),
+id: import_v415.z.string(),
+encrypted_content: import_v415.z.string().nullish(),
+summary: import_v415.z.array(
+import_v415.z.object({
+type: import_v415.z.literal("summary_text"),
+text: import_v415.z.string()
+})
+)
 }),
-
-type:
-id:
-call_id:
-name:
-arguments:
+import_v415.z.object({
+type: import_v415.z.literal("function_call"),
+id: import_v415.z.string(),
+call_id: import_v415.z.string(),
+name: import_v415.z.string(),
+arguments: import_v415.z.string()
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("web_search_call"),
+id: import_v415.z.string(),
+status: import_v415.z.string()
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("computer_call"),
+id: import_v415.z.string(),
+status: import_v415.z.string()
 })
 ])
 });
-var responseOutputItemDoneSchema =
-type:
-output_index:
-item:
-
-type:
-id:
+var responseOutputItemDoneSchema = import_v415.z.object({
+type: import_v415.z.literal("response.output_item.done"),
+output_index: import_v415.z.number(),
+item: import_v415.z.discriminatedUnion("type", [
+import_v415.z.object({
+type: import_v415.z.literal("message"),
+id: import_v415.z.string()
 }),
-
-type:
-id:
+import_v415.z.object({
+type: import_v415.z.literal("reasoning"),
+id: import_v415.z.string(),
+encrypted_content: import_v415.z.string().nullish(),
+summary: import_v415.z.array(
+import_v415.z.object({
+type: import_v415.z.literal("summary_text"),
+text: import_v415.z.string()
+})
+)
 }),
-
-type:
-id:
-call_id:
-name:
-arguments:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("function_call"),
+id: import_v415.z.string(),
+call_id: import_v415.z.string(),
+name: import_v415.z.string(),
+arguments: import_v415.z.string(),
+status: import_v415.z.literal("completed")
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("web_search_call"),
+id: import_v415.z.string(),
+status: import_v415.z.literal("completed")
 }),
-
-type:
-id:
-status:
+import_v415.z.object({
+type: import_v415.z.literal("computer_call"),
+id: import_v415.z.string(),
+status: import_v415.z.literal("completed")
 })
 ])
 });
-var responseFunctionCallArgumentsDeltaSchema =
-type:
-item_id:
-output_index:
-delta:
+var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
+type: import_v415.z.literal("response.function_call_arguments.delta"),
+item_id: import_v415.z.string(),
+output_index: import_v415.z.number(),
+delta: import_v415.z.string()
 });
-var responseAnnotationAddedSchema =
-type:
-annotation:
-type:
-url:
-title:
+var responseAnnotationAddedSchema = import_v415.z.object({
+type: import_v415.z.literal("response.output_text.annotation.added"),
+annotation: import_v415.z.object({
+type: import_v415.z.literal("url_citation"),
+url: import_v415.z.string(),
+title: import_v415.z.string()
 })
 });
-var responseReasoningSummaryTextDeltaSchema =
-type:
-item_id:
-
-summary_index: import_v414.z.number(),
-delta: import_v414.z.string()
-});
-var responseReasoningSummaryPartDoneSchema = import_v414.z.object({
-type: import_v414.z.literal("response.reasoning_summary_part.done"),
-item_id: import_v414.z.string(),
-output_index: import_v414.z.number(),
-summary_index: import_v414.z.number(),
-part: import_v414.z.unknown().nullish()
+var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
+type: import_v415.z.literal("response.reasoning_summary_text.delta"),
+item_id: import_v415.z.string(),
+delta: import_v415.z.string()
 });
-var openaiResponsesChunkSchema =
+var openaiResponsesChunkSchema = import_v415.z.union([
 textDeltaChunkSchema,
 responseFinishedChunkSchema,
 responseCreatedChunkSchema,
@@ -2887,8 +3012,8 @@ var openaiResponsesChunkSchema = import_v414.z.union([
 responseFunctionCallArgumentsDeltaSchema,
 responseAnnotationAddedSchema,
 responseReasoningSummaryTextDeltaSchema,
-
-
+errorChunkSchema,
+import_v415.z.object({ type: import_v415.z.string() }).loose()
 // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2915,8 +3040,11 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
 return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isErrorChunk(chunk) {
+return chunk.type === "error";
+}
 function getResponsesModelConfig(modelId) {
-if (modelId.startsWith("o")) {
+if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
 if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
 return {
 isReasoningModel: true,
@@ -2939,17 +3067,18 @@ function getResponsesModelConfig(modelId) {
 function supportsFlexProcessing2(modelId) {
 return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
-var openaiResponsesProviderOptionsSchema =
-metadata:
-parallelToolCalls:
-previousResponseId:
-store:
-user:
-reasoningEffort:
-strictJsonSchema:
-instructions:
-reasoningSummary:
-serviceTier:
+var openaiResponsesProviderOptionsSchema = import_v415.z.object({
+metadata: import_v415.z.any().nullish(),
+parallelToolCalls: import_v415.z.boolean().nullish(),
+previousResponseId: import_v415.z.string().nullish(),
+store: import_v415.z.boolean().nullish(),
+user: import_v415.z.string().nullish(),
+reasoningEffort: import_v415.z.string().nullish(),
+strictJsonSchema: import_v415.z.boolean().nullish(),
+instructions: import_v415.z.string().nullish(),
+reasoningSummary: import_v415.z.string().nullish(),
+serviceTier: import_v415.z.enum(["auto", "flex"]).nullish(),
+include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content"])).nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {