@ai-sdk/openai 2.0.0-beta.4 → 2.0.0-beta.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.js +109 -41
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +111 -41
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +109 -41
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +111 -41
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.js
CHANGED
@@ -314,7 +314,7 @@ var openaiProviderOptions = import_v4.z.object({
   /**
    * Whether to use strict JSON schema validation.
    *
-   * @default
+   * @default false
    */
   strictJsonSchema: import_v4.z.boolean().optional()
 });
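A hedged usage sketch (illustrative, not from the package): the option documented above is the strictJsonSchema provider option, whose default is now documented as false. Assuming the AI SDK v5 providerOptions pass-through; the model id and prompt are made up.

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Opt back into strict JSON schema validation for a single call.
const { object } = await generateObject({
  model: openai('gpt-4.1-mini'), // illustrative model id
  schema: z.object({ city: z.string() }),
  prompt: 'Name a city.',
  providerOptions: { openai: { strictJsonSchema: true } },
});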
@@ -1976,6 +1976,7 @@ var OpenAISpeechModel = class {
 };

 // src/responses/openai-responses-language-model.ts
+var import_provider8 = require("@ai-sdk/provider");
 var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_v415 = require("zod/v4");

@@ -2100,7 +2101,7 @@ async function convertToOpenAIResponsesMessages({
           const summaryParts = [];
           if (part.text.length > 0) {
             summaryParts.push({ type: "summary_text", text: part.text });
-          } else {
+          } else if (existingReasoningMessage !== void 0) {
             warnings.push({
               type: "other",
               message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
@@ -2418,15 +2419,16 @@ var OpenAIResponsesLanguageModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
+    const url = this.config.url({
+      path: "/responses",
+      modelId: this.modelId
+    });
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
     } = await (0, import_provider_utils12.postJsonToApi)({
-      url
-        path: "/responses",
-        modelId: this.modelId
-      }),
+      url,
       headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
@@ -2434,6 +2436,10 @@ var OpenAIResponsesLanguageModel = class {
         import_v415.z.object({
           id: import_v415.z.string(),
           created_at: import_v415.z.number(),
+          error: import_v415.z.object({
+            code: import_v415.z.string(),
+            message: import_v415.z.string()
+          }).nullish(),
           model: import_v415.z.string(),
           output: import_v415.z.array(
             import_v415.z.discriminatedUnion("type", [
@@ -2492,6 +2498,17 @@ var OpenAIResponsesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
+    if (response.error) {
+      throw new import_provider8.APICallError({
+        message: response.error.message,
+        url,
+        requestBodyValues: body,
+        statusCode: 400,
+        responseHeaders,
+        responseBody: rawResponse,
+        isRetryable: false
+      });
+    }
     const content = [];
     for (const part of response.output) {
       switch (part.type) {
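A hedged consumer-side sketch (illustrative, not from the package): with this hunk, a Responses API body that carries an error object is surfaced as an APICallError (statusCode 400, non-retryable) instead of being parsed as a partial result. The call site below (generateText, model id, prompt) is assumed for illustration.

import { generateText } from 'ai';
import { APICallError } from '@ai-sdk/provider';
import { openai } from '@ai-sdk/openai';

try {
  await generateText({
    model: openai.responses('gpt-4.1-mini'), // illustrative model id
    prompt: 'Hello',
  });
} catch (error) {
  if (APICallError.isInstance(error)) {
    // message comes from response.error.message in the schema added above
    console.error(error.statusCode, error.isRetryable, error.message);
  }
}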
@@ -2639,6 +2656,7 @@ var OpenAIResponsesLanguageModel = class {
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
+    const activeReasoning = {};
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -2646,7 +2664,7 @@ var OpenAIResponsesLanguageModel = class {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
@@ -2692,10 +2710,14 @@ var OpenAIResponsesLanguageModel = class {
                  type: "text-start",
                  id: value.item.id
                });
-              } else if (value
+              } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+                activeReasoning[value.item.id] = {
+                  encryptedContent: value.item.encrypted_content,
+                  summaryParts: [0]
+                };
                controller.enqueue({
                  type: "reasoning-start",
-                  id: value.item.id
+                  id: `${value.item.id}:0`,
                  providerMetadata: {
                    openai: {
                      reasoning: {
@@ -2773,19 +2795,23 @@ var OpenAIResponsesLanguageModel = class {
                  type: "text-end",
                  id: value.item.id
                });
-              } else if (value
-
-
-
-
-
-
-
-
+              } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+                const activeReasoningPart = activeReasoning[value.item.id];
+                for (const summaryIndex of activeReasoningPart.summaryParts) {
+                  controller.enqueue({
+                    type: "reasoning-end",
+                    id: `${value.item.id}:${summaryIndex}`,
+                    providerMetadata: {
+                      openai: {
+                        reasoning: {
+                          id: value.item.id,
+                          encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+                        }
                      }
                    }
-                }
-                }
+                  });
+                }
+                delete activeReasoning[value.item.id];
              }
            } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
              const toolCall = ongoingToolCalls[value.output_index];
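A type-level sketch of the bookkeeping introduced in these hunks (names follow the diff; the record shape is inferred from it): each active reasoning output item tracks its encrypted content and the summary-part indices seen so far, so a matching reasoning-end can be emitted for every part when the item completes.

type ActiveReasoning = Record<
  string, // reasoning output item id, e.g. "rs_123" (illustrative)
  {
    encryptedContent: string | null | undefined; // from value.item.encrypted_content
    summaryParts: number[]; // starts as [0]; later summary_index values are appended
  }
>;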
@@ -2810,30 +2836,57 @@ var OpenAIResponsesLanguageModel = class {
                id: value.item_id,
                delta: value.delta
              });
+            } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+              if (value.summary_index > 0) {
+                (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+                  value.summary_index
+                );
+                controller.enqueue({
+                  type: "reasoning-start",
+                  id: `${value.item_id}:${value.summary_index}`,
+                  providerMetadata: {
+                    openai: {
+                      reasoning: {
+                        id: value.item_id,
+                        encryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+                      }
+                    }
+                  }
+                });
+              }
            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
              controller.enqueue({
                type: "reasoning-delta",
-                id: value.item_id
-                delta: value.delta
+                id: `${value.item_id}:${value.summary_index}`,
+                delta: value.delta,
+                providerMetadata: {
+                  openai: {
+                    reasoning: {
+                      id: value.item_id
+                    }
+                  }
+                }
              });
            } else if (isResponseFinishedChunk(value)) {
              finishReason = mapOpenAIResponseFinishReason({
-                finishReason: (
+                finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
                hasToolCalls
              });
              usage.inputTokens = value.response.usage.input_tokens;
              usage.outputTokens = value.response.usage.output_tokens;
              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-              usage.reasoningTokens = (
-              usage.cachedInputTokens = (
+              usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+              usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
            } else if (isResponseAnnotationAddedChunk(value)) {
              controller.enqueue({
                type: "source",
                sourceType: "url",
-                id: (
+                id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
                url: value.annotation.url,
                title: value.annotation.title
              });
+            } else if (isErrorChunk(value)) {
+              controller.enqueue({ type: "error", error: value });
            }
          },
          flush(controller) {
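A minimal sketch (not the SDK's own API): streamed reasoning parts are now identified as `<item_id>:<summary_index>`, so each summary part opens, streams, and ends independently. The part shapes below mirror what the transform above enqueues; the function name and the example ids are illustrative.

type ReasoningStreamPart =
  | { type: 'reasoning-start'; id: string }
  | { type: 'reasoning-delta'; id: string; delta: string }
  | { type: 'reasoning-end'; id: string };

// Collect reasoning text per "<itemId>:<summaryIndex>" id.
function collectReasoning(parts: ReasoningStreamPart[]): Map<string, string> {
  const buffers = new Map<string, string>();
  for (const part of parts) {
    if (part.type === 'reasoning-start') buffers.set(part.id, '');
    if (part.type === 'reasoning-delta') {
      buffers.set(part.id, (buffers.get(part.id) ?? '') + part.delta);
    }
  }
  return buffers; // e.g. "rs_123:0" -> first summary part, "rs_123:1" -> second
}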
@@ -2866,6 +2919,13 @@ var textDeltaChunkSchema = import_v415.z.object({
   item_id: import_v415.z.string(),
   delta: import_v415.z.string()
 });
+var errorChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("error"),
+  code: import_v415.z.string(),
+  message: import_v415.z.string(),
+  param: import_v415.z.string().nullish(),
+  sequence_number: import_v415.z.number()
+});
 var responseFinishedChunkSchema = import_v415.z.object({
   type: import_v415.z.enum(["response.completed", "response.incomplete"]),
   response: import_v415.z.object({
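An illustrative chunk that the new errorChunkSchema would accept (field names come from the schema above; the values are made up). Chunks of this shape are re-emitted by the transform as { type: "error", error: value }.

const exampleErrorChunk = {
  type: 'error',
  code: 'server_error', // illustrative code
  message: 'The model produced an invalid response.', // illustrative message
  param: null,
  sequence_number: 7,
};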
@@ -2892,13 +2952,7 @@ var responseOutputItemAddedSchema = import_v415.z.object({
     import_v415.z.object({
       type: import_v415.z.literal("reasoning"),
       id: import_v415.z.string(),
-      encrypted_content: import_v415.z.string().nullish()
-      summary: import_v415.z.array(
-        import_v415.z.object({
-          type: import_v415.z.literal("summary_text"),
-          text: import_v415.z.string()
-        })
-      )
+      encrypted_content: import_v415.z.string().nullish()
     }),
     import_v415.z.object({
       type: import_v415.z.literal("function_call"),
@@ -2930,13 +2984,7 @@ var responseOutputItemDoneSchema = import_v415.z.object({
     import_v415.z.object({
       type: import_v415.z.literal("reasoning"),
       id: import_v415.z.string(),
-      encrypted_content: import_v415.z.string().nullish()
-      summary: import_v415.z.array(
-        import_v415.z.object({
-          type: import_v415.z.literal("summary_text"),
-          text: import_v415.z.string()
-        })
-      )
+      encrypted_content: import_v415.z.string().nullish()
     }),
     import_v415.z.object({
       type: import_v415.z.literal("function_call"),
@@ -2972,9 +3020,15 @@ var responseAnnotationAddedSchema = import_v415.z.object({
     title: import_v415.z.string()
   })
 });
+var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.reasoning_summary_part.added"),
+  item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number()
+});
 var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
   type: import_v415.z.literal("response.reasoning_summary_text.delta"),
   item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number(),
   delta: import_v415.z.string()
 });
 var openaiResponsesChunkSchema = import_v415.z.union([
@@ -2985,8 +3039,10 @@ var openaiResponsesChunkSchema = import_v415.z.union([
   responseOutputItemDoneSchema,
   responseFunctionCallArgumentsDeltaSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryPartAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
-
+  errorChunkSchema,
+  import_v415.z.object({ type: import_v415.z.string() }).loose()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2995,6 +3051,9 @@ function isTextDeltaChunk(chunk) {
 function isResponseOutputItemDoneChunk(chunk) {
   return chunk.type === "response.output_item.done";
 }
+function isResponseOutputItemDoneReasoningChunk(chunk) {
+  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+}
 function isResponseFinishedChunk(chunk) {
   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
 }
@@ -3007,12 +3066,21 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
+function isResponseOutputItemAddedReasoningChunk(chunk) {
+  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+}
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryPartAddedChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_part.added";
+}
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isErrorChunk(chunk) {
+  return chunk.type === "error";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {