@ai-sdk/openai 2.0.0-beta.4 → 2.0.0-beta.6
- package/CHANGELOG.md +15 -0
- package/dist/index.js +109 -41
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +111 -41
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +109 -41
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +111 -41
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
@@ -288,7 +288,7 @@ var openaiProviderOptions = z.object({
   /**
    * Whether to use strict JSON schema validation.
    *
-   * @default
+   * @default false
    */
  strictJsonSchema: z.boolean().optional()
});
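
For reference, `strictJsonSchema` is read from the per-call provider options that this `openaiProviderOptions` schema validates. A minimal sketch of opting in explicitly, assuming the AI SDK v5 `providerOptions` plumbing and a placeholder model id and prompt:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// With the documentation fix above, omitting the option reads as
// `@default false`; pass it explicitly to opt in to strict validation.
const { text } = await generateText({
  model: openai("gpt-4o"), // placeholder model id
  prompt: "Summarize the changelog in one sentence.",
  providerOptions: {
    openai: { strictJsonSchema: true },
  },
});
console.log(text);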
@@ -1983,6 +1983,9 @@ var OpenAISpeechModel = class {
 };
 
 // src/responses/openai-responses-language-model.ts
+import {
+  APICallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders7,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
@@ -2116,7 +2119,7 @@ async function convertToOpenAIResponsesMessages({
         const summaryParts = [];
         if (part.text.length > 0) {
           summaryParts.push({ type: "summary_text", text: part.text });
-        } else {
+        } else if (existingReasoningMessage !== void 0) {
           warnings.push({
             type: "other",
             message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
@@ -2436,15 +2439,16 @@ var OpenAIResponsesLanguageModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
+    const url = this.config.url({
+      path: "/responses",
+      modelId: this.modelId
+    });
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
     } = await postJsonToApi6({
-      url: this.config.url({
-        path: "/responses",
-        modelId: this.modelId
-      }),
+      url,
       headers: combineHeaders7(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
@@ -2452,6 +2456,10 @@ var OpenAIResponsesLanguageModel = class {
        z15.object({
          id: z15.string(),
          created_at: z15.number(),
+          error: z15.object({
+            code: z15.string(),
+            message: z15.string()
+          }).nullish(),
          model: z15.string(),
          output: z15.array(
            z15.discriminatedUnion("type", [
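
The new nullable `error` field lets the success handler parse Responses API bodies that carry an embedded error object. An illustrative fragment of such a body (other required fields omitted; all values are hypothetical):

// Hypothetical parsed response body; the embedded `error` is what
// doGenerate converts into an APICallError in a following hunk.
const exampleResponse = {
  id: "resp_123",
  created_at: 1718000000,
  error: { code: "server_error", message: "The model failed to respond." },
  model: "gpt-4o",
  output: [],
};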
@@ -2510,6 +2518,17 @@ var OpenAIResponsesLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
+    if (response.error) {
+      throw new APICallError({
+        message: response.error.message,
+        url,
+        requestBodyValues: body,
+        statusCode: 400,
+        responseHeaders,
+        responseBody: rawResponse,
+        isRetryable: false
+      });
+    }
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
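
Callers see the embedded error as a normal provider error. A minimal sketch of handling it downstream, assuming the `APICallError.isInstance` helper from `@ai-sdk/provider` and placeholder model/prompt values:

import { APICallError } from "@ai-sdk/provider";
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

try {
  await generateText({
    model: openai.responses("o3-mini"), // placeholder model id
    prompt: "…",
  });
} catch (error) {
  if (APICallError.isInstance(error)) {
    // Errors embedded in an otherwise successful response body now
    // surface here with statusCode 400 and isRetryable: false.
    console.error(error.message, error.statusCode, error.isRetryable);
  } else {
    throw error;
  }
}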
@@ -2657,6 +2676,7 @@ var OpenAIResponsesLanguageModel = class {
    let responseId = null;
    const ongoingToolCalls = {};
    let hasToolCalls = false;
+    const activeReasoning = {};
    return {
      stream: response.pipeThrough(
        new TransformStream({
@@ -2664,7 +2684,7 @@ var OpenAIResponsesLanguageModel = class {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
@@ -2710,10 +2730,14 @@ var OpenAIResponsesLanguageModel = class {
                type: "text-start",
                id: value.item.id
              });
-            } else if (value
+            } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+              activeReasoning[value.item.id] = {
+                encryptedContent: value.item.encrypted_content,
+                summaryParts: [0]
+              };
              controller.enqueue({
                type: "reasoning-start",
-                id: value.item.id
+                id: `${value.item.id}:0`,
                providerMetadata: {
                  openai: {
                    reasoning: {
@@ -2791,19 +2815,23 @@ var OpenAIResponsesLanguageModel = class {
                type: "text-end",
                id: value.item.id
              });
-            } else if (value
-
-
-
-
-
-
-
-
+            } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+              const activeReasoningPart = activeReasoning[value.item.id];
+              for (const summaryIndex of activeReasoningPart.summaryParts) {
+                controller.enqueue({
+                  type: "reasoning-end",
+                  id: `${value.item.id}:${summaryIndex}`,
+                  providerMetadata: {
+                    openai: {
+                      reasoning: {
+                        id: value.item.id,
+                        encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+                      }
                    }
                  }
-              }
-              }
+                });
+              }
+              delete activeReasoning[value.item.id];
            }
          } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
            const toolCall = ongoingToolCalls[value.output_index];
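
Reasoning stream parts are now keyed by `<item id>:<summary index>`, and closing a reasoning output item ends every summary part that was opened for it. A rough sketch of the part sequence for one reasoning item with two summary parts (the id and the ordering of deltas are illustrative, provider metadata omitted):

// "rs_abc" is a placeholder reasoning item id.
const emittedParts = [
  { type: "reasoning-start", id: "rs_abc:0" }, // response.output_item.added (reasoning)
  { type: "reasoning-delta", id: "rs_abc:0", delta: "…" },
  { type: "reasoning-start", id: "rs_abc:1" }, // response.reasoning_summary_part.added, summary_index 1
  { type: "reasoning-delta", id: "rs_abc:1", delta: "…" },
  { type: "reasoning-end", id: "rs_abc:0" },   // response.output_item.done closes every tracked index
  { type: "reasoning-end", id: "rs_abc:1" },
];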
@@ -2828,30 +2856,57 @@
                id: value.item_id,
                delta: value.delta
              });
+            } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+              if (value.summary_index > 0) {
+                (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+                  value.summary_index
+                );
+                controller.enqueue({
+                  type: "reasoning-start",
+                  id: `${value.item_id}:${value.summary_index}`,
+                  providerMetadata: {
+                    openai: {
+                      reasoning: {
+                        id: value.item_id,
+                        encryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+                      }
+                    }
+                  }
+                });
+              }
            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
              controller.enqueue({
                type: "reasoning-delta",
-                id: value.item_id
-                delta: value.delta
+                id: `${value.item_id}:${value.summary_index}`,
+                delta: value.delta,
+                providerMetadata: {
+                  openai: {
+                    reasoning: {
+                      id: value.item_id
+                    }
+                  }
+                }
              });
            } else if (isResponseFinishedChunk(value)) {
              finishReason = mapOpenAIResponseFinishReason({
-                finishReason: (
+                finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
                hasToolCalls
              });
              usage.inputTokens = value.response.usage.input_tokens;
              usage.outputTokens = value.response.usage.output_tokens;
              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-              usage.reasoningTokens = (
-              usage.cachedInputTokens = (
+              usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+              usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
            } else if (isResponseAnnotationAddedChunk(value)) {
              controller.enqueue({
                type: "source",
                sourceType: "url",
-                id: (
+                id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
                url: value.annotation.url,
                title: value.annotation.title
              });
+            } else if (isErrorChunk(value)) {
+              controller.enqueue({ type: "error", error: value });
            }
          },
          flush(controller) {
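
On the consumer side these parts arrive through the full stream. A minimal sketch, assuming AI SDK v5's `streamText` and `fullStream` API with placeholder model and prompt values:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("o3-mini"), // placeholder model id
  prompt: "…",
});

for await (const part of result.fullStream) {
  if (part.type === "error") {
    // Stream-level `error` chunks are now forwarded as error parts
    // instead of falling through the unknown-chunk fallback.
    console.error("stream error:", part.error);
  } else if (part.type === "reasoning-start" || part.type === "reasoning-end") {
    // Part ids follow the `${itemId}:${summaryIndex}` scheme shown above.
    console.log(part.type, part.id);
  }
}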
@@ -2884,6 +2939,13 @@ var textDeltaChunkSchema = z15.object({
  item_id: z15.string(),
  delta: z15.string()
});
+var errorChunkSchema = z15.object({
+  type: z15.literal("error"),
+  code: z15.string(),
+  message: z15.string(),
+  param: z15.string().nullish(),
+  sequence_number: z15.number()
+});
var responseFinishedChunkSchema = z15.object({
  type: z15.enum(["response.completed", "response.incomplete"]),
  response: z15.object({
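
`errorChunkSchema` describes the `error` event in the Responses streaming protocol. An illustrative chunk of that shape (values are hypothetical):

// Hypothetical streaming `error` event matching the schema above.
const exampleErrorChunk = {
  type: "error",
  code: "rate_limit_exceeded",
  message: "Rate limit reached for requests.",
  param: null,
  sequence_number: 42,
};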
@@ -2910,13 +2972,7 @@ var responseOutputItemAddedSchema = z15.object({
    z15.object({
      type: z15.literal("reasoning"),
      id: z15.string(),
-      encrypted_content: z15.string().nullish()
-      summary: z15.array(
-        z15.object({
-          type: z15.literal("summary_text"),
-          text: z15.string()
-        })
-      )
+      encrypted_content: z15.string().nullish()
    }),
    z15.object({
      type: z15.literal("function_call"),
@@ -2948,13 +3004,7 @@ var responseOutputItemDoneSchema = z15.object({
    z15.object({
      type: z15.literal("reasoning"),
      id: z15.string(),
-      encrypted_content: z15.string().nullish()
-      summary: z15.array(
-        z15.object({
-          type: z15.literal("summary_text"),
-          text: z15.string()
-        })
-      )
+      encrypted_content: z15.string().nullish()
    }),
    z15.object({
      type: z15.literal("function_call"),
@@ -2990,9 +3040,15 @@ var responseAnnotationAddedSchema = z15.object({
    title: z15.string()
  })
});
+var responseReasoningSummaryPartAddedSchema = z15.object({
+  type: z15.literal("response.reasoning_summary_part.added"),
+  item_id: z15.string(),
+  summary_index: z15.number()
+});
var responseReasoningSummaryTextDeltaSchema = z15.object({
  type: z15.literal("response.reasoning_summary_text.delta"),
  item_id: z15.string(),
+  summary_index: z15.number(),
  delta: z15.string()
});
var openaiResponsesChunkSchema = z15.union([
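
Both reasoning-summary schemas now expose the `summary_index` that the stream handler uses to build composite part ids. Illustrative events of the shapes they accept (ids and text are placeholders):

// Hypothetical events matching the two schemas above.
const summaryPartAdded = {
  type: "response.reasoning_summary_part.added",
  item_id: "rs_abc",
  summary_index: 1,
};
const summaryTextDelta = {
  type: "response.reasoning_summary_text.delta",
  item_id: "rs_abc",
  summary_index: 1,
  delta: "…",
};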
@@ -3003,8 +3059,10 @@ var openaiResponsesChunkSchema = z15.union([
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
+  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
-
+  errorChunkSchema,
+  z15.object({ type: z15.string() }).loose()
  // fallback for unknown chunks
]);
function isTextDeltaChunk(chunk) {
@@ -3013,6 +3071,9 @@ function isTextDeltaChunk(chunk) {
function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
}
+function isResponseOutputItemDoneReasoningChunk(chunk) {
+  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+}
function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
}
@@ -3025,12 +3086,21 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
}
+function isResponseOutputItemAddedReasoningChunk(chunk) {
+  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+}
function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
}
+function isResponseReasoningSummaryPartAddedChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_part.added";
+}
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunk.type === "response.reasoning_summary_text.delta";
}
+function isErrorChunk(chunk) {
+  return chunk.type === "error";
+}
function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
    if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
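
The chunk dispatch above relies on small type-guard helpers over the parsed union. A simplified TypeScript sketch of the same narrowing pattern (the union below is illustrative, not the package's actual chunk type):

// Narrow a union of parsed chunks by its discriminating field before
// touching member-specific properties.
type Chunk =
  | { type: "error"; code: string; message: string }
  | { type: "response.output_item.done"; item: { type: string; id: string } }
  | { type: string }; // fallback for unknown chunk types

function isErrorChunk(chunk: Chunk): chunk is Extract<Chunk, { type: "error" }> {
  return chunk.type === "error";
}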