@ai-sdk/openai 1.3.22 → 1.3.24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +36 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +38 -6
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.d.mts +2 -2
- package/internal/dist/index.d.ts +2 -2
- package/internal/dist/index.js +36 -6
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +38 -6
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/internal/dist/index.mjs
CHANGED
@@ -1037,7 +1037,7 @@ var openaiChatChunkSchema = z2.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId.startsWith("o");
+  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
 }
 function isAudioModel(modelId) {
   return modelId.startsWith("gpt-4o-audio-preview");
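
Note: the widened check means model IDs that begin with "gpt-5" are now routed through the same reasoning-model handling as the o-series. A minimal illustration of the predicate's effect (the model IDs below are examples, not an exhaustive list):

isReasoningModel("o3-mini"); // true (unchanged)
isReasoningModel("gpt-5");   // true as of this release
isReasoningModel("gpt-4o");  // false
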
@@ -1908,6 +1908,9 @@ var OpenAISpeechModel = class {
 };
 
 // src/responses/openai-responses-language-model.ts
+import {
+  APICallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders7,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
@@ -2331,15 +2334,16 @@ var OpenAIResponsesLanguageModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g;
     const { args: body, warnings } = this.getArgs(options);
+    const url = this.config.url({
+      path: "/responses",
+      modelId: this.modelId
+    });
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
     } = await postJsonToApi6({
-      url: this.config.url({
-        path: "/responses",
-        modelId: this.modelId
-      }),
+      url,
       headers: combineHeaders7(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
@@ -2347,6 +2351,10 @@ var OpenAIResponsesLanguageModel = class {
         z8.object({
           id: z8.string(),
           created_at: z8.number(),
+          error: z8.object({
+            message: z8.string(),
+            code: z8.string()
+          }).nullish(),
           model: z8.string(),
           output: z8.array(
             z8.discriminatedUnion("type", [
@@ -2399,6 +2407,17 @@ var OpenAIResponsesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
+    if (response.error) {
+      throw new APICallError({
+        message: response.error.message,
+        url,
+        requestBodyValues: body,
+        statusCode: 400,
+        responseHeaders,
+        responseBody: rawResponse,
+        isRetryable: false
+      });
+    }
     const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
     const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
       toolCallType: "function",
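
Note: a response body that parses successfully but carries an `error` object is now rethrown as an APICallError (statusCode 400, isRetryable false); the `url` hoisted out of the `postJsonToApi6` call above is reused for the error metadata. A hedged sketch of what consumer-side handling might look like (the `callModel` wrapper and its arguments are hypothetical, not part of the package):

import { APICallError } from "@ai-sdk/provider";

// Hypothetical wrapper around a Responses language model instance.
async function callModel(model, options) {
  try {
    return await model.doGenerate(options);
  } catch (error) {
    // Body-level errors from the Responses API now surface here.
    if (APICallError.isInstance(error)) {
      console.error("OpenAI Responses API error:", error.message);
    }
    throw error;
  }
}
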
@@ -2570,6 +2589,8 @@ var OpenAIResponsesLanguageModel = class {
             title: value.annotation.title
           }
         });
+      } else if (isErrorChunk(value)) {
+        controller.enqueue({ type: "error", error: value });
       }
     },
     flush(controller) {
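
Note: on the streaming path, matching `error` chunks are now forwarded as error stream parts instead of falling through to the passthrough fallback. A hedged usage sketch at the AI SDK core level (model ID and prompt are placeholders; assumes the `ai` package's `streamText`/`fullStream` API):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("gpt-5"),
  prompt: "Hello!"
});

for await (const part of result.fullStream) {
  if (part.type === "error") {
    // Error chunks emitted by the provider stream land here.
    console.error("stream error part:", part.error);
  }
}
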
@@ -2679,6 +2700,13 @@ var responseReasoningSummaryTextDeltaSchema = z8.object({
   summary_index: z8.number(),
   delta: z8.string()
 });
+var errorChunkSchema = z8.object({
+  type: z8.literal("error"),
+  code: z8.string(),
+  message: z8.string(),
+  param: z8.string().nullish(),
+  sequence_number: z8.number()
+});
 var openaiResponsesChunkSchema = z8.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
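
Note: for reference, the shape that the new errorChunkSchema accepts looks like the following (field values are invented for illustration):

const exampleErrorChunk = {
  type: "error",
  code: "server_error",
  message: "Something went wrong while generating the response.",
  param: null,
  sequence_number: 3
};
// errorChunkSchema.parse(exampleErrorChunk) would succeed; chunks with any
// other type still hit the passthrough fallback at the end of the union.
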
@@ -2688,6 +2716,7 @@ var openaiResponsesChunkSchema = z8.union([
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
+  errorChunkSchema,
   z8.object({ type: z8.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2715,8 +2744,11 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isErrorChunk(chunk) {
+  return chunk.type === "error";
+}
 function getResponsesModelConfig(modelId) {
-  if (modelId.startsWith("o")) {
+  if (modelId.startsWith("o") || modelId.startsWith("gpt-5")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
         isReasoningModel: true,