@ai-sdk/openai 1.3.16 → 1.3.18
This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +47 -10
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +47 -10
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.d.mts +5 -1
- package/internal/dist/index.d.ts +5 -1
- package/internal/dist/index.js +49 -10
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +48 -10
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +1 -1
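The dist bundles all mirror the same two source edits: the image model adds `gpt-image-1` along with a `hasDefaultResponseFormat` set that suppresses the `response_format` parameter for models that only return base64, and the Responses language model gains a `reasoningSummary` provider option whose summaries are returned as reasoning content and streamed as reasoning deltas. As a rough usage sketch, assuming AI SDK 4.x's `generateText` and `providerOptions` plumbing (the model id and option value are illustrative, not taken from this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: request a reasoning summary from a Responses API reasoning model.
// 'auto' is an illustrative value; the diff only shows the option is a string.
const { text, reasoning } = await generateText({
  model: openai.responses('o3-mini'),
  providerOptions: {
    openai: { reasoningSummary: 'auto' },
  },
  prompt: 'Is 9.11 larger than 9.9?',
});

console.log(reasoning); // summarized reasoning, when the model returns one
console.log(text);
```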

package/internal/dist/index.d.mts
CHANGED

@@ -220,6 +220,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
 
 type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+declare const hasDefaultResponseFormat: Set<string>;
 interface OpenAIImageSettings {
     /**
 Override the maximum number of images per call (default is dependent on the

@@ -360,6 +361,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | null | undefined;
     store?: boolean | null | undefined;

@@ -369,6 +371,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
+    reasoningSummary?: string | null | undefined;
 }, {
     user?: string | null | undefined;
     store?: boolean | null | undefined;

@@ -378,7 +381,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
+    reasoningSummary?: string | null | undefined;
 }>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, modelMaxImagesPerCall };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall };

package/internal/dist/index.d.ts
CHANGED

@@ -220,6 +220,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
 
 type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+declare const hasDefaultResponseFormat: Set<string>;
 interface OpenAIImageSettings {
     /**
 Override the maximum number of images per call (default is dependent on the

@@ -360,6 +361,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | null | undefined;
     store?: boolean | null | undefined;

@@ -369,6 +371,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
+    reasoningSummary?: string | null | undefined;
 }, {
     user?: string | null | undefined;
     store?: boolean | null | undefined;

@@ -378,7 +381,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
+    reasoningSummary?: string | null | undefined;
 }>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, modelMaxImagesPerCall };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall };

package/internal/dist/index.js
CHANGED

@@ -27,6 +27,7 @@ __export(internal_exports, {
   OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
   OpenAISpeechModel: () => OpenAISpeechModel,
   OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
   modelMaxImagesPerCall: () => modelMaxImagesPerCall
 });
 module.exports = __toCommonJS(internal_exports);

@@ -1536,8 +1537,10 @@ var import_zod5 = require("zod");
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {

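`gpt-image-1` is added with the same 10-images-per-call cap as `dall-e-2`, and `hasDefaultResponseFormat` records that it returns base64 data by default. The next hunk uses that set to gate the `response_format` parameter. A standalone sketch of the pattern (the `buildImageBody` helper is hypothetical, not part of the package):

```ts
const hasDefaultResponseFormat = new Set(['gpt-image-1']);

// Hypothetical helper mirroring the gating in the hunk below:
// only ask for b64_json when the model actually takes response_format.
function buildImageBody(modelId: string, prompt: string, n: number) {
  return {
    model: modelId,
    prompt,
    n,
    ...(!hasDefaultResponseFormat.has(modelId)
      ? { response_format: 'b64_json' }
      : {}),
  };
}
```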
@@ -1589,7 +1592,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(

@@ -2202,8 +2205,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"

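When both options are set, the spreads above contribute a single `reasoning` object to the request body built by `getArgs`; with only one option set, the other key is simply omitted. An illustrative result (option values assumed, not from the diff):

```ts
// What getArgs contributes for, e.g.,
// { reasoningEffort: 'low', reasoningSummary: 'detailed' }:
const fragment = {
  reasoning: {
    effort: 'low',       // from openaiOptions.reasoningEffort
    summary: 'detailed', // from openaiOptions.reasoningSummary
  },
};
```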
@@ -2285,7 +2295,7 @@ var OpenAIResponsesLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g;
     const { args: body, warnings } = this.getArgs(options);
     const {
       responseHeaders,

@@ -2338,7 +2348,13 @@ var OpenAIResponsesLanguageModel = class {
             type: import_zod8.z.literal("computer_call")
           }),
           import_zod8.z.object({
-            type: import_zod8.z.literal("reasoning")
+            type: import_zod8.z.literal("reasoning"),
+            summary: import_zod8.z.array(
+              import_zod8.z.object({
+                type: import_zod8.z.literal("summary_text"),
+                text: import_zod8.z.string()
+              })
+            )
           })
         ])
       ),

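The response schema now accepts `reasoning` output items carrying a `summary` array. A synthetic item that validates against the widened schema above:

```ts
// Synthetic Responses API output item matching the schema above:
const reasoningItem = {
  type: 'reasoning',
  summary: [
    { type: 'summary_text', text: 'Compared the fractional parts digit by digit.' },
  ],
};
```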
@@ -2356,6 +2372,7 @@ var OpenAIResponsesLanguageModel = class {
       toolName: output.name,
       args: output.arguments
     }));
+    const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
     return {
       text: outputTextElements.map((content) => content.text).join("\n"),
       sources: outputTextElements.flatMap(

@@ -2370,10 +2387,14 @@ var OpenAIResponsesLanguageModel = class {
         })
       ),
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+        finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
         hasToolCalls: toolCalls.length > 0
       }),
       toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+      reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
+        type: "text",
+        text: summary.text
+      })) : void 0,
       usage: {
         promptTokens: response.usage.input_tokens,
         completionTokens: response.usage.output_tokens

@@ -2397,8 +2418,8 @@ var OpenAIResponsesLanguageModel = class {
       providerMetadata: {
         openai: {
           responseId: response.id,
-          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
-          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+          cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
+          reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
         }
       },
       warnings

@@ -2481,6 +2502,11 @@ var OpenAIResponsesLanguageModel = class {
               type: "text-delta",
               textDelta: value.delta
             });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              textDelta: value.delta
+            });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
             ongoingToolCalls[value.output_index] = void 0;
             hasToolCalls = true;

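Streamed summaries are forwarded as `reasoning` stream parts. On the consuming side, a sketch assuming AI SDK 4.x's `streamText` and its `fullStream` part types (the part type names follow the enqueue above):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: print reasoning-summary deltas alongside the answer text.
const result = streamText({
  model: openai.responses('o3-mini'),
  providerOptions: { openai: { reasoningSummary: 'auto' } },
  prompt: 'What is 13 * 24?',
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') {
    process.stdout.write(part.textDelta); // streamed summary text
  } else if (part.type === 'text-delta') {
    process.stdout.write(part.textDelta);
  }
}
```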
@@ -2612,6 +2638,13 @@ var responseAnnotationAddedSchema = import_zod8.z.object({
     title: import_zod8.z.string()
   })
 });
+var responseReasoningSummaryTextDeltaSchema = import_zod8.z.object({
+  type: import_zod8.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_zod8.z.string(),
+  output_index: import_zod8.z.number(),
+  summary_index: import_zod8.z.number(),
+  delta: import_zod8.z.string()
+});
 var openaiResponsesChunkSchema = import_zod8.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,

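A wire chunk matching the new schema looks like this (synthetic example; the field set comes from the zod object above):

```ts
// Synthetic SSE payload accepted by responseReasoningSummaryTextDeltaSchema:
const chunk = {
  type: 'response.reasoning_summary_text.delta',
  item_id: 'rs_abc123',
  output_index: 0,
  summary_index: 0,
  delta: 'First, compare the integer parts;',
};
```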
@@ -2620,6 +2653,7 @@ var openaiResponsesChunkSchema = import_zod8.z.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
   import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
   // fallback for unknown chunks
 ]);

@@ -2644,6 +2678,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {

@@ -2673,7 +2710,8 @@ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
   user: import_zod8.z.string().nullish(),
   reasoningEffort: import_zod8.z.string().nullish(),
   strictSchemas: import_zod8.z.boolean().nullish(),
-  instructions: import_zod8.z.string().nullish()
+  instructions: import_zod8.z.string().nullish(),
+  reasoningSummary: import_zod8.z.string().nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {

@@ -2684,6 +2722,7 @@ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
   OpenAIResponsesLanguageModel,
   OpenAISpeechModel,
   OpenAITranscriptionModel,
+  hasDefaultResponseFormat,
   modelMaxImagesPerCall
 });
 //# sourceMappingURL=index.js.map