@ai-sdk/openai 2.0.29 → 2.0.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/index.js +45 -44
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +17 -16
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +34 -33
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +17 -16
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/internal/index.mjs
CHANGED
@@ -2157,35 +2157,34 @@ import {
 } from "@ai-sdk/provider-utils";
 import { z as z17 } from "zod/v4";
 
-// src/responses/convert-to-openai-responses-messages.ts
+// src/responses/convert-to-openai-responses-input.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
 import { z as z14 } from "zod/v4";
-import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
 function isFileId(data, prefixes) {
   if (!prefixes) return false;
   return prefixes.some((prefix) => data.startsWith(prefix));
 }
-async function convertToOpenAIResponsesMessages({
+async function convertToOpenAIResponsesInput({
   prompt,
   systemMessageMode,
   fileIdPrefixes
 }) {
   var _a, _b, _c, _d, _e, _f;
-  const messages = [];
+  const input = [];
   const warnings = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
         switch (systemMessageMode) {
           case "system": {
-            messages.push({ role: "system", content });
+            input.push({ role: "system", content });
             break;
           }
           case "developer": {
-            messages.push({ role: "developer", content });
+            input.push({ role: "developer", content });
             break;
           }
           case "remove": {
@@ -2205,7 +2204,7 @@ async function convertToOpenAIResponsesMessages({
           break;
         }
         case "user": {
-          messages.push({
+          input.push({
             role: "user",
             content: content.map((part, index) => {
               var _a2, _b2, _c2;
@@ -2250,10 +2249,11 @@ async function convertToOpenAIResponsesMessages({
       }
       case "assistant": {
         const reasoningMessages = {};
+        const toolCallParts = {};
         for (const part of content) {
           switch (part.type) {
             case "text": {
-              messages.push({
+              input.push({
                 role: "assistant",
                 content: [{ type: "output_text", text: part.text }],
                 id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
@@ -2261,10 +2261,11 @@ async function convertToOpenAIResponsesMessages({
               break;
             }
             case "tool-call": {
+              toolCallParts[part.toolCallId] = part;
               if (part.providerExecuted) {
                 break;
               }
-              messages.push({
+              input.push({
                 type: "function_call",
                 call_id: part.toolCallId,
                 name: part.toolName,
@@ -2305,7 +2306,7 @@ async function convertToOpenAIResponsesMessages({
                   encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                   summary: summaryParts
                 };
-                messages.push(reasoningMessages[reasoningId]);
+                input.push(reasoningMessages[reasoningId]);
               } else {
                 existingReasoningMessage.summary.push(...summaryParts);
               }
@@ -2336,7 +2337,7 @@ async function convertToOpenAIResponsesMessages({
                   contentValue = JSON.stringify(output.value);
                   break;
                 }
-              messages.push({
+              input.push({
                 type: "function_call_output",
                 call_id: part.toolCallId,
                 output: contentValue
@@ -2350,7 +2351,7 @@ async function convertToOpenAIResponsesMessages({
       }
     }
   }
-  return { messages, warnings };
+  return { input, warnings };
 }
 var openaiResponsesReasoningProviderOptionsSchema = z14.object({
   itemId: z14.string().nullish(),
@@ -2645,12 +2646,12 @@ var OpenAIResponsesLanguageModel = class {
     if (stopSequences != null) {
       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
-    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
+    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes
     });
-    warnings.push(...messageWarnings);
+    warnings.push(...inputWarnings);
     const openaiOptions = await parseProviderOptions7({
       provider: "openai",
       providerOptions,
@@ -2670,7 +2671,7 @@ var OpenAIResponsesLanguageModel = class {
     include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
     const baseArgs = {
       model: this.modelId,
-      input: messages,
+      input,
       temperature,
       top_p: topP,
       max_output_tokens: maxOutputTokens,
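
Taken together, the hunks above are a mechanical rename of the internal converter: convertToOpenAIResponsesMessages becomes convertToOpenAIResponsesInput, its messages accumulator becomes input (matching the Responses API request field it feeds), and 2.0.30 additionally records tool-call parts in a toolCallParts map. Below is a minimal TypeScript sketch of the new shape as seen from the call site in OpenAIResponsesLanguageModel; the declared signature, the LanguageModelV2Prompt type, and the buildRequestArgs wrapper are assumptions reconstructed from this diff, not part of the package's documented public API.

```ts
import type { LanguageModelV2Prompt } from "@ai-sdk/provider";

// Assumed signature, reconstructed from the diff; the real function lives in
// dist/internal and is not exported as documented public API.
declare function convertToOpenAIResponsesInput(options: {
  prompt: LanguageModelV2Prompt;
  systemMessageMode: "system" | "developer" | "remove"; // values from the switch above
  fileIdPrefixes?: string[]; // optional: isFileId() returns false when absent
}): Promise<{ input: unknown[]; warnings: unknown[] }>;

// Hypothetical wrapper mirroring the call site in the last two hunks.
async function buildRequestArgs(prompt: LanguageModelV2Prompt) {
  // 2.0.29: const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({...})
  // 2.0.30: the result is already named after the Responses API `input` field.
  const { input, warnings } = await convertToOpenAIResponsesInput({
    prompt,
    systemMessageMode: "system",
  });
  // `input` is now passed through unchanged (`input,` instead of `input: messages,`).
  return { body: { model: "gpt-4o" /* placeholder for this.modelId */, input }, warnings };
}
```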