@ai-sdk/openai 2.0.29 → 2.0.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/index.js +45 -44
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +17 -16
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +34 -33
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +17 -16
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.mjs
CHANGED
```diff
@@ -1944,35 +1944,34 @@ import {
 } from "@ai-sdk/provider-utils";
 import { z as z14 } from "zod/v4";
 
-// src/responses/convert-to-openai-responses-messages.ts
+// src/responses/convert-to-openai-responses-input.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-import { parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
+import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
 import { z as z13 } from "zod/v4";
-import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
 function isFileId(data, prefixes) {
   if (!prefixes) return false;
   return prefixes.some((prefix) => data.startsWith(prefix));
 }
-async function convertToOpenAIResponsesMessages({
+async function convertToOpenAIResponsesInput({
   prompt,
   systemMessageMode,
   fileIdPrefixes
 }) {
   var _a, _b, _c, _d, _e, _f;
-  const messages = [];
+  const input = [];
   const warnings = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
         switch (systemMessageMode) {
           case "system": {
-            messages.push({ role: "system", content });
+            input.push({ role: "system", content });
             break;
           }
           case "developer": {
-            messages.push({ role: "developer", content });
+            input.push({ role: "developer", content });
             break;
           }
           case "remove": {
@@ -1992,7 +1991,7 @@ async function convertToOpenAIResponsesMessages({
         break;
       }
       case "user": {
-        messages.push({
+        input.push({
           role: "user",
           content: content.map((part, index) => {
             var _a2, _b2, _c2;
@@ -2037,10 +2036,11 @@ async function convertToOpenAIResponsesMessages({
       }
       case "assistant": {
         const reasoningMessages = {};
+        const toolCallParts = {};
         for (const part of content) {
           switch (part.type) {
             case "text": {
-              messages.push({
+              input.push({
                 role: "assistant",
                 content: [{ type: "output_text", text: part.text }],
                 id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
@@ -2048,10 +2048,11 @@ async function convertToOpenAIResponsesMessages({
               break;
             }
             case "tool-call": {
+              toolCallParts[part.toolCallId] = part;
               if (part.providerExecuted) {
                 break;
               }
-              messages.push({
+              input.push({
                 type: "function_call",
                 call_id: part.toolCallId,
                 name: part.toolName,
@@ -2092,7 +2093,7 @@ async function convertToOpenAIResponsesMessages({
                   encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                   summary: summaryParts
                 };
-                messages.push(reasoningMessages[reasoningId]);
+                input.push(reasoningMessages[reasoningId]);
               } else {
                 existingReasoningMessage.summary.push(...summaryParts);
               }
@@ -2123,7 +2124,7 @@ async function convertToOpenAIResponsesMessages({
                   contentValue = JSON.stringify(output.value);
                   break;
               }
-              messages.push({
+              input.push({
                 type: "function_call_output",
                 call_id: part.toolCallId,
                 output: contentValue
@@ -2137,7 +2138,7 @@ async function convertToOpenAIResponsesMessages({
       }
     }
   }
-  return { messages, warnings };
+  return { input, warnings };
 }
 var openaiResponsesReasoningProviderOptionsSchema = z13.object({
   itemId: z13.string().nullish(),
@@ -2362,12 +2363,12 @@ var OpenAIResponsesLanguageModel = class {
     if (stopSequences != null) {
       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
-    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
+    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode,
      fileIdPrefixes: this.config.fileIdPrefixes
    });
-    warnings.push(...messageWarnings);
+    warnings.push(...inputWarnings);
     const openaiOptions = await parseProviderOptions5({
       provider: "openai",
       providerOptions,
@@ -2387,7 +2388,7 @@ var OpenAIResponsesLanguageModel = class {
     include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
     const baseArgs = {
       model: this.modelId,
-      input: messages,
+      input,
       temperature,
       top_p: topP,
       max_output_tokens: maxOutputTokens,
```
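Taken together, the hunks above rename the internal prompt-conversion helper from `convertToOpenAIResponsesMessages` to `convertToOpenAIResponsesInput`, rename its result array from `messages` to `input` (matching the Responses API request's `input` field, so the `baseArgs` entry becomes object shorthand), merge the two `@ai-sdk/provider-utils` imports into one, and begin recording assistant `tool-call` parts in a `toolCallParts` map keyed by `toolCallId`. The sketch below illustrates the renamed contract and its call site; the type names and the drastically simplified body are assumptions for illustration, not the package's real implementation.

```ts
// Minimal sketch of the helper's contract after 2.0.30. The function name,
// option fields, and { input, warnings } result shape come from the diff
// above; PromptMessage/Warning and the body are illustrative assumptions.
type PromptMessage = {
  role: "system" | "user" | "assistant" | "tool";
  content: unknown;
};
type Warning = { type: string; setting?: string };

async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
}: {
  prompt: PromptMessage[];
  systemMessageMode: "system" | "developer" | "remove";
  fileIdPrefixes?: string[];
}): Promise<{ input: unknown[]; warnings: Warning[] }> {
  const input: unknown[] = []; // 2.0.29 named this array `messages`
  const warnings: Warning[] = [];
  for (const { role, content } of prompt) {
    if (role === "system" && systemMessageMode !== "remove") {
      // system prompts are forwarded as either `system` or `developer` items
      input.push({ role: systemMessageMode, content });
    }
    // user/assistant/tool handling elided; see the diff above
  }
  return { input, warnings }; // 2.0.29 returned { messages, warnings }
}

// Call site, mirroring the @@ -2362 and @@ -2387 hunks:
async function buildRequestArgs() {
  const warnings: Warning[] = [];
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
    prompt: [{ role: "user", content: "Hello" }],
    systemMessageMode: "system",
  });
  warnings.push(...inputWarnings); // previously `...messageWarnings`
  // `input` shorthand replaces the old `input: messages` property
  return { model: "gpt-4o" /* this.modelId in the source */, input, warnings };
}
```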