@ai-sdk/openai 2.0.29 → 2.0.31
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +42 -12
- package/dist/index.d.ts +42 -12
- package/dist/index.js +466 -345
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +465 -344
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +13 -12
- package/dist/internal/index.d.ts +13 -12
- package/dist/internal/index.js +393 -289
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +388 -284
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
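The headline change in 2.0.31 is a new provider-executed image generation tool (`openai.image_generation`), registered on `openaiTools` and handled by the Responses language model in the diff below. The following is a usage sketch only, assuming the AI SDK `generateText` call shape and an illustrative model id; the option names and values mirror `imageGenerationArgsSchema` from the diff, and the tool-result shape `{ result: string }` comes from `imageGenerationOutputSchema`.

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Illustrative sketch, not taken from the package: model id and prompt are placeholders.
const result = await generateText({
  model: openai.responses("gpt-4.1"),
  prompt: "Generate an image of a lighthouse at sunset.",
  tools: {
    // Per the JSDoc added in this release, the tool must be named `image_generation`.
    image_generation: openai.tools.imageGeneration({
      size: "1024x1024", // "1024x1024" | "1024x1536" | "1536x1024" | "auto"
      quality: "high", // "auto" | "low" | "medium" | "high"
      outputFormat: "png", // "png" | "jpeg" | "webp"
      background: "transparent", // "auto" | "opaque" | "transparent"
    }),
  },
});

// Provider-executed tool calls surface as content parts; the result payload
// matches imageGenerationOutputSchema ({ result: string }), presumably the
// generated image data returned by the Responses API.
for (const part of result.content) {
  if (part.type === "tool-result" && part.toolName === "image_generation") {
    console.log(part);
  }
}
```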
package/dist/index.js
CHANGED
@@ -1829,39 +1829,69 @@ var codeInterpreter = (args = {}) => {
   return codeInterpreterToolFactory(args);
 };
 
-// src/tool/
+// src/tool/image-generation.ts
 var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_v412 = require("zod/v4");
-var
-
-
+var imageGenerationArgsSchema = import_v412.z.object({
+  background: import_v412.z.enum(["auto", "opaque", "transparent"]).optional(),
+  inputFidelity: import_v412.z.enum(["low", "high"]).optional(),
+  inputImageMask: import_v412.z.object({
+    fileId: import_v412.z.string().optional(),
+    imageUrl: import_v412.z.string().optional()
   }).optional(),
-
-
-
-
-
-
-
+  model: import_v412.z.string().optional(),
+  moderation: import_v412.z.enum(["auto"]).optional(),
+  outputCompression: import_v412.z.number().int().min(0).max(100).optional(),
+  outputFormat: import_v412.z.enum(["png", "jpeg", "webp"]).optional(),
+  quality: import_v412.z.enum(["auto", "low", "medium", "high"]).optional(),
+  size: import_v412.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
+}).strict();
+var imageGenerationOutputSchema = import_v412.z.object({
+  result: import_v412.z.string()
+});
+var imageGenerationToolFactory = (0, import_provider_utils10.createProviderDefinedToolFactoryWithOutputSchema)({
+  id: "openai.image_generation",
+  name: "image_generation",
+  inputSchema: import_v412.z.object({}),
+  outputSchema: imageGenerationOutputSchema
+});
+var imageGeneration = (args = {}) => {
+  return imageGenerationToolFactory(args);
+};
+
+// src/tool/web-search.ts
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
+var import_v413 = require("zod/v4");
+var webSearchArgsSchema = import_v413.z.object({
+  filters: import_v413.z.object({
+    allowedDomains: import_v413.z.array(import_v413.z.string()).optional()
+  }).optional(),
+  searchContextSize: import_v413.z.enum(["low", "medium", "high"]).optional(),
+  userLocation: import_v413.z.object({
+    type: import_v413.z.literal("approximate"),
+    country: import_v413.z.string().optional(),
+    city: import_v413.z.string().optional(),
+    region: import_v413.z.string().optional(),
+    timezone: import_v413.z.string().optional()
   }).optional()
 });
-var webSearchToolFactory = (0,
+var webSearchToolFactory = (0, import_provider_utils11.createProviderDefinedToolFactory)({
   id: "openai.web_search",
   name: "web_search",
-  inputSchema:
-    action:
-
-        type:
-        query:
+  inputSchema: import_v413.z.object({
+    action: import_v413.z.discriminatedUnion("type", [
+      import_v413.z.object({
+        type: import_v413.z.literal("search"),
+        query: import_v413.z.string().nullish()
       }),
-
-        type:
-        url:
+      import_v413.z.object({
+        type: import_v413.z.literal("open_page"),
+        url: import_v413.z.string()
      }),
-
-        type:
-        url:
-        pattern:
+      import_v413.z.object({
+        type: import_v413.z.literal("find"),
+        url: import_v413.z.string(),
+        pattern: import_v413.z.string()
      })
    ]).nullish()
  })
@@ -1895,6 +1925,20 @@ var openaiTools = {
    * @param filters - The filters to use for the file search.
    */
   fileSearch,
+  /**
+   * The image generation tool allows you to generate images using a text prompt,
+   * and optionally image inputs. It leverages the GPT Image model,
+   * and automatically optimizes text inputs for improved performance.
+   *
+   * Must have name `image_generation`.
+   *
+   * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
+   * @param quality - Rendering quality (e.g. low, medium, high)
+   * @param format - File output format
+   * @param compression - Compression level (0-100%) for JPEG and WebP formats
+   * @param background - Transparent or opaque
+   */
+  imageGeneration,
   /**
    * Web search allows models to access up-to-date information from the internet
    * and provide answers with sourced citations.
@@ -1923,35 +1967,35 @@ var openaiTools = {
 // src/responses/openai-responses-language-model.ts
 var import_provider8 = require("@ai-sdk/provider");
 var import_provider_utils13 = require("@ai-sdk/provider-utils");
-var
+var import_v415 = require("zod/v4");
 
-// src/responses/convert-to-openai-responses-
+// src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
-var import_provider_utils11 = require("@ai-sdk/provider-utils");
-var import_v413 = require("zod/v4");
 var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var import_v414 = require("zod/v4");
 function isFileId(data, prefixes) {
   if (!prefixes) return false;
   return prefixes.some((prefix) => data.startsWith(prefix));
 }
-async function
+async function convertToOpenAIResponsesInput({
   prompt,
   systemMessageMode,
-  fileIdPrefixes
+  fileIdPrefixes,
+  store
 }) {
   var _a, _b, _c, _d, _e, _f;
-  const
+  const input = [];
   const warnings = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
         switch (systemMessageMode) {
           case "system": {
-
+            input.push({ role: "system", content });
             break;
           }
           case "developer": {
-
+            input.push({ role: "developer", content });
             break;
           }
           case "remove": {
@@ -1971,7 +2015,7 @@ async function convertToOpenAIResponsesMessages({
         break;
       }
       case "user": {
-
+        input.push({
           role: "user",
           content: content.map((part, index) => {
             var _a2, _b2, _c2;
@@ -2016,10 +2060,11 @@ async function convertToOpenAIResponsesMessages({
       }
       case "assistant": {
         const reasoningMessages = {};
+        const toolCallParts = {};
         for (const part of content) {
           switch (part.type) {
             case "text": {
-
+              input.push({
                 role: "assistant",
                 content: [{ type: "output_text", text: part.text }],
                 id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
@@ -2027,10 +2072,11 @@ async function convertToOpenAIResponsesMessages({
              break;
            }
            case "tool-call": {
+              toolCallParts[part.toolCallId] = part;
              if (part.providerExecuted) {
                break;
              }
-
+              input.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
@@ -2040,14 +2086,18 @@ async function convertToOpenAIResponsesMessages({
              break;
            }
            case "tool-result": {
-
-              type: "
-
-
+              if (store) {
+                input.push({ type: "item_reference", id: part.toolCallId });
+              } else {
+                warnings.push({
+                  type: "other",
+                  message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
+                });
+              }
              break;
            }
            case "reasoning": {
-              const providerOptions = await (0,
+              const providerOptions = await (0, import_provider_utils12.parseProviderOptions)({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2071,7 +2121,7 @@ async function convertToOpenAIResponsesMessages({
                encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                summary: summaryParts
              };
-
+              input.push(reasoningMessages[reasoningId]);
            } else {
              existingReasoningMessage.summary.push(...summaryParts);
            }
@@ -2102,7 +2152,7 @@ async function convertToOpenAIResponsesMessages({
              contentValue = JSON.stringify(output.value);
              break;
            }
-
+          input.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue
@@ -2116,11 +2166,11 @@ async function convertToOpenAIResponsesMessages({
      }
    }
  }
-  return {
+  return { input, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema =
-  itemId:
-  reasoningEncryptedContent:
+var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
+  itemId: import_v414.z.string().nullish(),
+  reasoningEncryptedContent: import_v414.z.string().nullish()
 });
 
 // src/responses/map-openai-responses-finish-reason.ts
@@ -2205,8 +2255,23 @@ function prepareResponsesTools({
        });
        break;
      }
-
-
+      case "openai.image_generation": {
+        const args = imageGenerationArgsSchema.parse(tool.args);
+        openaiTools2.push({
+          type: "image_generation",
+          background: args.background,
+          input_fidelity: args.inputFidelity,
+          input_image_mask: args.inputImageMask ? {
+            file_id: args.inputImageMask.fileId,
+            image_url: args.inputImageMask.imageUrl
+          } : void 0,
+          model: args.model,
+          size: args.size,
+          quality: args.quality,
+          moderation: args.moderation,
+          output_format: args.outputFormat,
+          output_compression: args.outputCompression
+        });
        break;
      }
    }
@@ -2229,7 +2294,7 @@ function prepareResponsesTools({
    case "tool":
      return {
        tools: openaiTools2,
-        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
+        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
    default: {
@@ -2242,47 +2307,52 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
-var webSearchCallItem =
-  type:
-  id:
-  status:
-  action:
-
-      type:
-      query:
+var webSearchCallItem = import_v415.z.object({
+  type: import_v415.z.literal("web_search_call"),
+  id: import_v415.z.string(),
+  status: import_v415.z.string(),
+  action: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("search"),
+      query: import_v415.z.string().nullish()
    }),
-
-      type:
-      url:
+    import_v415.z.object({
+      type: import_v415.z.literal("open_page"),
+      url: import_v415.z.string()
    }),
-
-      type:
-      url:
-      pattern:
+    import_v415.z.object({
+      type: import_v415.z.literal("find"),
+      url: import_v415.z.string(),
+      pattern: import_v415.z.string()
    })
  ]).nullish()
 });
-var codeInterpreterCallItem =
-  type:
-  id:
-  code:
-  container_id:
-  outputs:
-
-
-
+var codeInterpreterCallItem = import_v415.z.object({
+  type: import_v415.z.literal("code_interpreter_call"),
+  id: import_v415.z.string(),
+  code: import_v415.z.string().nullable(),
+  container_id: import_v415.z.string(),
+  outputs: import_v415.z.array(
+    import_v415.z.discriminatedUnion("type", [
+      import_v415.z.object({ type: import_v415.z.literal("logs"), logs: import_v415.z.string() }),
+      import_v415.z.object({ type: import_v415.z.literal("image"), url: import_v415.z.string() })
    ])
  ).nullable()
 });
+var imageGenerationCallItem = import_v415.z.object({
+  type: import_v415.z.literal("image_generation_call"),
+  id: import_v415.z.string(),
+  result: import_v415.z.string()
+});
 var TOP_LOGPROBS_MAX = 20;
-var LOGPROBS_SCHEMA =
-
-    token:
-    logprob:
-    top_logprobs:
-
-        token:
-        logprob:
+var LOGPROBS_SCHEMA = import_v415.z.array(
+  import_v415.z.object({
+    token: import_v415.z.string(),
+    logprob: import_v415.z.number(),
+    top_logprobs: import_v415.z.array(
+      import_v415.z.object({
+        token: import_v415.z.string(),
+        logprob: import_v415.z.number()
      })
    )
  })
@@ -2315,7 +2385,7 @@ var OpenAIResponsesLanguageModel = class {
    toolChoice,
    responseFormat
  }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e;
    const warnings = [];
    const modelConfig = getResponsesModelConfig(this.modelId);
    if (topK != null) {
@@ -2339,32 +2409,33 @@ var OpenAIResponsesLanguageModel = class {
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
-    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
-      prompt,
-      systemMessageMode: modelConfig.systemMessageMode,
-      fileIdPrefixes: this.config.fileIdPrefixes
-    });
-    warnings.push(...messageWarnings);
    const openaiOptions = await (0, import_provider_utils13.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
-    const
+    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
+      prompt,
+      systemMessageMode: modelConfig.systemMessageMode,
+      fileIdPrefixes: this.config.fileIdPrefixes,
+      store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
+    });
+    warnings.push(...inputWarnings);
+    const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
    let include = openaiOptions == null ? void 0 : openaiOptions.include;
    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
    include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
-    const webSearchToolName = (
+    const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _c.name;
    include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
-    const codeInterpreterToolName = (
+    const codeInterpreterToolName = (_d = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
-    )) == null ? void 0 :
+    )) == null ? void 0 : _d.name;
    include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
    const baseArgs = {
      model: this.modelId,
-      input
+      input,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,
@@ -2374,7 +2445,7 @@ var OpenAIResponsesLanguageModel = class {
        format: responseFormat.schema != null ? {
          type: "json_schema",
          strict: strictJsonSchema,
-          name: (
+          name: (_e = responseFormat.name) != null ? _e : "response",
          description: responseFormat.description,
          schema: responseFormat.schema
        } : { type: "json_object" }
@@ -2385,6 +2456,7 @@ var OpenAIResponsesLanguageModel = class {
        }
      },
      // provider options:
+      max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
@@ -2500,45 +2572,45 @@ var OpenAIResponsesLanguageModel = class {
        body,
        failedResponseHandler: openaiFailedResponseHandler,
        successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
-
-            id:
-            created_at:
-            error:
-              code:
-              message:
+          import_v415.z.object({
+            id: import_v415.z.string(),
+            created_at: import_v415.z.number(),
+            error: import_v415.z.object({
+              code: import_v415.z.string(),
+              message: import_v415.z.string()
            }).nullish(),
-            model:
-            output:
-
-
-                  type:
-                  role:
-                  id:
-                  content:
-
-                      type:
-                      text:
+            model: import_v415.z.string(),
+            output: import_v415.z.array(
+              import_v415.z.discriminatedUnion("type", [
+                import_v415.z.object({
+                  type: import_v415.z.literal("message"),
+                  role: import_v415.z.literal("assistant"),
+                  id: import_v415.z.string(),
+                  content: import_v415.z.array(
+                    import_v415.z.object({
+                      type: import_v415.z.literal("output_text"),
+                      text: import_v415.z.string(),
                      logprobs: LOGPROBS_SCHEMA.nullish(),
-                      annotations:
-
-
-                            type:
-                            start_index:
-                            end_index:
-                            url:
-                            title:
+                      annotations: import_v415.z.array(
+                        import_v415.z.discriminatedUnion("type", [
+                          import_v415.z.object({
+                            type: import_v415.z.literal("url_citation"),
+                            start_index: import_v415.z.number(),
+                            end_index: import_v415.z.number(),
+                            url: import_v415.z.string(),
+                            title: import_v415.z.string()
                          }),
-
-                            type:
-                            file_id:
-                            filename:
-                            index:
-                            start_index:
-                            end_index:
-                            quote:
+                          import_v415.z.object({
+                            type: import_v415.z.literal("file_citation"),
+                            file_id: import_v415.z.string(),
+                            filename: import_v415.z.string().nullish(),
+                            index: import_v415.z.number().nullish(),
+                            start_index: import_v415.z.number().nullish(),
+                            end_index: import_v415.z.number().nullish(),
+                            quote: import_v415.z.string().nullish()
                          }),
-
-                            type:
+                          import_v415.z.object({
+                            type: import_v415.z.literal("container_file_citation")
                          })
                        ])
                      )
@@ -2546,50 +2618,51 @@ var OpenAIResponsesLanguageModel = class {
                  )
                }),
                codeInterpreterCallItem,
-
-
-
-
-
-
+                imageGenerationCallItem,
+                import_v415.z.object({
+                  type: import_v415.z.literal("function_call"),
+                  call_id: import_v415.z.string(),
+                  name: import_v415.z.string(),
+                  arguments: import_v415.z.string(),
+                  id: import_v415.z.string()
                }),
                webSearchCallItem,
-
-                  type:
-                  id:
-                  status:
+                import_v415.z.object({
+                  type: import_v415.z.literal("computer_call"),
+                  id: import_v415.z.string(),
+                  status: import_v415.z.string().optional()
                }),
-
-                  type:
-                  id:
-                  status:
-                  queries:
-                  results:
-
-                      attributes:
-                        file_id:
-                        filename:
-                        score:
-                        text:
+                import_v415.z.object({
+                  type: import_v415.z.literal("file_search_call"),
+                  id: import_v415.z.string(),
+                  status: import_v415.z.string().optional(),
+                  queries: import_v415.z.array(import_v415.z.string()).nullish(),
+                  results: import_v415.z.array(
+                    import_v415.z.object({
+                      attributes: import_v415.z.object({
+                        file_id: import_v415.z.string(),
+                        filename: import_v415.z.string(),
+                        score: import_v415.z.number(),
+                        text: import_v415.z.string()
                      })
                    })
                  ).nullish()
                }),
-
-                  type:
-                  id:
-                  encrypted_content:
-                  summary:
-
-                      type:
-                      text:
+                import_v415.z.object({
+                  type: import_v415.z.literal("reasoning"),
+                  id: import_v415.z.string(),
+                  encrypted_content: import_v415.z.string().nullish(),
+                  summary: import_v415.z.array(
+                    import_v415.z.object({
+                      type: import_v415.z.literal("summary_text"),
+                      text: import_v415.z.string()
                    })
                  )
                })
              ])
            ),
-            service_tier:
-            incomplete_details:
+            service_tier: import_v415.z.string().nullish(),
+            incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
            usage: usageSchema2
          })
        ),
@@ -2630,6 +2703,25 @@ var OpenAIResponsesLanguageModel = class {
          }
          break;
        }
+        case "image_generation_call": {
+          content.push({
+            type: "tool-call",
+            toolCallId: part.id,
+            toolName: "image_generation",
+            input: "{}",
+            providerExecuted: true
+          });
+          content.push({
+            type: "tool-result",
+            toolCallId: part.id,
+            toolName: "image_generation",
+            result: {
+              result: part.result
+            },
+            providerExecuted: true
+          });
+          break;
+        }
        case "message": {
          for (const contentPart of part.content) {
            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
@@ -2893,6 +2985,14 @@ var OpenAIResponsesLanguageModel = class {
              id: value.item.id,
              toolName: "file_search"
            });
+          } else if (value.item.type === "image_generation_call") {
+            controller.enqueue({
+              type: "tool-call",
+              toolCallId: value.item.id,
+              toolName: "image_generation",
+              input: "{}",
+              providerExecuted: true
+            });
          } else if (value.item.type === "message") {
            controller.enqueue({
              type: "text-start",
@@ -3026,6 +3126,16 @@ var OpenAIResponsesLanguageModel = class {
              },
              providerExecuted: true
            });
+          } else if (value.item.type === "image_generation_call") {
+            controller.enqueue({
+              type: "tool-result",
+              toolCallId: value.item.id,
+              toolName: "image_generation",
+              result: {
+                result: value.item.result
+              },
+              providerExecuted: true
+            });
          } else if (value.item.type === "message") {
            controller.enqueue({
              type: "text-end",
@@ -3162,177 +3272,182 @@ var OpenAIResponsesLanguageModel = class {
  };
 }
 };
-var usageSchema2 =
-  input_tokens:
-  input_tokens_details:
-  output_tokens:
-  output_tokens_details:
+var usageSchema2 = import_v415.z.object({
+  input_tokens: import_v415.z.number(),
+  input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
+  output_tokens: import_v415.z.number(),
+  output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema =
-  type:
-  item_id:
-  delta:
+var textDeltaChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_text.delta"),
+  item_id: import_v415.z.string(),
+  delta: import_v415.z.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
 });
-var errorChunkSchema =
-  type:
-  code:
-  message:
-  param:
-  sequence_number:
+var errorChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("error"),
+  code: import_v415.z.string(),
+  message: import_v415.z.string(),
+  param: import_v415.z.string().nullish(),
+  sequence_number: import_v415.z.number()
 });
-var responseFinishedChunkSchema =
-  type:
-  response:
-    incomplete_details:
+var responseFinishedChunkSchema = import_v415.z.object({
+  type: import_v415.z.enum(["response.completed", "response.incomplete"]),
+  response: import_v415.z.object({
+    incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
    usage: usageSchema2,
-    service_tier:
+    service_tier: import_v415.z.string().nullish()
  })
 });
-var responseCreatedChunkSchema =
-  type:
-  response:
-    id:
-    created_at:
-    model:
-    service_tier:
+var responseCreatedChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.created"),
+  response: import_v415.z.object({
+    id: import_v415.z.string(),
+    created_at: import_v415.z.number(),
+    model: import_v415.z.string(),
+    service_tier: import_v415.z.string().nullish()
  })
 });
-var responseOutputItemAddedSchema =
-  type:
-  output_index:
-  item:
-
-      type:
-      id:
+var responseOutputItemAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_item.added"),
+  output_index: import_v415.z.number(),
+  item: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("message"),
+      id: import_v415.z.string()
    }),
-
-      type:
-      id:
-      encrypted_content:
+    import_v415.z.object({
+      type: import_v415.z.literal("reasoning"),
+      id: import_v415.z.string(),
+      encrypted_content: import_v415.z.string().nullish()
    }),
-
-      type:
-      id:
-      call_id:
-      name:
-      arguments:
+    import_v415.z.object({
+      type: import_v415.z.literal("function_call"),
+      id: import_v415.z.string(),
+      call_id: import_v415.z.string(),
+      name: import_v415.z.string(),
+      arguments: import_v415.z.string()
    }),
-
-      type:
-      id:
-      status:
-      action:
-        type:
-        query:
+    import_v415.z.object({
+      type: import_v415.z.literal("web_search_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.string(),
+      action: import_v415.z.object({
+        type: import_v415.z.literal("search"),
+        query: import_v415.z.string().optional()
      }).nullish()
    }),
-
-      type:
-      id:
-      status:
+    import_v415.z.object({
+      type: import_v415.z.literal("computer_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.string()
    }),
-
-      type:
-      id:
-      status:
-      queries:
-      results:
-
-          attributes:
-            file_id:
-            filename:
-            score:
-            text:
+    import_v415.z.object({
+      type: import_v415.z.literal("file_search_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.string(),
+      queries: import_v415.z.array(import_v415.z.string()).nullish(),
+      results: import_v415.z.array(
+        import_v415.z.object({
+          attributes: import_v415.z.object({
+            file_id: import_v415.z.string(),
+            filename: import_v415.z.string(),
+            score: import_v415.z.number(),
+            text: import_v415.z.string()
          })
        })
      ).optional()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("image_generation_call"),
+      id: import_v415.z.string()
    })
  ])
 });
-var responseOutputItemDoneSchema =
-  type:
-  output_index:
-  item:
-
-      type:
-      id:
+var responseOutputItemDoneSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_item.done"),
+  output_index: import_v415.z.number(),
+  item: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("message"),
+      id: import_v415.z.string()
    }),
-
-      type:
-      id:
-      encrypted_content:
+    import_v415.z.object({
+      type: import_v415.z.literal("reasoning"),
+      id: import_v415.z.string(),
+      encrypted_content: import_v415.z.string().nullish()
    }),
-
-      type:
-      id:
-      call_id:
-      name:
-      arguments:
-      status:
+    import_v415.z.object({
+      type: import_v415.z.literal("function_call"),
+      id: import_v415.z.string(),
+      call_id: import_v415.z.string(),
+      name: import_v415.z.string(),
+      arguments: import_v415.z.string(),
+      status: import_v415.z.literal("completed")
    }),
    codeInterpreterCallItem,
+    imageGenerationCallItem,
    webSearchCallItem,
-
-      type:
-      id:
-      status:
+    import_v415.z.object({
+      type: import_v415.z.literal("computer_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.literal("completed")
    }),
-
-      type:
-      id:
-      status:
-      queries:
-      results:
-
-          attributes:
-            file_id:
-            filename:
-            score:
-            text:
+    import_v415.z.object({
+      type: import_v415.z.literal("file_search_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.literal("completed"),
+      queries: import_v415.z.array(import_v415.z.string()).nullish(),
+      results: import_v415.z.array(
+        import_v415.z.object({
+          attributes: import_v415.z.object({
+            file_id: import_v415.z.string(),
+            filename: import_v415.z.string(),
+            score: import_v415.z.number(),
+            text: import_v415.z.string()
          })
        })
      ).nullish()
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema =
-  type:
-  item_id:
-  output_index:
-  delta:
+var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.function_call_arguments.delta"),
+  item_id: import_v415.z.string(),
+  output_index: import_v415.z.number(),
+  delta: import_v415.z.string()
 });
-var responseAnnotationAddedSchema =
-  type:
-  annotation:
-
-      type:
-      url:
-      title:
+var responseAnnotationAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_text.annotation.added"),
+  annotation: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("url_citation"),
+      url: import_v415.z.string(),
+      title: import_v415.z.string()
    }),
-
-      type:
-      file_id:
-      filename:
-      index:
-      start_index:
-      end_index:
-      quote:
+    import_v415.z.object({
+      type: import_v415.z.literal("file_citation"),
+      file_id: import_v415.z.string(),
+      filename: import_v415.z.string().nullish(),
+      index: import_v415.z.number().nullish(),
+      start_index: import_v415.z.number().nullish(),
+      end_index: import_v415.z.number().nullish(),
+      quote: import_v415.z.string().nullish()
    })
  ])
 });
-var responseReasoningSummaryPartAddedSchema =
-  type:
-  item_id:
-  summary_index:
+var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.reasoning_summary_part.added"),
+  item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number()
 });
-var responseReasoningSummaryTextDeltaSchema =
-  type:
-  item_id:
-  summary_index:
-  delta:
+var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number(),
+  delta: import_v415.z.string()
 });
-var openaiResponsesChunkSchema =
+var openaiResponsesChunkSchema = import_v415.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -3343,7 +3458,7 @@ var openaiResponsesChunkSchema = import_v414.z.union([
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
-
+  import_v415.z.object({ type: import_v415.z.string() }).loose()
  // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -3416,27 +3531,15 @@ function getResponsesModelConfig(modelId) {
    isReasoningModel: false
  };
 }
-var openaiResponsesProviderOptionsSchema =
-
-
-  previousResponseId: import_v414.z.string().nullish(),
-  store: import_v414.z.boolean().nullish(),
-  user: import_v414.z.string().nullish(),
-  reasoningEffort: import_v414.z.string().nullish(),
-  strictJsonSchema: import_v414.z.boolean().nullish(),
-  instructions: import_v414.z.string().nullish(),
-  reasoningSummary: import_v414.z.string().nullish(),
-  serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
-  include: import_v414.z.array(
-    import_v414.z.enum([
+var openaiResponsesProviderOptionsSchema = import_v415.z.object({
+  include: import_v415.z.array(
+    import_v415.z.enum([
      "reasoning.encrypted_content",
      "file_search_call.results",
      "message.output_text.logprobs"
    ])
  ).nullish(),
-
-  promptCacheKey: import_v414.z.string().nullish(),
-  safetyIdentifier: import_v414.z.string().nullish(),
+  instructions: import_v415.z.string().nullish(),
  /**
   * Return the log probabilities of the tokens.
   *
@@ -3449,15 +3552,33 @@ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
   * @see https://platform.openai.com/docs/api-reference/responses/create
   * @see https://cookbook.openai.com/examples/using_logprobs
   */
-  logprobs:
+  logprobs: import_v415.z.union([import_v415.z.boolean(), import_v415.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
+  /**
+   * The maximum number of total calls to built-in tools that can be processed in a response.
+   * This maximum number applies across all built-in tool calls, not per individual tool.
+   * Any further attempts to call a tool by the model will be ignored.
+   */
+  maxToolCalls: import_v415.z.number().nullish(),
+  metadata: import_v415.z.any().nullish(),
+  parallelToolCalls: import_v415.z.boolean().nullish(),
+  previousResponseId: import_v415.z.string().nullish(),
+  promptCacheKey: import_v415.z.string().nullish(),
+  reasoningEffort: import_v415.z.string().nullish(),
+  reasoningSummary: import_v415.z.string().nullish(),
+  safetyIdentifier: import_v415.z.string().nullish(),
+  serviceTier: import_v415.z.enum(["auto", "flex", "priority"]).nullish(),
+  store: import_v415.z.boolean().nullish(),
+  strictJsonSchema: import_v415.z.boolean().nullish(),
+  textVerbosity: import_v415.z.enum(["low", "medium", "high"]).nullish(),
+  user: import_v415.z.string().nullish()
 });
 
 // src/speech/openai-speech-model.ts
 var import_provider_utils14 = require("@ai-sdk/provider-utils");
-var
-var OpenAIProviderOptionsSchema =
-  instructions:
-  speed:
+var import_v416 = require("zod/v4");
+var OpenAIProviderOptionsSchema = import_v416.z.object({
+  instructions: import_v416.z.string().nullish(),
+  speed: import_v416.z.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -3561,33 +3682,33 @@ var OpenAISpeechModel = class {
 
 // src/transcription/openai-transcription-model.ts
 var import_provider_utils15 = require("@ai-sdk/provider-utils");
-var
+var import_v418 = require("zod/v4");
 
 // src/transcription/openai-transcription-options.ts
-var
-var openAITranscriptionProviderOptions =
+var import_v417 = require("zod/v4");
+var openAITranscriptionProviderOptions = import_v417.z.object({
  /**
   * Additional information to include in the transcription response.
   */
-  include:
+  include: import_v417.z.array(import_v417.z.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
-  language:
+  language: import_v417.z.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
-  prompt:
+  prompt: import_v417.z.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
-  temperature:
+  temperature: import_v417.z.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
-  timestampGranularities:
+  timestampGranularities: import_v417.z.array(import_v417.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
 // src/transcription/openai-transcription-model.ts
@@ -3756,29 +3877,29 @@ var OpenAITranscriptionModel = class {
  };
 }
 };
-var openaiTranscriptionResponseSchema =
-  text:
-  language:
-  duration:
-  words:
-
-    word:
-    start:
-    end:
+var openaiTranscriptionResponseSchema = import_v418.z.object({
+  text: import_v418.z.string(),
+  language: import_v418.z.string().nullish(),
+  duration: import_v418.z.number().nullish(),
+  words: import_v418.z.array(
+    import_v418.z.object({
+      word: import_v418.z.string(),
+      start: import_v418.z.number(),
+      end: import_v418.z.number()
    })
  ).nullish(),
-  segments:
-
-    id:
-    seek:
-    start:
-    end:
-    text:
-    tokens:
-    temperature:
-    avg_logprob:
-    compression_ratio:
-    no_speech_prob:
+  segments: import_v418.z.array(
+    import_v418.z.object({
+      id: import_v418.z.number(),
+      seek: import_v418.z.number(),
+      start: import_v418.z.number(),
+      end: import_v418.z.number(),
+      text: import_v418.z.string(),
+      tokens: import_v418.z.array(import_v418.z.number()),
+      temperature: import_v418.z.number(),
+      avg_logprob: import_v418.z.number(),
+      compression_ratio: import_v418.z.number(),
+      no_speech_prob: import_v418.z.number()
    })
  ).nullish()
 });