@ai-sdk/openai 2.0.9 → 2.0.10
This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +7 -0
- package/dist/index.d.mts +36 -53
- package/dist/index.d.ts +36 -53
- package/dist/index.js +502 -442
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +513 -453
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +316 -255
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +301 -240
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.js
CHANGED
@@ -36,14 +36,33 @@ __export(internal_exports, {
 });
 module.exports = __toCommonJS(internal_exports);
 
-// src/openai-chat-language-model.ts
+// src/chat/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
 var import_v45 = require("zod/v4");
 
-// src/
-var
+// src/openai-error.ts
+var import_v4 = require("zod/v4");
 var import_provider_utils = require("@ai-sdk/provider-utils");
+var openaiErrorDataSchema = import_v4.z.object({
+  error: import_v4.z.object({
+    message: import_v4.z.string(),
+    // The additional information below is handled loosely to support
+    // OpenAI-compatible providers that have slightly different error
+    // responses:
+    type: import_v4.z.string().nullish(),
+    param: import_v4.z.any().nullish(),
+    code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
+  })
+});
+var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+  errorSchema: openaiErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+
+// src/chat/convert-to-openai-chat-messages.ts
+var import_provider = require("@ai-sdk/provider");
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
 function convertToOpenAIChatMessages({
   prompt,
   systemMessageMode = "system"
@@ -97,7 +116,7 @@ function convertToOpenAIChatMessages({
           return {
             type: "image_url",
             image_url: {
-              url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0,
+              url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`,
               // OpenAI specific extension: image detail
               detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
             }
@@ -113,7 +132,7 @@ function convertToOpenAIChatMessages({
           return {
             type: "input_audio",
             input_audio: {
-              data: (0,
+              data: (0, import_provider_utils2.convertToBase64)(part.data),
              format: "wav"
            }
          };
@@ -123,7 +142,7 @@ function convertToOpenAIChatMessages({
          return {
            type: "input_audio",
            input_audio: {
-              data: (0,
+              data: (0, import_provider_utils2.convertToBase64)(part.data),
              format: "mp3"
            }
          };
@@ -144,7 +163,7 @@ function convertToOpenAIChatMessages({
            type: "file",
            file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
              filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
-              file_data: `data:application/pdf;base64,${(0,
+              file_data: `data:application/pdf;base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`
            }
          };
        } else {
@@ -219,7 +238,7 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
 }
 
-// src/get-response-metadata.ts
+// src/chat/get-response-metadata.ts
 function getResponseMetadata({
  id,
  model,
@@ -232,7 +251,7 @@ function getResponseMetadata({
  };
 }
 
-// src/map-openai-finish-reason.ts
+// src/chat/map-openai-finish-reason.ts
 function mapOpenAIFinishReason(finishReason) {
  switch (finishReason) {
    case "stop":
@@ -249,16 +268,16 @@ function mapOpenAIFinishReason(finishReason) {
  }
 }
 
-// src/openai-chat-options.ts
-var
-var openaiProviderOptions =
+// src/chat/openai-chat-options.ts
+var import_v42 = require("zod/v4");
+var openaiProviderOptions = import_v42.z.object({
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
-  logitBias:
+  logitBias: import_v42.z.record(import_v42.z.coerce.number(), import_v42.z.number()).optional(),
  /**
   * Return the log probabilities of the tokens.
   *
@@ -268,42 +287,42 @@ var openaiProviderOptions = import_v4.z.object({
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
-  logprobs:
+  logprobs: import_v42.z.union([import_v42.z.boolean(), import_v42.z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
-  parallelToolCalls:
+  parallelToolCalls: import_v42.z.boolean().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse.
   */
-  user:
+  user: import_v42.z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
-  reasoningEffort:
+  reasoningEffort: import_v42.z.enum(["minimal", "low", "medium", "high"]).optional(),
  /**
   * Maximum number of completion tokens to generate. Useful for reasoning models.
   */
-  maxCompletionTokens:
+  maxCompletionTokens: import_v42.z.number().optional(),
  /**
   * Whether to enable persistence in responses API.
   */
-  store:
+  store: import_v42.z.boolean().optional(),
  /**
   * Metadata to associate with the request.
   */
-  metadata:
+  metadata: import_v42.z.record(import_v42.z.string().max(64), import_v42.z.string().max(512)).optional(),
  /**
   * Parameters for prediction mode.
   */
-  prediction:
+  prediction: import_v42.z.record(import_v42.z.string(), import_v42.z.any()).optional(),
  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
-  structuredOutputs:
+  structuredOutputs: import_v42.z.boolean().optional(),
  /**
   * Service tier for the request.
   * - 'auto': Default service tier
@@ -312,40 +331,21 @@ var openaiProviderOptions = import_v4.z.object({
   *
   * @default 'auto'
   */
-  serviceTier:
+  serviceTier: import_v42.z.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
-  strictJsonSchema:
+  strictJsonSchema: import_v42.z.boolean().optional(),
  /**
   * Controls the verbosity of the model's responses.
   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
   */
-  textVerbosity:
-});
-
-// src/openai-error.ts
-var import_v42 = require("zod/v4");
-var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema = import_v42.z.object({
-  error: import_v42.z.object({
-    message: import_v42.z.string(),
-    // The additional information below is handled loosely to support
-    // OpenAI-compatible providers that have slightly different error
-    // responses:
-    type: import_v42.z.string().nullish(),
-    param: import_v42.z.any().nullish(),
-    code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
-  })
-});
-var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
-  errorSchema: openaiErrorDataSchema,
-  errorToMessage: (data) => data.error.message
+  textVerbosity: import_v42.z.enum(["low", "medium", "high"]).optional()
 });
 
-// src/openai-prepare-tools.ts
+// src/chat/openai-chat-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
 
 // src/tool/file-search.ts
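The reworked chat options schema above is what `providerOptions.openai` is validated against for chat models. A minimal sketch of passing these options through the AI SDK's `generateText` (assuming the standard `ai` package entry point; the model id and option values are illustrative and not taken from this diff):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: chat provider options validated by openaiProviderOptions above.
// "minimal" reasoningEffort and textVerbosity are part of the 2.0.10 schema.
const { text } = await generateText({
  model: openai("gpt-5-mini"), // illustrative model id
  prompt: "Summarize this changelog entry in one sentence.",
  providerOptions: {
    openai: {
      reasoningEffort: "minimal", // enum: "minimal" | "low" | "medium" | "high"
      textVerbosity: "low", // forwarded as `verbosity` in the request body
      serviceTier: "flex", // "auto" | "flex" | "priority"
      maxCompletionTokens: 256,
    },
  },
});

console.log(text);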
@@ -434,8 +434,8 @@ var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFacto
  inputSchema: import_v44.z.object({})
 });
 
-// src/openai-prepare-tools.ts
-function
+// src/chat/openai-chat-prepare-tools.ts
+function prepareChatTools({
  tools,
  toolChoice,
  structuredOutputs,
@@ -521,7 +521,7 @@ function prepareTools({
  }
 }
 
-// src/openai-chat-language-model.ts
+// src/chat/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
@@ -606,7 +606,7 @@ var OpenAIChatLanguageModel = class {
      seed,
      verbosity: openaiOptions.textVerbosity,
      // openai specific settings:
-      // TODO
+      // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
      max_completion_tokens: openaiOptions.maxCompletionTokens,
      store: openaiOptions.store,
      metadata: openaiOptions.metadata,
@@ -706,7 +706,7 @@ var OpenAIChatLanguageModel = class {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings
-    } =
+    } = prepareChatTools({
      tools,
      toolChoice,
      structuredOutputs,
@@ -1176,11 +1176,11 @@ var reasoningModels = {
  }
 };
 
-// src/openai-completion-language-model.ts
+// src/completion/openai-completion-language-model.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
 var import_v47 = require("zod/v4");
 
-// src/convert-to-openai-completion-prompt.ts
+// src/completion/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
 function convertToOpenAICompletionPrompt({
  prompt,
@@ -1255,7 +1255,37 @@ ${user}:`]
  };
 }
 
-// src/
+// src/completion/get-response-metadata.ts
+function getResponseMetadata2({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
+}
+
+// src/completion/map-openai-finish-reason.ts
+function mapOpenAIFinishReason2(finishReason) {
+  switch (finishReason) {
+    case "stop":
+      return "stop";
+    case "length":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    case "function_call":
+    case "tool_calls":
+      return "tool-calls";
+    default:
+      return "unknown";
+  }
+}
+
+// src/completion/openai-completion-options.ts
 var import_v46 = require("zod/v4");
 var openaiCompletionProviderOptions = import_v46.z.object({
  /**
@@ -1298,7 +1328,7 @@ var openaiCompletionProviderOptions = import_v46.z.object({
  logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
 });
 
-// src/openai-completion-language-model.ts
+// src/completion/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
@@ -1418,10 +1448,10 @@ var OpenAICompletionLanguageModel = class {
        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
-      finishReason:
+      finishReason: mapOpenAIFinishReason2(choice.finish_reason),
      request: { body: args },
      response: {
-        ...
+        ...getResponseMetadata2(response),
        headers: responseHeaders,
        body: rawResponse
      },
@@ -1485,7 +1515,7 @@ var OpenAICompletionLanguageModel = class {
            isFirstChunk = false;
            controller.enqueue({
              type: "response-metadata",
-              ...
+              ...getResponseMetadata2(value)
            });
            controller.enqueue({ type: "text-start", id: "0" });
          }
@@ -1496,7 +1526,7 @@ var OpenAICompletionLanguageModel = class {
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
-            finishReason =
+            finishReason = mapOpenAIFinishReason2(choice.finish_reason);
          }
          if ((choice == null ? void 0 : choice.logprobs) != null) {
            providerMetadata.openai.logprobs = choice.logprobs;
@@ -1571,12 +1601,12 @@ var openaiCompletionChunkSchema = import_v47.z.union([
  openaiErrorDataSchema
 ]);
 
-// src/openai-embedding-model.ts
+// src/embedding/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils7 = require("@ai-sdk/provider-utils");
 var import_v49 = require("zod/v4");
 
-// src/openai-embedding-options.ts
+// src/embedding/openai-embedding-options.ts
 var import_v48 = require("zod/v4");
 var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
@@ -1591,7 +1621,7 @@ var openaiEmbeddingProviderOptions = import_v48.z.object({
  user: import_v48.z.string().optional()
 });
 
-// src/openai-embedding-model.ts
+// src/embedding/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
@@ -1659,11 +1689,11 @@ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
  usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
 });
 
-// src/openai-image-model.ts
+// src/image/openai-image-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var import_v410 = require("zod/v4");
 
-// src/openai-image-
+// src/image/openai-image-options.ts
 var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
@@ -1671,7 +1701,7 @@ var modelMaxImagesPerCall = {
 };
 var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
-// src/openai-image-model.ts
+// src/image/openai-image-model.ts
 var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
@@ -1755,11 +1785,11 @@ var openaiImageResponseSchema = import_v410.z.object({
  )
 });
 
-// src/openai-transcription-model.ts
+// src/transcription/openai-transcription-model.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var import_v412 = require("zod/v4");
 
-// src/openai-transcription-options.ts
+// src/transcription/openai-transcription-options.ts
 var import_v411 = require("zod/v4");
 var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
@@ -1786,7 +1816,7 @@ var openAITranscriptionProviderOptions = import_v411.z.object({
  timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
-// src/openai-transcription-model.ts
+// src/transcription/openai-transcription-model.ts
 var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1944,7 +1974,7 @@ var openaiTranscriptionResponseSchema = import_v412.z.object({
  ).nullish()
 });
 
-// src/openai-speech-model.ts
+// src/speech/openai-speech-model.ts
 var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_v413 = require("zod/v4");
 var OpenAIProviderOptionsSchema = import_v413.z.object({
@@ -2053,8 +2083,8 @@ var OpenAISpeechModel = class {
 
 // src/responses/openai-responses-language-model.ts
 var import_provider8 = require("@ai-sdk/provider");
-var
-var
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var import_v416 = require("zod/v4");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
@@ -2268,6 +2298,25 @@ function mapOpenAIResponseFinishReason({
 
 // src/responses/openai-responses-prepare-tools.ts
 var import_provider7 = require("@ai-sdk/provider");
+
+// src/tool/code-interpreter.ts
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
+var import_v415 = require("zod/v4");
+var codeInterpreterArgsSchema = import_v415.z.object({
+  container: import_v415.z.union([
+    import_v415.z.string(),
+    import_v415.z.object({
+      fileIds: import_v415.z.array(import_v415.z.string()).optional()
+    })
+  ]).optional()
+});
+var codeInterpreter = (0, import_provider_utils13.createProviderDefinedToolFactory)({
+  id: "openai.code_interpreter",
+  name: "code_interpreter",
+  inputSchema: import_v415.z.object({})
+});
+
+// src/responses/openai-responses-prepare-tools.ts
 function prepareResponsesTools({
  tools,
  toolChoice,
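The new `code_interpreter` tool factory registered above is wired into `prepareResponsesTools` in the hunks that follow. A minimal sketch of attaching it to a Responses API call, assuming the public provider re-exports the factory as `openai.tools.codeInterpreter` (the export surface is not visible in this internal bundle; model id and prompt are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: attach the provider-defined code_interpreter tool.
// Omitting `container` makes the provider send { type: "auto" }, per the mapping below.
const result = await generateText({
  model: openai.responses("gpt-5-mini"), // illustrative model id
  prompt: "Compute the standard deviation of [2, 4, 4, 4, 5, 5, 7, 9].",
  tools: {
    code_interpreter: openai.tools.codeInterpreter({
      container: { fileIds: [] }, // or pass an existing container id as a string
    }),
  },
});

console.log(result.text);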
@@ -2290,7 +2339,7 @@ function prepareResponsesTools({
          strict: strictJsonSchema
        });
        break;
-      case "provider-defined":
+      case "provider-defined": {
        switch (tool.id) {
          case "openai.file_search": {
            const args = fileSearchArgsSchema.parse(tool.args);
@@ -2303,18 +2352,30 @@ function prepareResponsesTools({
            });
            break;
          }
-          case "openai.web_search_preview":
+          case "openai.web_search_preview": {
+            const args = webSearchPreviewArgsSchema.parse(tool.args);
            openaiTools.push({
              type: "web_search_preview",
-              search_context_size:
-              user_location:
+              search_context_size: args.searchContextSize,
+              user_location: args.userLocation
            });
            break;
-
+          }
+          case "openai.code_interpreter": {
+            const args = codeInterpreterArgsSchema.parse(tool.args);
+            openaiTools.push({
+              type: "code_interpreter",
+              container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
+            });
+            break;
+          }
+          default: {
            toolWarnings.push({ type: "unsupported-tool", tool });
            break;
+          }
        }
        break;
+      }
      default:
        toolWarnings.push({ type: "unsupported-tool", tool });
        break;
@@ -2332,7 +2393,7 @@ function prepareResponsesTools({
    case "tool":
      return {
        tools: openaiTools,
-        toolChoice: toolChoice.toolName === "
+        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
    default: {
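The `toolChoice` mapping above now treats `code_interpreter`, `file_search`, and `web_search_preview` as built-in tool types rather than function tools. A sketch of forcing the web search tool, assuming it is exposed as `openai.tools.webSearchPreview` (argument values and model id are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: forcing a provider-defined tool; prepareResponsesTools maps this
// toolChoice to { type: "web_search_preview" } in the request body.
const result = await generateText({
  model: openai.responses("gpt-4.1-mini"), // illustrative model id
  prompt: "Find the release notes for @ai-sdk/openai 2.0.10.",
  tools: {
    web_search_preview: openai.tools.webSearchPreview({
      searchContextSize: "low", // forwarded as search_context_size
    }),
  },
  toolChoice: { type: "tool", toolName: "web_search_preview" },
});

console.log(result.text);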
@@ -2401,7 +2462,7 @@ var OpenAIResponsesLanguageModel = class {
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-    const openaiOptions = await (0,
+    const openaiOptions = await (0, import_provider_utils14.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
@@ -2531,78 +2592,78 @@ var OpenAIResponsesLanguageModel = class {
      responseHeaders,
      value: response,
      rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils14.postJsonToApi)({
      url,
-      headers: (0,
+      headers: (0, import_provider_utils14.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
-
-      id:
-      created_at:
-      error:
-      code:
-      message:
+      successfulResponseHandler: (0, import_provider_utils14.createJsonResponseHandler)(
+        import_v416.z.object({
+          id: import_v416.z.string(),
+          created_at: import_v416.z.number(),
+          error: import_v416.z.object({
+            code: import_v416.z.string(),
+            message: import_v416.z.string()
          }).nullish(),
-      model:
-      output:
-
-
-      type:
-      role:
-      id:
-      content:
-
-      type:
-      text:
-      annotations:
-
-      type:
-      start_index:
-      end_index:
-      url:
-      title:
+          model: import_v416.z.string(),
+          output: import_v416.z.array(
+            import_v416.z.discriminatedUnion("type", [
+              import_v416.z.object({
+                type: import_v416.z.literal("message"),
+                role: import_v416.z.literal("assistant"),
+                id: import_v416.z.string(),
+                content: import_v416.z.array(
+                  import_v416.z.object({
+                    type: import_v416.z.literal("output_text"),
+                    text: import_v416.z.string(),
+                    annotations: import_v416.z.array(
+                      import_v416.z.object({
+                        type: import_v416.z.literal("url_citation"),
+                        start_index: import_v416.z.number(),
+                        end_index: import_v416.z.number(),
+                        url: import_v416.z.string(),
+                        title: import_v416.z.string()
                      })
                    )
                  })
                )
              }),
-
-      type:
-      call_id:
-      name:
-      arguments:
-      id:
+              import_v416.z.object({
+                type: import_v416.z.literal("function_call"),
+                call_id: import_v416.z.string(),
+                name: import_v416.z.string(),
+                arguments: import_v416.z.string(),
+                id: import_v416.z.string()
              }),
-
-      type:
-      id:
-      status:
+              import_v416.z.object({
+                type: import_v416.z.literal("web_search_call"),
+                id: import_v416.z.string(),
+                status: import_v416.z.string().optional()
              }),
-
-      type:
-      id:
-      status:
+              import_v416.z.object({
+                type: import_v416.z.literal("computer_call"),
+                id: import_v416.z.string(),
+                status: import_v416.z.string().optional()
              }),
-
-      type:
-      id:
-      status:
+              import_v416.z.object({
+                type: import_v416.z.literal("file_search_call"),
+                id: import_v416.z.string(),
+                status: import_v416.z.string().optional()
              }),
-
-      type:
-      id:
-      encrypted_content:
-      summary:
-
-      type:
-      text:
+              import_v416.z.object({
+                type: import_v416.z.literal("reasoning"),
+                id: import_v416.z.string(),
+                encrypted_content: import_v416.z.string().nullish(),
+                summary: import_v416.z.array(
+                  import_v416.z.object({
+                    type: import_v416.z.literal("summary_text"),
+                    text: import_v416.z.string()
                  })
                )
              })
            ])
          ),
-      incomplete_details:
+          incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullable(),
          usage: usageSchema2
        })
      ),
@@ -2656,7 +2717,7 @@ var OpenAIResponsesLanguageModel = class {
          content.push({
            type: "source",
            sourceType: "url",
-            id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0,
+            id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils14.generateId)(),
            url: annotation.url,
            title: annotation.title
          });
@@ -2768,18 +2829,18 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
    const { args: body, warnings } = await this.getArgs(options);
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils14.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
-      headers: (0,
+      headers: (0, import_provider_utils14.combineHeaders)(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
      },
      failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils14.createEventSourceResponseHandler)(
        openaiResponsesChunkSchema
      ),
      abortSignal: options.abortSignal,
@@ -3022,7 +3083,7 @@ var OpenAIResponsesLanguageModel = class {
            controller.enqueue({
              type: "source",
              sourceType: "url",
-              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0,
+              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils14.generateId)(),
              url: value.annotation.url,
              title: value.annotation.title
            });
@@ -3049,140 +3110,140 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
 };
-var usageSchema2 =
-input_tokens:
-input_tokens_details:
-output_tokens:
-output_tokens_details:
+var usageSchema2 = import_v416.z.object({
+  input_tokens: import_v416.z.number(),
+  input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
+  output_tokens: import_v416.z.number(),
+  output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema =
-type:
-item_id:
-delta:
+var textDeltaChunkSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.output_text.delta"),
+  item_id: import_v416.z.string(),
+  delta: import_v416.z.string()
 });
-var errorChunkSchema =
-type:
-code:
-message:
-param:
-sequence_number:
+var errorChunkSchema = import_v416.z.object({
+  type: import_v416.z.literal("error"),
+  code: import_v416.z.string(),
+  message: import_v416.z.string(),
+  param: import_v416.z.string().nullish(),
+  sequence_number: import_v416.z.number()
 });
-var responseFinishedChunkSchema =
-type:
-response:
-incomplete_details:
+var responseFinishedChunkSchema = import_v416.z.object({
+  type: import_v416.z.enum(["response.completed", "response.incomplete"]),
+  response: import_v416.z.object({
+    incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
    usage: usageSchema2
  })
 });
-var responseCreatedChunkSchema =
-type:
-response:
-id:
-created_at:
-model:
+var responseCreatedChunkSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.created"),
+  response: import_v416.z.object({
+    id: import_v416.z.string(),
+    created_at: import_v416.z.number(),
+    model: import_v416.z.string()
  })
 });
-var responseOutputItemAddedSchema =
-type:
-output_index:
-item:
-
-type:
-id:
+var responseOutputItemAddedSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.output_item.added"),
+  output_index: import_v416.z.number(),
+  item: import_v416.z.discriminatedUnion("type", [
+    import_v416.z.object({
+      type: import_v416.z.literal("message"),
+      id: import_v416.z.string()
    }),
-
-type:
-id:
-encrypted_content:
+    import_v416.z.object({
+      type: import_v416.z.literal("reasoning"),
+      id: import_v416.z.string(),
+      encrypted_content: import_v416.z.string().nullish()
    }),
-
-type:
-id:
-call_id:
-name:
-arguments:
+    import_v416.z.object({
+      type: import_v416.z.literal("function_call"),
+      id: import_v416.z.string(),
+      call_id: import_v416.z.string(),
+      name: import_v416.z.string(),
+      arguments: import_v416.z.string()
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("web_search_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.string()
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("computer_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.string()
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("file_search_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.string()
    })
  ])
 });
-var responseOutputItemDoneSchema =
-type:
-output_index:
-item:
-
-type:
-id:
+var responseOutputItemDoneSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.output_item.done"),
+  output_index: import_v416.z.number(),
+  item: import_v416.z.discriminatedUnion("type", [
+    import_v416.z.object({
+      type: import_v416.z.literal("message"),
+      id: import_v416.z.string()
    }),
-
-type:
-id:
-encrypted_content:
+    import_v416.z.object({
+      type: import_v416.z.literal("reasoning"),
+      id: import_v416.z.string(),
+      encrypted_content: import_v416.z.string().nullish()
    }),
-
-type:
-id:
-call_id:
-name:
-arguments:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("function_call"),
+      id: import_v416.z.string(),
+      call_id: import_v416.z.string(),
+      name: import_v416.z.string(),
+      arguments: import_v416.z.string(),
+      status: import_v416.z.literal("completed")
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("web_search_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.literal("completed")
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("computer_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.literal("completed")
    }),
-
-type:
-id:
-status:
+    import_v416.z.object({
+      type: import_v416.z.literal("file_search_call"),
+      id: import_v416.z.string(),
+      status: import_v416.z.literal("completed")
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema =
-type:
-item_id:
-output_index:
-delta:
+var responseFunctionCallArgumentsDeltaSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.function_call_arguments.delta"),
+  item_id: import_v416.z.string(),
+  output_index: import_v416.z.number(),
+  delta: import_v416.z.string()
 });
-var responseAnnotationAddedSchema =
-type:
-annotation:
-type:
-url:
-title:
+var responseAnnotationAddedSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.output_text.annotation.added"),
+  annotation: import_v416.z.object({
+    type: import_v416.z.literal("url_citation"),
+    url: import_v416.z.string(),
+    title: import_v416.z.string()
  })
 });
-var responseReasoningSummaryPartAddedSchema =
-type:
-item_id:
-summary_index:
+var responseReasoningSummaryPartAddedSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.reasoning_summary_part.added"),
+  item_id: import_v416.z.string(),
+  summary_index: import_v416.z.number()
 });
-var responseReasoningSummaryTextDeltaSchema =
-type:
-item_id:
-summary_index:
-delta:
+var responseReasoningSummaryTextDeltaSchema = import_v416.z.object({
+  type: import_v416.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_v416.z.string(),
+  summary_index: import_v416.z.number(),
+  delta: import_v416.z.string()
 });
-var openaiResponsesChunkSchema =
+var openaiResponsesChunkSchema = import_v416.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -3193,7 +3254,7 @@ var openaiResponsesChunkSchema = import_v415.z.union([
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
-
+  import_v416.z.object({ type: import_v416.z.string() }).loose()
  // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -3259,19 +3320,19 @@ function supportsFlexProcessing2(modelId) {
 function supportsPriorityProcessing2(modelId) {
  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
-var openaiResponsesProviderOptionsSchema =
-metadata:
-parallelToolCalls:
-previousResponseId:
-store:
-user:
-reasoningEffort:
-strictJsonSchema:
-instructions:
-reasoningSummary:
-serviceTier:
-include:
-textVerbosity:
+var openaiResponsesProviderOptionsSchema = import_v416.z.object({
+  metadata: import_v416.z.any().nullish(),
+  parallelToolCalls: import_v416.z.boolean().nullish(),
+  previousResponseId: import_v416.z.string().nullish(),
+  store: import_v416.z.boolean().nullish(),
+  user: import_v416.z.string().nullish(),
+  reasoningEffort: import_v416.z.string().nullish(),
+  strictJsonSchema: import_v416.z.boolean().nullish(),
+  instructions: import_v416.z.string().nullish(),
+  reasoningSummary: import_v416.z.string().nullish(),
+  serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
+  include: import_v416.z.array(import_v416.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {