@ai-sdk/openai 2.1.0-beta.9 → 3.0.0-beta.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +77 -0
- package/dist/index.d.mts +54 -75
- package/dist/index.d.ts +54 -75
- package/dist/index.js +1391 -1043
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1345 -952
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +174 -152
- package/dist/internal/index.d.ts +174 -152
- package/dist/internal/index.js +1387 -1033
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1353 -958
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/internal/index.js
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
2
3
|
var __defProp = Object.defineProperty;
|
|
3
4
|
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
5
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
8
|
var __export = (target, all) => {
|
|
7
9
|
for (var name in all)
|
|
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
15
17
|
}
|
|
16
18
|
return to;
|
|
17
19
|
};
|
|
20
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
21
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
22
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
23
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
24
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
25
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
26
|
+
mod
|
|
27
|
+
));
|
|
18
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
29
|
|
|
20
30
|
// src/internal/index.ts
|
|
@@ -36,31 +46,34 @@ __export(internal_exports, {
|
|
|
36
46
|
fileSearchArgsSchema: () => fileSearchArgsSchema,
|
|
37
47
|
fileSearchOutputSchema: () => fileSearchOutputSchema,
|
|
38
48
|
hasDefaultResponseFormat: () => hasDefaultResponseFormat,
|
|
49
|
+
imageGeneration: () => imageGeneration,
|
|
50
|
+
imageGenerationArgsSchema: () => imageGenerationArgsSchema,
|
|
51
|
+
imageGenerationOutputSchema: () => imageGenerationOutputSchema,
|
|
39
52
|
modelMaxImagesPerCall: () => modelMaxImagesPerCall,
|
|
40
53
|
openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
|
|
41
54
|
openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
|
|
42
55
|
openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
|
|
43
|
-
openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
|
|
56
|
+
openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
|
|
57
|
+
openaiSpeechProviderOptionsSchema: () => openaiSpeechProviderOptionsSchema
|
|
44
58
|
});
|
|
45
59
|
module.exports = __toCommonJS(internal_exports);
|
|
46
60
|
|
|
47
61
|
// src/chat/openai-chat-language-model.ts
|
|
48
62
|
var import_provider3 = require("@ai-sdk/provider");
|
|
49
|
-
var
|
|
50
|
-
var import_v43 = require("zod/v4");
|
|
63
|
+
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
51
64
|
|
|
52
65
|
// src/openai-error.ts
|
|
53
|
-
var
|
|
66
|
+
var z = __toESM(require("zod/v4"));
|
|
54
67
|
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
55
|
-
var openaiErrorDataSchema =
|
|
56
|
-
error:
|
|
57
|
-
message:
|
|
68
|
+
var openaiErrorDataSchema = z.object({
|
|
69
|
+
error: z.object({
|
|
70
|
+
message: z.string(),
|
|
58
71
|
// The additional information below is handled loosely to support
|
|
59
72
|
// OpenAI-compatible providers that have slightly different error
|
|
60
73
|
// responses:
|
|
61
|
-
type:
|
|
62
|
-
param:
|
|
63
|
-
code:
|
|
74
|
+
type: z.string().nullish(),
|
|
75
|
+
param: z.any().nullish(),
|
|
76
|
+
code: z.union([z.string(), z.number()]).nullish()
|
|
64
77
|
})
|
|
65
78
|
});
|
|
66
79
|
var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
|
|
@@ -75,6 +88,7 @@ function convertToOpenAIChatMessages({
|
|
|
75
88
|
prompt,
|
|
76
89
|
systemMessageMode = "system"
|
|
77
90
|
}) {
|
|
91
|
+
var _a;
|
|
78
92
|
const messages = [];
|
|
79
93
|
const warnings = [];
|
|
80
94
|
for (const { role, content } of prompt) {
|
|
@@ -113,7 +127,7 @@ function convertToOpenAIChatMessages({
|
|
|
113
127
|
messages.push({
|
|
114
128
|
role: "user",
|
|
115
129
|
content: content.map((part, index) => {
|
|
116
|
-
var
|
|
130
|
+
var _a2, _b, _c;
|
|
117
131
|
switch (part.type) {
|
|
118
132
|
case "text": {
|
|
119
133
|
return { type: "text", text: part.text };
|
|
@@ -126,7 +140,7 @@ function convertToOpenAIChatMessages({
|
|
|
126
140
|
image_url: {
|
|
127
141
|
url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`,
|
|
128
142
|
// OpenAI specific extension: image detail
|
|
129
|
-
detail: (_b = (
|
|
143
|
+
detail: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b.imageDetail
|
|
130
144
|
}
|
|
131
145
|
};
|
|
132
146
|
} else if (part.mediaType.startsWith("audio/")) {
|
|
@@ -223,6 +237,9 @@ function convertToOpenAIChatMessages({
|
|
|
223
237
|
case "error-text":
|
|
224
238
|
contentValue = output.value;
|
|
225
239
|
break;
|
|
240
|
+
case "execution-denied":
|
|
241
|
+
contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
|
|
242
|
+
break;
|
|
226
243
|
case "content":
|
|
227
244
|
case "json":
|
|
228
245
|
case "error-json":
|
|
@@ -276,95 +293,238 @@ function mapOpenAIFinishReason(finishReason) {
|
|
|
276
293
|
}
|
|
277
294
|
}
|
|
278
295
|
|
|
296
|
+
// src/chat/openai-chat-api.ts
|
|
297
|
+
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
298
|
+
var z2 = __toESM(require("zod/v4"));
|
|
299
|
+
var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
|
|
300
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
301
|
+
z2.object({
|
|
302
|
+
id: z2.string().nullish(),
|
|
303
|
+
created: z2.number().nullish(),
|
|
304
|
+
model: z2.string().nullish(),
|
|
305
|
+
choices: z2.array(
|
|
306
|
+
z2.object({
|
|
307
|
+
message: z2.object({
|
|
308
|
+
role: z2.literal("assistant").nullish(),
|
|
309
|
+
content: z2.string().nullish(),
|
|
310
|
+
tool_calls: z2.array(
|
|
311
|
+
z2.object({
|
|
312
|
+
id: z2.string().nullish(),
|
|
313
|
+
type: z2.literal("function"),
|
|
314
|
+
function: z2.object({
|
|
315
|
+
name: z2.string(),
|
|
316
|
+
arguments: z2.string()
|
|
317
|
+
})
|
|
318
|
+
})
|
|
319
|
+
).nullish(),
|
|
320
|
+
annotations: z2.array(
|
|
321
|
+
z2.object({
|
|
322
|
+
type: z2.literal("url_citation"),
|
|
323
|
+
start_index: z2.number(),
|
|
324
|
+
end_index: z2.number(),
|
|
325
|
+
url: z2.string(),
|
|
326
|
+
title: z2.string()
|
|
327
|
+
})
|
|
328
|
+
).nullish()
|
|
329
|
+
}),
|
|
330
|
+
index: z2.number(),
|
|
331
|
+
logprobs: z2.object({
|
|
332
|
+
content: z2.array(
|
|
333
|
+
z2.object({
|
|
334
|
+
token: z2.string(),
|
|
335
|
+
logprob: z2.number(),
|
|
336
|
+
top_logprobs: z2.array(
|
|
337
|
+
z2.object({
|
|
338
|
+
token: z2.string(),
|
|
339
|
+
logprob: z2.number()
|
|
340
|
+
})
|
|
341
|
+
)
|
|
342
|
+
})
|
|
343
|
+
).nullish()
|
|
344
|
+
}).nullish(),
|
|
345
|
+
finish_reason: z2.string().nullish()
|
|
346
|
+
})
|
|
347
|
+
),
|
|
348
|
+
usage: z2.object({
|
|
349
|
+
prompt_tokens: z2.number().nullish(),
|
|
350
|
+
completion_tokens: z2.number().nullish(),
|
|
351
|
+
total_tokens: z2.number().nullish(),
|
|
352
|
+
prompt_tokens_details: z2.object({
|
|
353
|
+
cached_tokens: z2.number().nullish()
|
|
354
|
+
}).nullish(),
|
|
355
|
+
completion_tokens_details: z2.object({
|
|
356
|
+
reasoning_tokens: z2.number().nullish(),
|
|
357
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
358
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
359
|
+
}).nullish()
|
|
360
|
+
}).nullish()
|
|
361
|
+
})
|
|
362
|
+
)
|
|
363
|
+
);
|
|
364
|
+
var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
|
|
365
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
366
|
+
z2.union([
|
|
367
|
+
z2.object({
|
|
368
|
+
id: z2.string().nullish(),
|
|
369
|
+
created: z2.number().nullish(),
|
|
370
|
+
model: z2.string().nullish(),
|
|
371
|
+
choices: z2.array(
|
|
372
|
+
z2.object({
|
|
373
|
+
delta: z2.object({
|
|
374
|
+
role: z2.enum(["assistant"]).nullish(),
|
|
375
|
+
content: z2.string().nullish(),
|
|
376
|
+
tool_calls: z2.array(
|
|
377
|
+
z2.object({
|
|
378
|
+
index: z2.number(),
|
|
379
|
+
id: z2.string().nullish(),
|
|
380
|
+
type: z2.literal("function").nullish(),
|
|
381
|
+
function: z2.object({
|
|
382
|
+
name: z2.string().nullish(),
|
|
383
|
+
arguments: z2.string().nullish()
|
|
384
|
+
})
|
|
385
|
+
})
|
|
386
|
+
).nullish(),
|
|
387
|
+
annotations: z2.array(
|
|
388
|
+
z2.object({
|
|
389
|
+
type: z2.literal("url_citation"),
|
|
390
|
+
start_index: z2.number(),
|
|
391
|
+
end_index: z2.number(),
|
|
392
|
+
url: z2.string(),
|
|
393
|
+
title: z2.string()
|
|
394
|
+
})
|
|
395
|
+
).nullish()
|
|
396
|
+
}).nullish(),
|
|
397
|
+
logprobs: z2.object({
|
|
398
|
+
content: z2.array(
|
|
399
|
+
z2.object({
|
|
400
|
+
token: z2.string(),
|
|
401
|
+
logprob: z2.number(),
|
|
402
|
+
top_logprobs: z2.array(
|
|
403
|
+
z2.object({
|
|
404
|
+
token: z2.string(),
|
|
405
|
+
logprob: z2.number()
|
|
406
|
+
})
|
|
407
|
+
)
|
|
408
|
+
})
|
|
409
|
+
).nullish()
|
|
410
|
+
}).nullish(),
|
|
411
|
+
finish_reason: z2.string().nullish(),
|
|
412
|
+
index: z2.number()
|
|
413
|
+
})
|
|
414
|
+
),
|
|
415
|
+
usage: z2.object({
|
|
416
|
+
prompt_tokens: z2.number().nullish(),
|
|
417
|
+
completion_tokens: z2.number().nullish(),
|
|
418
|
+
total_tokens: z2.number().nullish(),
|
|
419
|
+
prompt_tokens_details: z2.object({
|
|
420
|
+
cached_tokens: z2.number().nullish()
|
|
421
|
+
}).nullish(),
|
|
422
|
+
completion_tokens_details: z2.object({
|
|
423
|
+
reasoning_tokens: z2.number().nullish(),
|
|
424
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
425
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
426
|
+
}).nullish()
|
|
427
|
+
}).nullish()
|
|
428
|
+
}),
|
|
429
|
+
openaiErrorDataSchema
|
|
430
|
+
])
|
|
431
|
+
)
|
|
432
|
+
);
|
|
433
|
+
|
|
279
434
|
// src/chat/openai-chat-options.ts
|
|
280
|
-
var
|
|
281
|
-
var
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
435
|
+
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
436
|
+
var z3 = __toESM(require("zod/v4"));
|
|
437
|
+
var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
438
|
+
() => (0, import_provider_utils4.zodSchema)(
|
|
439
|
+
z3.object({
|
|
440
|
+
/**
|
|
441
|
+
* Modify the likelihood of specified tokens appearing in the completion.
|
|
442
|
+
*
|
|
443
|
+
* Accepts a JSON object that maps tokens (specified by their token ID in
|
|
444
|
+
* the GPT tokenizer) to an associated bias value from -100 to 100.
|
|
445
|
+
*/
|
|
446
|
+
logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
|
|
447
|
+
/**
|
|
448
|
+
* Return the log probabilities of the tokens.
|
|
449
|
+
*
|
|
450
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
451
|
+
* were generated.
|
|
452
|
+
*
|
|
453
|
+
* Setting to a number will return the log probabilities of the top n
|
|
454
|
+
* tokens that were generated.
|
|
455
|
+
*/
|
|
456
|
+
logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
|
|
457
|
+
/**
|
|
458
|
+
* Whether to enable parallel function calling during tool use. Default to true.
|
|
459
|
+
*/
|
|
460
|
+
parallelToolCalls: z3.boolean().optional(),
|
|
461
|
+
/**
|
|
462
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
463
|
+
* monitor and detect abuse.
|
|
464
|
+
*/
|
|
465
|
+
user: z3.string().optional(),
|
|
466
|
+
/**
|
|
467
|
+
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
468
|
+
*/
|
|
469
|
+
reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
470
|
+
/**
|
|
471
|
+
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
472
|
+
*/
|
|
473
|
+
maxCompletionTokens: z3.number().optional(),
|
|
474
|
+
/**
|
|
475
|
+
* Whether to enable persistence in responses API.
|
|
476
|
+
*/
|
|
477
|
+
store: z3.boolean().optional(),
|
|
478
|
+
/**
|
|
479
|
+
* Metadata to associate with the request.
|
|
480
|
+
*/
|
|
481
|
+
metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
|
|
482
|
+
/**
|
|
483
|
+
* Parameters for prediction mode.
|
|
484
|
+
*/
|
|
485
|
+
prediction: z3.record(z3.string(), z3.any()).optional(),
|
|
486
|
+
/**
|
|
487
|
+
* Whether to use structured outputs.
|
|
488
|
+
*
|
|
489
|
+
* @default true
|
|
490
|
+
*/
|
|
491
|
+
structuredOutputs: z3.boolean().optional(),
|
|
492
|
+
/**
|
|
493
|
+
* Service tier for the request.
|
|
494
|
+
* - 'auto': Default service tier
|
|
495
|
+
* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
|
|
496
|
+
* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
|
|
497
|
+
*
|
|
498
|
+
* @default 'auto'
|
|
499
|
+
*/
|
|
500
|
+
serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
|
|
501
|
+
/**
|
|
502
|
+
* Whether to use strict JSON schema validation.
|
|
503
|
+
*
|
|
504
|
+
* @default false
|
|
505
|
+
*/
|
|
506
|
+
strictJsonSchema: z3.boolean().optional(),
|
|
507
|
+
/**
|
|
508
|
+
* Controls the verbosity of the model's responses.
|
|
509
|
+
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
510
|
+
*/
|
|
511
|
+
textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
|
|
512
|
+
/**
|
|
513
|
+
* A cache key for prompt caching. Allows manual control over prompt caching behavior.
|
|
514
|
+
* Useful for improving cache hit rates and working around automatic caching issues.
|
|
515
|
+
*/
|
|
516
|
+
promptCacheKey: z3.string().optional(),
|
|
517
|
+
/**
|
|
518
|
+
* A stable identifier used to help detect users of your application
|
|
519
|
+
* that may be violating OpenAI's usage policies. The IDs should be a
|
|
520
|
+
* string that uniquely identifies each user. We recommend hashing their
|
|
521
|
+
* username or email address, in order to avoid sending us any identifying
|
|
522
|
+
* information.
|
|
523
|
+
*/
|
|
524
|
+
safetyIdentifier: z3.string().optional()
|
|
525
|
+
})
|
|
526
|
+
)
|
|
527
|
+
);
|
|
368
528
|
|
|
369
529
|
// src/chat/openai-chat-prepare-tools.ts
|
|
370
530
|
var import_provider2 = require("@ai-sdk/provider");
|
|
@@ -457,7 +617,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
457
617
|
}) {
|
|
458
618
|
var _a, _b, _c, _d;
|
|
459
619
|
const warnings = [];
|
|
460
|
-
const openaiOptions = (_a = await (0,
|
|
620
|
+
const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
|
|
461
621
|
provider: "openai",
|
|
462
622
|
providerOptions,
|
|
463
623
|
schema: openaiChatLanguageModelOptions
|
|
@@ -636,15 +796,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
636
796
|
responseHeaders,
|
|
637
797
|
value: response,
|
|
638
798
|
rawValue: rawResponse
|
|
639
|
-
} = await (0,
|
|
799
|
+
} = await (0, import_provider_utils5.postJsonToApi)({
|
|
640
800
|
url: this.config.url({
|
|
641
801
|
path: "/chat/completions",
|
|
642
802
|
modelId: this.modelId
|
|
643
803
|
}),
|
|
644
|
-
headers: (0,
|
|
804
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
645
805
|
body,
|
|
646
806
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
647
|
-
successfulResponseHandler: (0,
|
|
807
|
+
successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
|
|
648
808
|
openaiChatResponseSchema
|
|
649
809
|
),
|
|
650
810
|
abortSignal: options.abortSignal,
|
|
@@ -659,7 +819,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
659
819
|
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
|
|
660
820
|
content.push({
|
|
661
821
|
type: "tool-call",
|
|
662
|
-
toolCallId: (_b = toolCall.id) != null ? _b : (0,
|
|
822
|
+
toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
|
|
663
823
|
toolName: toolCall.function.name,
|
|
664
824
|
input: toolCall.function.arguments
|
|
665
825
|
});
|
|
@@ -668,7 +828,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
668
828
|
content.push({
|
|
669
829
|
type: "source",
|
|
670
830
|
sourceType: "url",
|
|
671
|
-
id: (0,
|
|
831
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
672
832
|
url: annotation.url,
|
|
673
833
|
title: annotation.title
|
|
674
834
|
});
|
|
@@ -714,15 +874,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
714
874
|
include_usage: true
|
|
715
875
|
}
|
|
716
876
|
};
|
|
717
|
-
const { responseHeaders, value: response } = await (0,
|
|
877
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
|
|
718
878
|
url: this.config.url({
|
|
719
879
|
path: "/chat/completions",
|
|
720
880
|
modelId: this.modelId
|
|
721
881
|
}),
|
|
722
|
-
headers: (0,
|
|
882
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
723
883
|
body,
|
|
724
884
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
725
|
-
successfulResponseHandler: (0,
|
|
885
|
+
successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
|
|
726
886
|
openaiChatChunkSchema
|
|
727
887
|
),
|
|
728
888
|
abortSignal: options.abortSignal,
|
|
@@ -847,14 +1007,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
847
1007
|
delta: toolCall2.function.arguments
|
|
848
1008
|
});
|
|
849
1009
|
}
|
|
850
|
-
if ((0,
|
|
1010
|
+
if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
|
|
851
1011
|
controller.enqueue({
|
|
852
1012
|
type: "tool-input-end",
|
|
853
1013
|
id: toolCall2.id
|
|
854
1014
|
});
|
|
855
1015
|
controller.enqueue({
|
|
856
1016
|
type: "tool-call",
|
|
857
|
-
toolCallId: (_q = toolCall2.id) != null ? _q : (0,
|
|
1017
|
+
toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
|
|
858
1018
|
toolName: toolCall2.function.name,
|
|
859
1019
|
input: toolCall2.function.arguments
|
|
860
1020
|
});
|
|
@@ -875,14 +1035,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
875
1035
|
id: toolCall.id,
|
|
876
1036
|
delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
|
|
877
1037
|
});
|
|
878
|
-
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
|
|
1038
|
+
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
|
|
879
1039
|
controller.enqueue({
|
|
880
1040
|
type: "tool-input-end",
|
|
881
1041
|
id: toolCall.id
|
|
882
1042
|
});
|
|
883
1043
|
controller.enqueue({
|
|
884
1044
|
type: "tool-call",
|
|
885
|
-
toolCallId: (_x = toolCall.id) != null ? _x : (0,
|
|
1045
|
+
toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
|
|
886
1046
|
toolName: toolCall.function.name,
|
|
887
1047
|
input: toolCall.function.arguments
|
|
888
1048
|
});
|
|
@@ -895,7 +1055,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
895
1055
|
controller.enqueue({
|
|
896
1056
|
type: "source",
|
|
897
1057
|
sourceType: "url",
|
|
898
|
-
id: (0,
|
|
1058
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
899
1059
|
url: annotation.url,
|
|
900
1060
|
title: annotation.title
|
|
901
1061
|
});
|
|
@@ -920,121 +1080,6 @@ var OpenAIChatLanguageModel = class {
|
|
|
920
1080
|
};
|
|
921
1081
|
}
|
|
922
1082
|
};
|
|
923
|
-
var openaiTokenUsageSchema = import_v43.z.object({
|
|
924
|
-
prompt_tokens: import_v43.z.number().nullish(),
|
|
925
|
-
completion_tokens: import_v43.z.number().nullish(),
|
|
926
|
-
total_tokens: import_v43.z.number().nullish(),
|
|
927
|
-
prompt_tokens_details: import_v43.z.object({
|
|
928
|
-
cached_tokens: import_v43.z.number().nullish()
|
|
929
|
-
}).nullish(),
|
|
930
|
-
completion_tokens_details: import_v43.z.object({
|
|
931
|
-
reasoning_tokens: import_v43.z.number().nullish(),
|
|
932
|
-
accepted_prediction_tokens: import_v43.z.number().nullish(),
|
|
933
|
-
rejected_prediction_tokens: import_v43.z.number().nullish()
|
|
934
|
-
}).nullish()
|
|
935
|
-
}).nullish();
|
|
936
|
-
var openaiChatResponseSchema = import_v43.z.object({
|
|
937
|
-
id: import_v43.z.string().nullish(),
|
|
938
|
-
created: import_v43.z.number().nullish(),
|
|
939
|
-
model: import_v43.z.string().nullish(),
|
|
940
|
-
choices: import_v43.z.array(
|
|
941
|
-
import_v43.z.object({
|
|
942
|
-
message: import_v43.z.object({
|
|
943
|
-
role: import_v43.z.literal("assistant").nullish(),
|
|
944
|
-
content: import_v43.z.string().nullish(),
|
|
945
|
-
tool_calls: import_v43.z.array(
|
|
946
|
-
import_v43.z.object({
|
|
947
|
-
id: import_v43.z.string().nullish(),
|
|
948
|
-
type: import_v43.z.literal("function"),
|
|
949
|
-
function: import_v43.z.object({
|
|
950
|
-
name: import_v43.z.string(),
|
|
951
|
-
arguments: import_v43.z.string()
|
|
952
|
-
})
|
|
953
|
-
})
|
|
954
|
-
).nullish(),
|
|
955
|
-
annotations: import_v43.z.array(
|
|
956
|
-
import_v43.z.object({
|
|
957
|
-
type: import_v43.z.literal("url_citation"),
|
|
958
|
-
start_index: import_v43.z.number(),
|
|
959
|
-
end_index: import_v43.z.number(),
|
|
960
|
-
url: import_v43.z.string(),
|
|
961
|
-
title: import_v43.z.string()
|
|
962
|
-
})
|
|
963
|
-
).nullish()
|
|
964
|
-
}),
|
|
965
|
-
index: import_v43.z.number(),
|
|
966
|
-
logprobs: import_v43.z.object({
|
|
967
|
-
content: import_v43.z.array(
|
|
968
|
-
import_v43.z.object({
|
|
969
|
-
token: import_v43.z.string(),
|
|
970
|
-
logprob: import_v43.z.number(),
|
|
971
|
-
top_logprobs: import_v43.z.array(
|
|
972
|
-
import_v43.z.object({
|
|
973
|
-
token: import_v43.z.string(),
|
|
974
|
-
logprob: import_v43.z.number()
|
|
975
|
-
})
|
|
976
|
-
)
|
|
977
|
-
})
|
|
978
|
-
).nullish()
|
|
979
|
-
}).nullish(),
|
|
980
|
-
finish_reason: import_v43.z.string().nullish()
|
|
981
|
-
})
|
|
982
|
-
),
|
|
983
|
-
usage: openaiTokenUsageSchema
|
|
984
|
-
});
|
|
985
|
-
var openaiChatChunkSchema = import_v43.z.union([
|
|
986
|
-
import_v43.z.object({
|
|
987
|
-
id: import_v43.z.string().nullish(),
|
|
988
|
-
created: import_v43.z.number().nullish(),
|
|
989
|
-
model: import_v43.z.string().nullish(),
|
|
990
|
-
choices: import_v43.z.array(
|
|
991
|
-
import_v43.z.object({
|
|
992
|
-
delta: import_v43.z.object({
|
|
993
|
-
role: import_v43.z.enum(["assistant"]).nullish(),
|
|
994
|
-
content: import_v43.z.string().nullish(),
|
|
995
|
-
tool_calls: import_v43.z.array(
|
|
996
|
-
import_v43.z.object({
|
|
997
|
-
index: import_v43.z.number(),
|
|
998
|
-
id: import_v43.z.string().nullish(),
|
|
999
|
-
type: import_v43.z.literal("function").nullish(),
|
|
1000
|
-
function: import_v43.z.object({
|
|
1001
|
-
name: import_v43.z.string().nullish(),
|
|
1002
|
-
arguments: import_v43.z.string().nullish()
|
|
1003
|
-
})
|
|
1004
|
-
})
|
|
1005
|
-
).nullish(),
|
|
1006
|
-
annotations: import_v43.z.array(
|
|
1007
|
-
import_v43.z.object({
|
|
1008
|
-
type: import_v43.z.literal("url_citation"),
|
|
1009
|
-
start_index: import_v43.z.number(),
|
|
1010
|
-
end_index: import_v43.z.number(),
|
|
1011
|
-
url: import_v43.z.string(),
|
|
1012
|
-
title: import_v43.z.string()
|
|
1013
|
-
})
|
|
1014
|
-
).nullish()
|
|
1015
|
-
}).nullish(),
|
|
1016
|
-
logprobs: import_v43.z.object({
|
|
1017
|
-
content: import_v43.z.array(
|
|
1018
|
-
import_v43.z.object({
|
|
1019
|
-
token: import_v43.z.string(),
|
|
1020
|
-
logprob: import_v43.z.number(),
|
|
1021
|
-
top_logprobs: import_v43.z.array(
|
|
1022
|
-
import_v43.z.object({
|
|
1023
|
-
token: import_v43.z.string(),
|
|
1024
|
-
logprob: import_v43.z.number()
|
|
1025
|
-
})
|
|
1026
|
-
)
|
|
1027
|
-
})
|
|
1028
|
-
).nullish()
|
|
1029
|
-
}).nullish(),
|
|
1030
|
-
finish_reason: import_v43.z.string().nullish(),
|
|
1031
|
-
index: import_v43.z.number()
|
|
1032
|
-
})
|
|
1033
|
-
),
|
|
1034
|
-
usage: openaiTokenUsageSchema
|
|
1035
|
-
}),
|
|
1036
|
-
openaiErrorDataSchema
|
|
1037
|
-
]);
|
|
1038
1083
|
function isReasoningModel(modelId) {
|
|
1039
1084
|
return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
|
|
1040
1085
|
}
|
|
@@ -1085,8 +1130,7 @@ var reasoningModels = {
|
|
|
1085
1130
|
};
|
|
1086
1131
|
|
|
1087
1132
|
// src/completion/openai-completion-language-model.ts
|
|
1088
|
-
var
|
|
1089
|
-
var import_v45 = require("zod/v4");
|
|
1133
|
+
var import_provider_utils8 = require("@ai-sdk/provider-utils");
|
|
1090
1134
|
|
|
1091
1135
|
// src/completion/convert-to-openai-completion-prompt.ts
|
|
1092
1136
|
var import_provider4 = require("@ai-sdk/provider");
|
|
@@ -1193,48 +1237,111 @@ function mapOpenAIFinishReason2(finishReason) {
|
|
|
1193
1237
|
}
|
|
1194
1238
|
}
|
|
1195
1239
|
|
|
1240
|
+
// src/completion/openai-completion-api.ts
|
|
1241
|
+
var z4 = __toESM(require("zod/v4"));
|
|
1242
|
+
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1243
|
+
var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1244
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1245
|
+
z4.object({
|
|
1246
|
+
id: z4.string().nullish(),
|
|
1247
|
+
created: z4.number().nullish(),
|
|
1248
|
+
model: z4.string().nullish(),
|
|
1249
|
+
choices: z4.array(
|
|
1250
|
+
z4.object({
|
|
1251
|
+
text: z4.string(),
|
|
1252
|
+
finish_reason: z4.string(),
|
|
1253
|
+
logprobs: z4.object({
|
|
1254
|
+
tokens: z4.array(z4.string()),
|
|
1255
|
+
token_logprobs: z4.array(z4.number()),
|
|
1256
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1257
|
+
}).nullish()
|
|
1258
|
+
})
|
|
1259
|
+
),
|
|
1260
|
+
usage: z4.object({
|
|
1261
|
+
prompt_tokens: z4.number(),
|
|
1262
|
+
completion_tokens: z4.number(),
|
|
1263
|
+
total_tokens: z4.number()
|
|
1264
|
+
}).nullish()
|
|
1265
|
+
})
|
|
1266
|
+
)
|
|
1267
|
+
);
|
|
1268
|
+
var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1269
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1270
|
+
z4.union([
|
|
1271
|
+
z4.object({
|
|
1272
|
+
id: z4.string().nullish(),
|
|
1273
|
+
created: z4.number().nullish(),
|
|
1274
|
+
model: z4.string().nullish(),
|
|
1275
|
+
choices: z4.array(
|
|
1276
|
+
z4.object({
|
|
1277
|
+
text: z4.string(),
|
|
1278
|
+
finish_reason: z4.string().nullish(),
|
|
1279
|
+
index: z4.number(),
|
|
1280
|
+
logprobs: z4.object({
|
|
1281
|
+
tokens: z4.array(z4.string()),
|
|
1282
|
+
token_logprobs: z4.array(z4.number()),
|
|
1283
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1284
|
+
}).nullish()
|
|
1285
|
+
})
|
|
1286
|
+
),
|
|
1287
|
+
usage: z4.object({
|
|
1288
|
+
prompt_tokens: z4.number(),
|
|
1289
|
+
completion_tokens: z4.number(),
|
|
1290
|
+
total_tokens: z4.number()
|
|
1291
|
+
}).nullish()
|
|
1292
|
+
}),
|
|
1293
|
+
openaiErrorDataSchema
|
|
1294
|
+
])
|
|
1295
|
+
)
|
|
1296
|
+
);
|
|
1297
|
+
|
|
1196
1298
|
// src/completion/openai-completion-options.ts
|
|
1197
|
-
var
|
|
1198
|
-
var
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1299
|
+
var import_provider_utils7 = require("@ai-sdk/provider-utils");
|
|
1300
|
+
var z5 = __toESM(require("zod/v4"));
|
|
1301
|
+
var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
|
|
1302
|
+
() => (0, import_provider_utils7.zodSchema)(
|
|
1303
|
+
z5.object({
|
|
1304
|
+
/**
|
|
1305
|
+
Echo back the prompt in addition to the completion.
|
|
1306
|
+
*/
|
|
1307
|
+
echo: z5.boolean().optional(),
|
|
1308
|
+
/**
|
|
1309
|
+
Modify the likelihood of specified tokens appearing in the completion.
|
|
1310
|
+
|
|
1311
|
+
Accepts a JSON object that maps tokens (specified by their token ID in
|
|
1312
|
+
the GPT tokenizer) to an associated bias value from -100 to 100. You
|
|
1313
|
+
can use this tokenizer tool to convert text to token IDs. Mathematically,
|
|
1314
|
+
the bias is added to the logits generated by the model prior to sampling.
|
|
1315
|
+
The exact effect will vary per model, but values between -1 and 1 should
|
|
1316
|
+
decrease or increase likelihood of selection; values like -100 or 100
|
|
1317
|
+
should result in a ban or exclusive selection of the relevant token.
|
|
1318
|
+
|
|
1319
|
+
As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
1320
|
+
token from being generated.
|
|
1321
|
+
*/
|
|
1322
|
+
logitBias: z5.record(z5.string(), z5.number()).optional(),
|
|
1323
|
+
/**
|
|
1324
|
+
The suffix that comes after a completion of inserted text.
|
|
1325
|
+
*/
|
|
1326
|
+
suffix: z5.string().optional(),
|
|
1327
|
+
/**
|
|
1328
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1329
|
+
monitor and detect abuse. Learn more.
|
|
1330
|
+
*/
|
|
1331
|
+
user: z5.string().optional(),
|
|
1332
|
+
/**
|
|
1333
|
+
Return the log probabilities of the tokens. Including logprobs will increase
|
|
1334
|
+
the response size and can slow down response times. However, it can
|
|
1335
|
+
be useful to better understand how the model is behaving.
|
|
1336
|
+
Setting to true will return the log probabilities of the tokens that
|
|
1337
|
+
were generated.
|
|
1338
|
+
Setting to a number will return the log probabilities of the top n
|
|
1339
|
+
tokens that were generated.
|
|
1340
|
+
*/
|
|
1341
|
+
logprobs: z5.union([z5.boolean(), z5.number()]).optional()
|
|
1342
|
+
})
|
|
1343
|
+
)
|
|
1344
|
+
);
|
|
1238
1345
|
|
|
1239
1346
|
// src/completion/openai-completion-language-model.ts
|
|
1240
1347
|
var OpenAICompletionLanguageModel = class {
|
|
@@ -1269,12 +1376,12 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1269
1376
|
}) {
|
|
1270
1377
|
const warnings = [];
|
|
1271
1378
|
const openaiOptions = {
|
|
1272
|
-
...await (0,
|
|
1379
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1273
1380
|
provider: "openai",
|
|
1274
1381
|
providerOptions,
|
|
1275
1382
|
schema: openaiCompletionProviderOptions
|
|
1276
1383
|
}),
|
|
1277
|
-
...await (0,
|
|
1384
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1278
1385
|
provider: this.providerOptionsName,
|
|
1279
1386
|
providerOptions,
|
|
1280
1387
|
schema: openaiCompletionProviderOptions
|
|
@@ -1330,15 +1437,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1330
1437
|
responseHeaders,
|
|
1331
1438
|
value: response,
|
|
1332
1439
|
rawValue: rawResponse
|
|
1333
|
-
} = await (0,
|
|
1440
|
+
} = await (0, import_provider_utils8.postJsonToApi)({
|
|
1334
1441
|
url: this.config.url({
|
|
1335
1442
|
path: "/completions",
|
|
1336
1443
|
modelId: this.modelId
|
|
1337
1444
|
}),
|
|
1338
|
-
headers: (0,
|
|
1445
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1339
1446
|
body: args,
|
|
1340
1447
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1341
|
-
successfulResponseHandler: (0,
|
|
1448
|
+
successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
|
|
1342
1449
|
openaiCompletionResponseSchema
|
|
1343
1450
|
),
|
|
1344
1451
|
abortSignal: options.abortSignal,
|
|
@@ -1376,15 +1483,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1376
1483
|
include_usage: true
|
|
1377
1484
|
}
|
|
1378
1485
|
};
|
|
1379
|
-
const { responseHeaders, value: response } = await (0,
|
|
1486
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
|
|
1380
1487
|
url: this.config.url({
|
|
1381
1488
|
path: "/completions",
|
|
1382
1489
|
modelId: this.modelId
|
|
1383
1490
|
}),
|
|
1384
|
-
headers: (0,
|
|
1491
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1385
1492
|
body,
|
|
1386
1493
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1387
|
-
successfulResponseHandler: (0,
|
|
1494
|
+
successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
|
|
1388
1495
|
openaiCompletionChunkSchema
|
|
1389
1496
|
),
|
|
1390
1497
|
abortSignal: options.abortSignal,
|
|
@@ -1465,69 +1572,42 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1465
1572
|
};
|
|
1466
1573
|
}
|
|
1467
1574
|
};
|
|
1468
|
-
var usageSchema = import_v45.z.object({
|
|
1469
|
-
prompt_tokens: import_v45.z.number(),
|
|
1470
|
-
completion_tokens: import_v45.z.number(),
|
|
1471
|
-
total_tokens: import_v45.z.number()
|
|
1472
|
-
});
|
|
1473
|
-
var openaiCompletionResponseSchema = import_v45.z.object({
|
|
1474
|
-
id: import_v45.z.string().nullish(),
|
|
1475
|
-
created: import_v45.z.number().nullish(),
|
|
1476
|
-
model: import_v45.z.string().nullish(),
|
|
1477
|
-
choices: import_v45.z.array(
|
|
1478
|
-
import_v45.z.object({
|
|
1479
|
-
text: import_v45.z.string(),
|
|
1480
|
-
finish_reason: import_v45.z.string(),
|
|
1481
|
-
logprobs: import_v45.z.object({
|
|
1482
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1483
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1484
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1485
|
-
}).nullish()
|
|
1486
|
-
})
|
|
1487
|
-
),
|
|
1488
|
-
usage: usageSchema.nullish()
|
|
1489
|
-
});
|
|
1490
|
-
var openaiCompletionChunkSchema = import_v45.z.union([
|
|
1491
|
-
import_v45.z.object({
|
|
1492
|
-
id: import_v45.z.string().nullish(),
|
|
1493
|
-
created: import_v45.z.number().nullish(),
|
|
1494
|
-
model: import_v45.z.string().nullish(),
|
|
1495
|
-
choices: import_v45.z.array(
|
|
1496
|
-
import_v45.z.object({
|
|
1497
|
-
text: import_v45.z.string(),
|
|
1498
|
-
finish_reason: import_v45.z.string().nullish(),
|
|
1499
|
-
index: import_v45.z.number(),
|
|
1500
|
-
logprobs: import_v45.z.object({
|
|
1501
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1502
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1503
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1504
|
-
}).nullish()
|
|
1505
|
-
})
|
|
1506
|
-
),
|
|
1507
|
-
usage: usageSchema.nullish()
|
|
1508
|
-
}),
|
|
1509
|
-
openaiErrorDataSchema
|
|
1510
|
-
]);
|
|
1511
1575
|
|
|
1512
1576
|
// src/embedding/openai-embedding-model.ts
|
|
1513
1577
|
var import_provider5 = require("@ai-sdk/provider");
|
|
1514
|
-
var
|
|
1515
|
-
var import_v47 = require("zod/v4");
|
|
1578
|
+
var import_provider_utils11 = require("@ai-sdk/provider-utils");
|
|
1516
1579
|
|
|
1517
1580
|
// src/embedding/openai-embedding-options.ts
|
|
1518
|
-
var
|
|
1519
|
-
var
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1581
|
+
var import_provider_utils9 = require("@ai-sdk/provider-utils");
|
|
1582
|
+
var z6 = __toESM(require("zod/v4"));
|
|
1583
|
+
var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
|
|
1584
|
+
() => (0, import_provider_utils9.zodSchema)(
|
|
1585
|
+
z6.object({
|
|
1586
|
+
/**
|
|
1587
|
+
The number of dimensions the resulting output embeddings should have.
|
|
1588
|
+
Only supported in text-embedding-3 and later models.
|
|
1589
|
+
*/
|
|
1590
|
+
dimensions: z6.number().optional(),
|
|
1591
|
+
/**
|
|
1592
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1593
|
+
monitor and detect abuse. Learn more.
|
|
1594
|
+
*/
|
|
1595
|
+
user: z6.string().optional()
|
|
1596
|
+
})
|
|
1597
|
+
)
|
|
1598
|
+
);
|
|
1599
|
+
|
|
1600
|
+
// src/embedding/openai-embedding-api.ts
|
|
1601
|
+
var import_provider_utils10 = require("@ai-sdk/provider-utils");
|
|
1602
|
+
var z7 = __toESM(require("zod/v4"));
|
|
1603
|
+
var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
|
|
1604
|
+
() => (0, import_provider_utils10.zodSchema)(
|
|
1605
|
+
z7.object({
|
|
1606
|
+
data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
|
|
1607
|
+
usage: z7.object({ prompt_tokens: z7.number() }).nullish()
|
|
1608
|
+
})
|
|
1609
|
+
)
|
|
1610
|
+
);
|
|
1531
1611
|
|
|
1532
1612
|
// src/embedding/openai-embedding-model.ts
|
|
1533
1613
|
var OpenAIEmbeddingModel = class {
|
|
@@ -1556,7 +1636,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1556
1636
|
values
|
|
1557
1637
|
});
|
|
1558
1638
|
}
|
|
1559
|
-
const openaiOptions = (_a = await (0,
|
|
1639
|
+
const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
|
|
1560
1640
|
provider: "openai",
|
|
1561
1641
|
providerOptions,
|
|
1562
1642
|
schema: openaiEmbeddingProviderOptions
|
|
@@ -1565,12 +1645,12 @@ var OpenAIEmbeddingModel = class {
|
|
|
1565
1645
|
responseHeaders,
|
|
1566
1646
|
value: response,
|
|
1567
1647
|
rawValue
|
|
1568
|
-
} = await (0,
|
|
1648
|
+
} = await (0, import_provider_utils11.postJsonToApi)({
|
|
1569
1649
|
url: this.config.url({
|
|
1570
1650
|
path: "/embeddings",
|
|
1571
1651
|
modelId: this.modelId
|
|
1572
1652
|
}),
|
|
1573
|
-
headers: (0,
|
|
1653
|
+
headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
|
|
1574
1654
|
body: {
|
|
1575
1655
|
model: this.modelId,
|
|
1576
1656
|
input: values,
|
|
@@ -1579,7 +1659,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1579
1659
|
user: openaiOptions.user
|
|
1580
1660
|
},
|
|
1581
1661
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1582
|
-
successfulResponseHandler: (0,
|
|
1662
|
+
successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
|
|
1583
1663
|
openaiTextEmbeddingResponseSchema
|
|
1584
1664
|
),
|
|
1585
1665
|
abortSignal,
|
|
@@ -1592,22 +1672,37 @@ var OpenAIEmbeddingModel = class {
|
|
|
1592
1672
|
};
|
|
1593
1673
|
}
|
|
1594
1674
|
};
|
|
1595
|
-
var openaiTextEmbeddingResponseSchema = import_v47.z.object({
|
|
1596
|
-
data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
|
|
1597
|
-
usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
|
|
1598
|
-
});
|
|
1599
1675
|
|
|
1600
1676
|
// src/image/openai-image-model.ts
|
|
1601
|
-
var
|
|
1602
|
-
|
|
1677
|
+
var import_provider_utils13 = require("@ai-sdk/provider-utils");
|
|
1678
|
+
|
|
1679
|
+
// src/image/openai-image-api.ts
|
|
1680
|
+
var import_provider_utils12 = require("@ai-sdk/provider-utils");
|
|
1681
|
+
var z8 = __toESM(require("zod/v4"));
|
|
1682
|
+
var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
|
|
1683
|
+
() => (0, import_provider_utils12.zodSchema)(
|
|
1684
|
+
z8.object({
|
|
1685
|
+
data: z8.array(
|
|
1686
|
+
z8.object({
|
|
1687
|
+
b64_json: z8.string(),
|
|
1688
|
+
revised_prompt: z8.string().optional()
|
|
1689
|
+
})
|
|
1690
|
+
)
|
|
1691
|
+
})
|
|
1692
|
+
)
|
|
1693
|
+
);
|
|
1603
1694
|
|
|
1604
1695
|
// src/image/openai-image-options.ts
|
|
1605
1696
|
var modelMaxImagesPerCall = {
|
|
1606
1697
|
"dall-e-3": 1,
|
|
1607
1698
|
"dall-e-2": 10,
|
|
1608
|
-
"gpt-image-1": 10
|
|
1699
|
+
"gpt-image-1": 10,
|
|
1700
|
+
"gpt-image-1-mini": 10
|
|
1609
1701
|
};
|
|
1610
|
-
var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
|
|
1702
|
+
var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
|
|
1703
|
+
"gpt-image-1",
|
|
1704
|
+
"gpt-image-1-mini"
|
|
1705
|
+
]);
|
|
1611
1706
|
|
|
1612
1707
|
// src/image/openai-image-model.ts
|
|
1613
1708
|
var OpenAIImageModel = class {
|
|
@@ -1646,12 +1741,12 @@ var OpenAIImageModel = class {
|
|
|
1646
1741
|
warnings.push({ type: "unsupported-setting", setting: "seed" });
|
|
1647
1742
|
}
|
|
1648
1743
|
const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
|
|
1649
|
-
const { value: response, responseHeaders } = await (0,
|
|
1744
|
+
const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
|
|
1650
1745
|
url: this.config.url({
|
|
1651
1746
|
path: "/images/generations",
|
|
1652
1747
|
modelId: this.modelId
|
|
1653
1748
|
}),
|
|
1654
|
-
headers: (0,
|
|
1749
|
+
headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
|
|
1655
1750
|
body: {
|
|
1656
1751
|
model: this.modelId,
|
|
1657
1752
|
prompt,
|
|
@@ -1661,7 +1756,7 @@ var OpenAIImageModel = class {
|
|
|
1661
1756
|
...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
|
|
1662
1757
|
},
|
|
1663
1758
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1664
|
-
successfulResponseHandler: (0,
|
|
1759
|
+
successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
|
|
1665
1760
|
openaiImageResponseSchema
|
|
1666
1761
|
),
|
|
1667
1762
|
abortSignal,
|
|
@@ -1687,42 +1782,75 @@ var OpenAIImageModel = class {
|
|
|
1687
1782
|
};
|
|
1688
1783
|
}
|
|
1689
1784
|
};
|
|
1690
|
-
var openaiImageResponseSchema = import_v48.z.object({
|
|
1691
|
-
data: import_v48.z.array(
|
|
1692
|
-
import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
|
|
1693
|
-
)
|
|
1694
|
-
});
|
|
1695
1785
|
|
|
1696
1786
|
// src/transcription/openai-transcription-model.ts
|
|
1697
|
-
var
|
|
1698
|
-
|
|
1787
|
+
var import_provider_utils16 = require("@ai-sdk/provider-utils");
|
|
1788
|
+
|
|
1789
|
+
// src/transcription/openai-transcription-api.ts
|
|
1790
|
+
var import_provider_utils14 = require("@ai-sdk/provider-utils");
|
|
1791
|
+
var z9 = __toESM(require("zod/v4"));
|
|
1792
|
+
var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidator)(
|
|
1793
|
+
() => (0, import_provider_utils14.zodSchema)(
|
|
1794
|
+
z9.object({
|
|
1795
|
+
text: z9.string(),
|
|
1796
|
+
language: z9.string().nullish(),
|
|
1797
|
+
duration: z9.number().nullish(),
|
|
1798
|
+
words: z9.array(
|
|
1799
|
+
z9.object({
|
|
1800
|
+
word: z9.string(),
|
|
1801
|
+
start: z9.number(),
|
|
1802
|
+
end: z9.number()
|
|
1803
|
+
})
|
|
1804
|
+
).nullish(),
|
|
1805
|
+
segments: z9.array(
|
|
1806
|
+
z9.object({
|
|
1807
|
+
id: z9.number(),
|
|
1808
|
+
seek: z9.number(),
|
|
1809
|
+
start: z9.number(),
|
|
1810
|
+
end: z9.number(),
|
|
1811
|
+
text: z9.string(),
|
|
1812
|
+
tokens: z9.array(z9.number()),
|
|
1813
|
+
temperature: z9.number(),
|
|
1814
|
+
avg_logprob: z9.number(),
|
|
1815
|
+
compression_ratio: z9.number(),
|
|
1816
|
+
no_speech_prob: z9.number()
|
|
1817
|
+
})
|
|
1818
|
+
).nullish()
|
|
1819
|
+
})
|
|
1820
|
+
)
|
|
1821
|
+
);
|
|
1699
1822
|
|
|
1700
1823
|
// src/transcription/openai-transcription-options.ts
|
|
1701
|
-
var
|
|
1702
|
-
var
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1824
|
+
var import_provider_utils15 = require("@ai-sdk/provider-utils");
|
|
1825
|
+
var z10 = __toESM(require("zod/v4"));
|
|
1826
|
+
var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazyValidator)(
|
|
1827
|
+
() => (0, import_provider_utils15.zodSchema)(
|
|
1828
|
+
z10.object({
|
|
1829
|
+
/**
|
|
1830
|
+
* Additional information to include in the transcription response.
|
|
1831
|
+
*/
|
|
1832
|
+
include: z10.array(z10.string()).optional(),
|
|
1833
|
+
/**
|
|
1834
|
+
* The language of the input audio in ISO-639-1 format.
|
|
1835
|
+
*/
|
|
1836
|
+
language: z10.string().optional(),
|
|
1837
|
+
/**
|
|
1838
|
+
* An optional text to guide the model's style or continue a previous audio segment.
|
|
1839
|
+
*/
|
|
1840
|
+
prompt: z10.string().optional(),
|
|
1841
|
+
/**
|
|
1842
|
+
* The sampling temperature, between 0 and 1.
|
|
1843
|
+
* @default 0
|
|
1844
|
+
*/
|
|
1845
|
+
temperature: z10.number().min(0).max(1).default(0).optional(),
|
|
1846
|
+
/**
|
|
1847
|
+
* The timestamp granularities to populate for this transcription.
|
|
1848
|
+
* @default ['segment']
|
|
1849
|
+
*/
|
|
1850
|
+
timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
|
|
1851
|
+
})
|
|
1852
|
+
)
|
|
1853
|
+
);
|
|
1726
1854
|
|
|
1727
1855
|
// src/transcription/openai-transcription-model.ts
|
|
1728
1856
|
var languageMap = {
|
|
@@ -1788,7 +1916,7 @@ var OpenAITranscriptionModel = class {
|
|
|
1788
1916
|
constructor(modelId, config) {
|
|
1789
1917
|
this.modelId = modelId;
|
|
1790
1918
|
this.config = config;
|
|
1791
|
-
this.specificationVersion = "
|
|
1919
|
+
this.specificationVersion = "v3";
|
|
1792
1920
|
}
|
|
1793
1921
|
get provider() {
|
|
1794
1922
|
return this.config.provider;
|
|
@@ -1799,15 +1927,15 @@ var OpenAITranscriptionModel = class {
|
|
|
1799
1927
|
providerOptions
|
|
1800
1928
|
}) {
|
|
1801
1929
|
const warnings = [];
|
|
1802
|
-
const openAIOptions = await (0,
|
|
1930
|
+
const openAIOptions = await (0, import_provider_utils16.parseProviderOptions)({
|
|
1803
1931
|
provider: "openai",
|
|
1804
1932
|
providerOptions,
|
|
1805
1933
|
schema: openAITranscriptionProviderOptions
|
|
1806
1934
|
});
|
|
1807
1935
|
const formData = new FormData();
|
|
1808
|
-
const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
|
|
1936
|
+
const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils16.convertBase64ToUint8Array)(audio)]);
|
|
1809
1937
|
formData.append("model", this.modelId);
|
|
1810
|
-
const fileExtension = (0,
|
|
1938
|
+
const fileExtension = (0, import_provider_utils16.mediaTypeToExtension)(mediaType);
|
|
1811
1939
|
formData.append(
|
|
1812
1940
|
"file",
|
|
1813
1941
|
new File([blob], "audio", { type: mediaType }),
|
|
@@ -1852,15 +1980,15 @@ var OpenAITranscriptionModel = class {
|
|
|
1852
1980
|
value: response,
|
|
1853
1981
|
responseHeaders,
|
|
1854
1982
|
rawValue: rawResponse
|
|
1855
|
-
} = await (0,
|
|
1983
|
+
} = await (0, import_provider_utils16.postFormDataToApi)({
|
|
1856
1984
|
url: this.config.url({
|
|
1857
1985
|
path: "/audio/transcriptions",
|
|
1858
1986
|
modelId: this.modelId
|
|
1859
1987
|
}),
|
|
1860
|
-
headers: (0,
|
|
1988
|
+
headers: (0, import_provider_utils16.combineHeaders)(this.config.headers(), options.headers),
|
|
1861
1989
|
formData,
|
|
1862
1990
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1863
|
-
successfulResponseHandler: (0,
|
|
1991
|
+
successfulResponseHandler: (0, import_provider_utils16.createJsonResponseHandler)(
|
|
1864
1992
|
openaiTranscriptionResponseSchema
|
|
1865
1993
|
),
|
|
1866
1994
|
abortSignal: options.abortSignal,
|
|
@@ -1890,45 +2018,28 @@ var OpenAITranscriptionModel = class {
|
|
|
1890
2018
|
};
|
|
1891
2019
|
}
|
|
1892
2020
|
};
|
|
1893
|
-
|
|
1894
|
-
|
|
1895
|
-
|
|
1896
|
-
|
|
1897
|
-
|
|
1898
|
-
|
|
1899
|
-
|
|
1900
|
-
|
|
1901
|
-
|
|
1902
|
-
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
import_v410.z.object({
|
|
1906
|
-
id: import_v410.z.number(),
|
|
1907
|
-
seek: import_v410.z.number(),
|
|
1908
|
-
start: import_v410.z.number(),
|
|
1909
|
-
end: import_v410.z.number(),
|
|
1910
|
-
text: import_v410.z.string(),
|
|
1911
|
-
tokens: import_v410.z.array(import_v410.z.number()),
|
|
1912
|
-
temperature: import_v410.z.number(),
|
|
1913
|
-
avg_logprob: import_v410.z.number(),
|
|
1914
|
-
compression_ratio: import_v410.z.number(),
|
|
1915
|
-
no_speech_prob: import_v410.z.number()
|
|
2021
|
+
|
|
2022
|
+
// src/speech/openai-speech-model.ts
|
|
2023
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
|
2024
|
+
|
|
2025
|
+
// src/speech/openai-speech-options.ts
|
|
2026
|
+
var import_provider_utils17 = require("@ai-sdk/provider-utils");
|
|
2027
|
+
var z11 = __toESM(require("zod/v4"));
|
|
2028
|
+
var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazyValidator)(
|
|
2029
|
+
() => (0, import_provider_utils17.zodSchema)(
|
|
2030
|
+
z11.object({
|
|
2031
|
+
instructions: z11.string().nullish(),
|
|
2032
|
+
speed: z11.number().min(0.25).max(4).default(1).nullish()
|
|
1916
2033
|
})
|
|
1917
|
-
)
|
|
1918
|
-
|
|
2034
|
+
)
|
|
2035
|
+
);
|
|
1919
2036
|
|
|
1920
2037
|
// src/speech/openai-speech-model.ts
|
|
1921
|
-
var import_provider_utils8 = require("@ai-sdk/provider-utils");
|
|
1922
|
-
var import_v411 = require("zod/v4");
|
|
1923
|
-
var OpenAIProviderOptionsSchema = import_v411.z.object({
|
|
1924
|
-
instructions: import_v411.z.string().nullish(),
|
|
1925
|
-
speed: import_v411.z.number().min(0.25).max(4).default(1).nullish()
|
|
1926
|
-
});
|
|
1927
2038
|
var OpenAISpeechModel = class {
|
|
1928
2039
|
constructor(modelId, config) {
|
|
1929
2040
|
this.modelId = modelId;
|
|
1930
2041
|
this.config = config;
|
|
1931
|
-
this.specificationVersion = "
|
|
2042
|
+
this.specificationVersion = "v3";
|
|
1932
2043
|
}
|
|
1933
2044
|
get provider() {
|
|
1934
2045
|
return this.config.provider;
|
|
@@ -1943,10 +2054,10 @@ var OpenAISpeechModel = class {
|
|
|
1943
2054
|
providerOptions
|
|
1944
2055
|
}) {
|
|
1945
2056
|
const warnings = [];
|
|
1946
|
-
const openAIOptions = await (0,
|
|
2057
|
+
const openAIOptions = await (0, import_provider_utils18.parseProviderOptions)({
|
|
1947
2058
|
provider: "openai",
|
|
1948
2059
|
providerOptions,
|
|
1949
|
-
schema:
|
|
2060
|
+
schema: openaiSpeechProviderOptionsSchema
|
|
1950
2061
|
});
|
|
1951
2062
|
const requestBody = {
|
|
1952
2063
|
model: this.modelId,
|
|
@@ -1996,15 +2107,15 @@ var OpenAISpeechModel = class {
|
|
|
1996
2107
|
value: audio,
|
|
1997
2108
|
responseHeaders,
|
|
1998
2109
|
rawValue: rawResponse
|
|
1999
|
-
} = await (0,
|
|
2110
|
+
} = await (0, import_provider_utils18.postJsonToApi)({
|
|
2000
2111
|
url: this.config.url({
|
|
2001
2112
|
path: "/audio/speech",
|
|
2002
2113
|
modelId: this.modelId
|
|
2003
2114
|
}),
|
|
2004
|
-
headers: (0,
|
|
2115
|
+
headers: (0, import_provider_utils18.combineHeaders)(this.config.headers(), options.headers),
|
|
2005
2116
|
body: requestBody,
|
|
2006
2117
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2007
|
-
successfulResponseHandler: (0,
|
|
2118
|
+
successfulResponseHandler: (0, import_provider_utils18.createBinaryResponseHandler)(),
|
|
2008
2119
|
abortSignal: options.abortSignal,
|
|
2009
2120
|
fetch: this.config.fetch
|
|
2010
2121
|
});
|
|
@@ -2026,31 +2137,34 @@ var OpenAISpeechModel = class {
|
|
|
2026
2137
|
|
|
2027
2138
|
// src/responses/openai-responses-language-model.ts
|
|
2028
2139
|
var import_provider8 = require("@ai-sdk/provider");
|
|
2029
|
-
var
|
|
2030
|
-
var import_v419 = require("zod/v4");
|
|
2140
|
+
var import_provider_utils29 = require("@ai-sdk/provider-utils");
|
|
2031
2141
|
|
|
2032
2142
|
// src/responses/convert-to-openai-responses-input.ts
|
|
2033
2143
|
var import_provider6 = require("@ai-sdk/provider");
|
|
2034
|
-
var
|
|
2035
|
-
var
|
|
2144
|
+
var import_provider_utils20 = require("@ai-sdk/provider-utils");
|
|
2145
|
+
var z13 = __toESM(require("zod/v4"));
|
|
2036
2146
|
|
|
2037
2147
|
// src/tool/local-shell.ts
|
|
2038
|
-
var
|
|
2039
|
-
var
|
|
2040
|
-
var localShellInputSchema =
|
|
2041
|
-
|
|
2042
|
-
|
|
2043
|
-
|
|
2044
|
-
|
|
2045
|
-
|
|
2046
|
-
|
|
2047
|
-
|
|
2048
|
-
|
|
2049
|
-
|
|
2050
|
-
|
|
2051
|
-
|
|
2052
|
-
|
|
2053
|
-
|
|
2148
|
+
var import_provider_utils19 = require("@ai-sdk/provider-utils");
|
|
2149
|
+
var z12 = __toESM(require("zod/v4"));
|
|
2150
|
+
var localShellInputSchema = (0, import_provider_utils19.lazySchema)(
|
|
2151
|
+
() => (0, import_provider_utils19.zodSchema)(
|
|
2152
|
+
z12.object({
|
|
2153
|
+
action: z12.object({
|
|
2154
|
+
type: z12.literal("exec"),
|
|
2155
|
+
command: z12.array(z12.string()),
|
|
2156
|
+
timeoutMs: z12.number().optional(),
|
|
2157
|
+
user: z12.string().optional(),
|
|
2158
|
+
workingDirectory: z12.string().optional(),
|
|
2159
|
+
env: z12.record(z12.string(), z12.string()).optional()
|
|
2160
|
+
})
|
|
2161
|
+
})
|
|
2162
|
+
)
|
|
2163
|
+
);
|
|
2164
|
+
var localShellOutputSchema = (0, import_provider_utils19.lazySchema)(
|
|
2165
|
+
() => (0, import_provider_utils19.zodSchema)(z12.object({ output: z12.string() }))
|
|
2166
|
+
);
|
|
2167
|
+
var localShell = (0, import_provider_utils19.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2054
2168
|
id: "openai.local_shell",
|
|
2055
2169
|
name: "local_shell",
|
|
2056
2170
|
inputSchema: localShellInputSchema,
|
|
@@ -2069,7 +2183,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2069
2183
|
store,
|
|
2070
2184
|
hasLocalShellTool = false
|
|
2071
2185
|
}) {
|
|
2072
|
-
var _a, _b, _c, _d, _e, _f, _g, _h, _i;
|
|
2186
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
|
|
2073
2187
|
const input = [];
|
|
2074
2188
|
const warnings = [];
|
|
2075
2189
|
for (const { role, content } of prompt) {
|
|
@@ -2115,7 +2229,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2115
2229
|
return {
|
|
2116
2230
|
type: "input_image",
|
|
2117
2231
|
...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
2118
|
-
image_url: `data:${mediaType};base64,${(0,
|
|
2232
|
+
image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
|
|
2119
2233
|
},
|
|
2120
2234
|
detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
|
|
2121
2235
|
};
|
|
@@ -2130,7 +2244,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2130
2244
|
type: "input_file",
|
|
2131
2245
|
...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
2132
2246
|
filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
|
|
2133
|
-
file_data: `data:application/pdf;base64,${(0,
|
|
2247
|
+
file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
|
|
2134
2248
|
}
|
|
2135
2249
|
};
|
|
2136
2250
|
} else {
|
|
@@ -2163,7 +2277,10 @@ async function convertToOpenAIResponsesInput({
|
|
|
2163
2277
|
break;
|
|
2164
2278
|
}
|
|
2165
2279
|
if (hasLocalShellTool && part.toolName === "local_shell") {
|
|
2166
|
-
const parsedInput =
|
|
2280
|
+
const parsedInput = await (0, import_provider_utils20.validateTypes)({
|
|
2281
|
+
value: part.input,
|
|
2282
|
+
schema: localShellInputSchema
|
|
2283
|
+
});
|
|
2167
2284
|
input.push({
|
|
2168
2285
|
type: "local_shell_call",
|
|
2169
2286
|
call_id: part.toolCallId,
|
|
@@ -2201,7 +2318,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2201
2318
|
break;
|
|
2202
2319
|
}
|
|
2203
2320
|
case "reasoning": {
|
|
2204
|
-
const providerOptions = await (0,
|
|
2321
|
+
const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
|
|
2205
2322
|
provider: "openai",
|
|
2206
2323
|
providerOptions: part.providerOptions,
|
|
2207
2324
|
schema: openaiResponsesReasoningProviderOptionsSchema
|
|
@@ -2259,10 +2376,14 @@ async function convertToOpenAIResponsesInput({
|
|
|
2259
2376
|
for (const part of content) {
|
|
2260
2377
|
const output = part.output;
|
|
2261
2378
|
if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
|
|
2379
|
+
const parsedOutput = await (0, import_provider_utils20.validateTypes)({
|
|
2380
|
+
value: output.value,
|
|
2381
|
+
schema: localShellOutputSchema
|
|
2382
|
+
});
|
|
2262
2383
|
input.push({
|
|
2263
2384
|
type: "local_shell_call_output",
|
|
2264
2385
|
call_id: part.toolCallId,
|
|
2265
|
-
output:
|
|
2386
|
+
output: parsedOutput.output
|
|
2266
2387
|
});
|
|
2267
2388
|
break;
|
|
2268
2389
|
}
|
|
@@ -2272,6 +2393,9 @@ async function convertToOpenAIResponsesInput({
|
|
|
2272
2393
|
case "error-text":
|
|
2273
2394
|
contentValue = output.value;
|
|
2274
2395
|
break;
|
|
2396
|
+
case "execution-denied":
|
|
2397
|
+
contentValue = (_j = output.reason) != null ? _j : "Tool execution denied.";
|
|
2398
|
+
break;
|
|
2275
2399
|
case "content":
|
|
2276
2400
|
case "json":
|
|
2277
2401
|
case "error-json":
|
|
@@ -2294,9 +2418,9 @@ async function convertToOpenAIResponsesInput({
|
|
|
2294
2418
|
}
|
|
2295
2419
|
return { input, warnings };
|
|
2296
2420
|
}
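In the conversion hunks above, local_shell tool inputs and outputs are no longer forwarded as-is: they are checked with validateTypes against the lazy schemas before being pushed onto the Responses API input, which is also what makes the conversion path async. A small sketch of that call shape; the part shape and helper name here are illustrative:

import { lazySchema, validateTypes, zodSchema } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

const localShellInputSchema = lazySchema(() =>
  zodSchema(
    z.object({
      action: z.object({ type: z.literal('exec'), command: z.array(z.string()) }),
    }),
  ),
);

// Illustrative stand-in for a tool-call content part of the prompt.
async function toLocalShellCall(part: { toolCallId: string; input: unknown }) {
  // validateTypes resolves to the typed value or throws a validation error.
  const parsedInput = await validateTypes({
    value: part.input,
    schema: localShellInputSchema,
  });

  return {
    type: 'local_shell_call' as const,
    call_id: part.toolCallId,
    action: parsedInput.action,
  };
}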
|
|
2297
|
-
var openaiResponsesReasoningProviderOptionsSchema =
|
|
2298
|
-
itemId:
|
|
2299
|
-
reasoningEncryptedContent:
|
|
2421
|
+
var openaiResponsesReasoningProviderOptionsSchema = z13.object({
|
|
2422
|
+
itemId: z13.string().nullish(),
|
|
2423
|
+
reasoningEncryptedContent: z13.string().nullish()
|
|
2300
2424
|
});
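openaiResponsesReasoningProviderOptionsSchema above backs the reasoning branch of the converter: per-part provider options are read with parseProviderOptions, which validates the openai namespace (itemId, reasoningEncryptedContent) instead of casting it. A brief sketch of that call, with an illustrative options value:

import { parseProviderOptions } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

const reasoningOptionsSchema = z.object({
  itemId: z.string().nullish(),
  reasoningEncryptedContent: z.string().nullish(),
});

// providerOptions is whatever the caller attached to the reasoning part.
const options = await parseProviderOptions({
  provider: 'openai',
  providerOptions: { openai: { itemId: 'rs_123' } }, // illustrative value
  schema: reasoningOptionsSchema,
});

console.log(options?.itemId); // 'rs_123'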
|
|
2301
2425
|
|
|
2302
2426
|
// src/responses/map-openai-responses-finish-reason.ts
|
|
@@ -2317,33 +2441,574 @@ function mapOpenAIResponseFinishReason({
|
|
|
2317
2441
|
}
|
|
2318
2442
|
}
|
|
2319
2443
|
|
|
2444
|
+
// src/responses/openai-responses-api.ts
|
|
2445
|
+
var import_provider_utils21 = require("@ai-sdk/provider-utils");
|
|
2446
|
+
var z14 = __toESM(require("zod/v4"));
|
|
2447
|
+
var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2448
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2449
|
+
z14.union([
|
|
2450
|
+
z14.object({
|
|
2451
|
+
type: z14.literal("response.output_text.delta"),
|
|
2452
|
+
item_id: z14.string(),
|
|
2453
|
+
delta: z14.string(),
|
|
2454
|
+
logprobs: z14.array(
|
|
2455
|
+
z14.object({
|
|
2456
|
+
token: z14.string(),
|
|
2457
|
+
logprob: z14.number(),
|
|
2458
|
+
top_logprobs: z14.array(
|
|
2459
|
+
z14.object({
|
|
2460
|
+
token: z14.string(),
|
|
2461
|
+
logprob: z14.number()
|
|
2462
|
+
})
|
|
2463
|
+
)
|
|
2464
|
+
})
|
|
2465
|
+
).nullish()
|
|
2466
|
+
}),
|
|
2467
|
+
z14.object({
|
|
2468
|
+
type: z14.enum(["response.completed", "response.incomplete"]),
|
|
2469
|
+
response: z14.object({
|
|
2470
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2471
|
+
usage: z14.object({
|
|
2472
|
+
input_tokens: z14.number(),
|
|
2473
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2474
|
+
output_tokens: z14.number(),
|
|
2475
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2476
|
+
}),
|
|
2477
|
+
service_tier: z14.string().nullish()
|
|
2478
|
+
})
|
|
2479
|
+
}),
|
|
2480
|
+
z14.object({
|
|
2481
|
+
type: z14.literal("response.created"),
|
|
2482
|
+
response: z14.object({
|
|
2483
|
+
id: z14.string(),
|
|
2484
|
+
created_at: z14.number(),
|
|
2485
|
+
model: z14.string(),
|
|
2486
|
+
service_tier: z14.string().nullish()
|
|
2487
|
+
})
|
|
2488
|
+
}),
|
|
2489
|
+
z14.object({
|
|
2490
|
+
type: z14.literal("response.output_item.added"),
|
|
2491
|
+
output_index: z14.number(),
|
|
2492
|
+
item: z14.discriminatedUnion("type", [
|
|
2493
|
+
z14.object({
|
|
2494
|
+
type: z14.literal("message"),
|
|
2495
|
+
id: z14.string()
|
|
2496
|
+
}),
|
|
2497
|
+
z14.object({
|
|
2498
|
+
type: z14.literal("reasoning"),
|
|
2499
|
+
id: z14.string(),
|
|
2500
|
+
encrypted_content: z14.string().nullish()
|
|
2501
|
+
}),
|
|
2502
|
+
z14.object({
|
|
2503
|
+
type: z14.literal("function_call"),
|
|
2504
|
+
id: z14.string(),
|
|
2505
|
+
call_id: z14.string(),
|
|
2506
|
+
name: z14.string(),
|
|
2507
|
+
arguments: z14.string()
|
|
2508
|
+
}),
|
|
2509
|
+
z14.object({
|
|
2510
|
+
type: z14.literal("web_search_call"),
|
|
2511
|
+
id: z14.string(),
|
|
2512
|
+
status: z14.string(),
|
|
2513
|
+
action: z14.object({
|
|
2514
|
+
type: z14.literal("search"),
|
|
2515
|
+
query: z14.string().optional()
|
|
2516
|
+
}).nullish()
|
|
2517
|
+
}),
|
|
2518
|
+
z14.object({
|
|
2519
|
+
type: z14.literal("computer_call"),
|
|
2520
|
+
id: z14.string(),
|
|
2521
|
+
status: z14.string()
|
|
2522
|
+
}),
|
|
2523
|
+
z14.object({
|
|
2524
|
+
type: z14.literal("file_search_call"),
|
|
2525
|
+
id: z14.string()
|
|
2526
|
+
}),
|
|
2527
|
+
z14.object({
|
|
2528
|
+
type: z14.literal("image_generation_call"),
|
|
2529
|
+
id: z14.string()
|
|
2530
|
+
}),
|
|
2531
|
+
z14.object({
|
|
2532
|
+
type: z14.literal("code_interpreter_call"),
|
|
2533
|
+
id: z14.string(),
|
|
2534
|
+
container_id: z14.string(),
|
|
2535
|
+
code: z14.string().nullable(),
|
|
2536
|
+
outputs: z14.array(
|
|
2537
|
+
z14.discriminatedUnion("type", [
|
|
2538
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2539
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2540
|
+
])
|
|
2541
|
+
).nullable(),
|
|
2542
|
+
status: z14.string()
|
|
2543
|
+
})
|
|
2544
|
+
])
|
|
2545
|
+
}),
|
|
2546
|
+
z14.object({
|
|
2547
|
+
type: z14.literal("response.output_item.done"),
|
|
2548
|
+
output_index: z14.number(),
|
|
2549
|
+
item: z14.discriminatedUnion("type", [
|
|
2550
|
+
z14.object({
|
|
2551
|
+
type: z14.literal("message"),
|
|
2552
|
+
id: z14.string()
|
|
2553
|
+
}),
|
|
2554
|
+
z14.object({
|
|
2555
|
+
type: z14.literal("reasoning"),
|
|
2556
|
+
id: z14.string(),
|
|
2557
|
+
encrypted_content: z14.string().nullish()
|
|
2558
|
+
}),
|
|
2559
|
+
z14.object({
|
|
2560
|
+
type: z14.literal("function_call"),
|
|
2561
|
+
id: z14.string(),
|
|
2562
|
+
call_id: z14.string(),
|
|
2563
|
+
name: z14.string(),
|
|
2564
|
+
arguments: z14.string(),
|
|
2565
|
+
status: z14.literal("completed")
|
|
2566
|
+
}),
|
|
2567
|
+
z14.object({
|
|
2568
|
+
type: z14.literal("code_interpreter_call"),
|
|
2569
|
+
id: z14.string(),
|
|
2570
|
+
code: z14.string().nullable(),
|
|
2571
|
+
container_id: z14.string(),
|
|
2572
|
+
outputs: z14.array(
|
|
2573
|
+
z14.discriminatedUnion("type", [
|
|
2574
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2575
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2576
|
+
])
|
|
2577
|
+
).nullable()
|
|
2578
|
+
}),
|
|
2579
|
+
z14.object({
|
|
2580
|
+
type: z14.literal("image_generation_call"),
|
|
2581
|
+
id: z14.string(),
|
|
2582
|
+
result: z14.string()
|
|
2583
|
+
}),
|
|
2584
|
+
z14.object({
|
|
2585
|
+
type: z14.literal("web_search_call"),
|
|
2586
|
+
id: z14.string(),
|
|
2587
|
+
status: z14.string(),
|
|
2588
|
+
action: z14.discriminatedUnion("type", [
|
|
2589
|
+
z14.object({
|
|
2590
|
+
type: z14.literal("search"),
|
|
2591
|
+
query: z14.string().nullish()
|
|
2592
|
+
}),
|
|
2593
|
+
z14.object({
|
|
2594
|
+
type: z14.literal("open_page"),
|
|
2595
|
+
url: z14.string()
|
|
2596
|
+
}),
|
|
2597
|
+
z14.object({
|
|
2598
|
+
type: z14.literal("find"),
|
|
2599
|
+
url: z14.string(),
|
|
2600
|
+
pattern: z14.string()
|
|
2601
|
+
})
|
|
2602
|
+
]).nullish()
|
|
2603
|
+
}),
|
|
2604
|
+
z14.object({
|
|
2605
|
+
type: z14.literal("file_search_call"),
|
|
2606
|
+
id: z14.string(),
|
|
2607
|
+
queries: z14.array(z14.string()),
|
|
2608
|
+
results: z14.array(
|
|
2609
|
+
z14.object({
|
|
2610
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2611
|
+
file_id: z14.string(),
|
|
2612
|
+
filename: z14.string(),
|
|
2613
|
+
score: z14.number(),
|
|
2614
|
+
text: z14.string()
|
|
2615
|
+
})
|
|
2616
|
+
).nullish()
|
|
2617
|
+
}),
|
|
2618
|
+
z14.object({
|
|
2619
|
+
type: z14.literal("local_shell_call"),
|
|
2620
|
+
id: z14.string(),
|
|
2621
|
+
call_id: z14.string(),
|
|
2622
|
+
action: z14.object({
|
|
2623
|
+
type: z14.literal("exec"),
|
|
2624
|
+
command: z14.array(z14.string()),
|
|
2625
|
+
timeout_ms: z14.number().optional(),
|
|
2626
|
+
user: z14.string().optional(),
|
|
2627
|
+
working_directory: z14.string().optional(),
|
|
2628
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2629
|
+
})
|
|
2630
|
+
}),
|
|
2631
|
+
z14.object({
|
|
2632
|
+
type: z14.literal("computer_call"),
|
|
2633
|
+
id: z14.string(),
|
|
2634
|
+
status: z14.literal("completed")
|
|
2635
|
+
})
|
|
2636
|
+
])
|
|
2637
|
+
}),
|
|
2638
|
+
z14.object({
|
|
2639
|
+
type: z14.literal("response.function_call_arguments.delta"),
|
|
2640
|
+
item_id: z14.string(),
|
|
2641
|
+
output_index: z14.number(),
|
|
2642
|
+
delta: z14.string()
|
|
2643
|
+
}),
|
|
2644
|
+
z14.object({
|
|
2645
|
+
type: z14.literal("response.image_generation_call.partial_image"),
|
|
2646
|
+
item_id: z14.string(),
|
|
2647
|
+
output_index: z14.number(),
|
|
2648
|
+
partial_image_b64: z14.string()
|
|
2649
|
+
}),
|
|
2650
|
+
z14.object({
|
|
2651
|
+
type: z14.literal("response.code_interpreter_call_code.delta"),
|
|
2652
|
+
item_id: z14.string(),
|
|
2653
|
+
output_index: z14.number(),
|
|
2654
|
+
delta: z14.string()
|
|
2655
|
+
}),
|
|
2656
|
+
z14.object({
|
|
2657
|
+
type: z14.literal("response.code_interpreter_call_code.done"),
|
|
2658
|
+
item_id: z14.string(),
|
|
2659
|
+
output_index: z14.number(),
|
|
2660
|
+
code: z14.string()
|
|
2661
|
+
}),
|
|
2662
|
+
z14.object({
|
|
2663
|
+
type: z14.literal("response.output_text.annotation.added"),
|
|
2664
|
+
annotation: z14.discriminatedUnion("type", [
|
|
2665
|
+
z14.object({
|
|
2666
|
+
type: z14.literal("url_citation"),
|
|
2667
|
+
url: z14.string(),
|
|
2668
|
+
title: z14.string()
|
|
2669
|
+
}),
|
|
2670
|
+
z14.object({
|
|
2671
|
+
type: z14.literal("file_citation"),
|
|
2672
|
+
file_id: z14.string(),
|
|
2673
|
+
filename: z14.string().nullish(),
|
|
2674
|
+
index: z14.number().nullish(),
|
|
2675
|
+
start_index: z14.number().nullish(),
|
|
2676
|
+
end_index: z14.number().nullish(),
|
|
2677
|
+
quote: z14.string().nullish()
|
|
2678
|
+
})
|
|
2679
|
+
])
|
|
2680
|
+
}),
|
|
2681
|
+
z14.object({
|
|
2682
|
+
type: z14.literal("response.reasoning_summary_part.added"),
|
|
2683
|
+
item_id: z14.string(),
|
|
2684
|
+
summary_index: z14.number()
|
|
2685
|
+
}),
|
|
2686
|
+
z14.object({
|
|
2687
|
+
type: z14.literal("response.reasoning_summary_text.delta"),
|
|
2688
|
+
item_id: z14.string(),
|
|
2689
|
+
summary_index: z14.number(),
|
|
2690
|
+
delta: z14.string()
|
|
2691
|
+
}),
|
|
2692
|
+
z14.object({
|
|
2693
|
+
type: z14.literal("error"),
|
|
2694
|
+
code: z14.string(),
|
|
2695
|
+
message: z14.string(),
|
|
2696
|
+
param: z14.string().nullish(),
|
|
2697
|
+
sequence_number: z14.number()
|
|
2698
|
+
}),
|
|
2699
|
+
z14.object({ type: z14.string() }).loose().transform((value) => ({
|
|
2700
|
+
type: "unknown_chunk",
|
|
2701
|
+
message: value.type
|
|
2702
|
+
}))
|
|
2703
|
+
// fallback for unknown chunks
|
|
2704
|
+
])
|
|
2705
|
+
)
|
|
2706
|
+
);
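openaiResponsesChunkSchema above ends with a catch-all union member: any stream event whose type is not modelled stays loose and is rewritten to { type: 'unknown_chunk', message: <original type> } rather than failing validation, so new server events do not abort the stream. A reduced sketch of that forward-compatibility pattern with lazyValidator (the real schema models far more event types):

import { lazyValidator, zodSchema } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

const chunkSchema = lazyValidator(() =>
  zodSchema(
    z.union([
      z.object({
        type: z.literal('response.output_text.delta'),
        item_id: z.string(),
        delta: z.string(),
      }),
      // Fallback: accept any other event, but tag it so the stream loop can
      // surface or ignore it without rejecting the whole response.
      z
        .object({ type: z.string() })
        .loose()
        .transform(value => ({ type: 'unknown_chunk' as const, message: value.type })),
    ]),
  ),
);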
|
|
2707
|
+
var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2708
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2709
|
+
z14.object({
|
|
2710
|
+
id: z14.string(),
|
|
2711
|
+
created_at: z14.number(),
|
|
2712
|
+
error: z14.object({
|
|
2713
|
+
code: z14.string(),
|
|
2714
|
+
message: z14.string()
|
|
2715
|
+
}).nullish(),
|
|
2716
|
+
model: z14.string(),
|
|
2717
|
+
output: z14.array(
|
|
2718
|
+
z14.discriminatedUnion("type", [
|
|
2719
|
+
z14.object({
|
|
2720
|
+
type: z14.literal("message"),
|
|
2721
|
+
role: z14.literal("assistant"),
|
|
2722
|
+
id: z14.string(),
|
|
2723
|
+
content: z14.array(
|
|
2724
|
+
z14.object({
|
|
2725
|
+
type: z14.literal("output_text"),
|
|
2726
|
+
text: z14.string(),
|
|
2727
|
+
logprobs: z14.array(
|
|
2728
|
+
z14.object({
|
|
2729
|
+
token: z14.string(),
|
|
2730
|
+
logprob: z14.number(),
|
|
2731
|
+
top_logprobs: z14.array(
|
|
2732
|
+
z14.object({
|
|
2733
|
+
token: z14.string(),
|
|
2734
|
+
logprob: z14.number()
|
|
2735
|
+
})
|
|
2736
|
+
)
|
|
2737
|
+
})
|
|
2738
|
+
).nullish(),
|
|
2739
|
+
annotations: z14.array(
|
|
2740
|
+
z14.discriminatedUnion("type", [
|
|
2741
|
+
z14.object({
|
|
2742
|
+
type: z14.literal("url_citation"),
|
|
2743
|
+
start_index: z14.number(),
|
|
2744
|
+
end_index: z14.number(),
|
|
2745
|
+
url: z14.string(),
|
|
2746
|
+
title: z14.string()
|
|
2747
|
+
}),
|
|
2748
|
+
z14.object({
|
|
2749
|
+
type: z14.literal("file_citation"),
|
|
2750
|
+
file_id: z14.string(),
|
|
2751
|
+
filename: z14.string().nullish(),
|
|
2752
|
+
index: z14.number().nullish(),
|
|
2753
|
+
start_index: z14.number().nullish(),
|
|
2754
|
+
end_index: z14.number().nullish(),
|
|
2755
|
+
quote: z14.string().nullish()
|
|
2756
|
+
}),
|
|
2757
|
+
z14.object({
|
|
2758
|
+
type: z14.literal("container_file_citation")
|
|
2759
|
+
})
|
|
2760
|
+
])
|
|
2761
|
+
)
|
|
2762
|
+
})
|
|
2763
|
+
)
|
|
2764
|
+
}),
|
|
2765
|
+
z14.object({
|
|
2766
|
+
type: z14.literal("web_search_call"),
|
|
2767
|
+
id: z14.string(),
|
|
2768
|
+
status: z14.string(),
|
|
2769
|
+
action: z14.discriminatedUnion("type", [
|
|
2770
|
+
z14.object({
|
|
2771
|
+
type: z14.literal("search"),
|
|
2772
|
+
query: z14.string().nullish()
|
|
2773
|
+
}),
|
|
2774
|
+
z14.object({
|
|
2775
|
+
type: z14.literal("open_page"),
|
|
2776
|
+
url: z14.string()
|
|
2777
|
+
}),
|
|
2778
|
+
z14.object({
|
|
2779
|
+
type: z14.literal("find"),
|
|
2780
|
+
url: z14.string(),
|
|
2781
|
+
pattern: z14.string()
|
|
2782
|
+
})
|
|
2783
|
+
]).nullish()
|
|
2784
|
+
}),
|
|
2785
|
+
z14.object({
|
|
2786
|
+
type: z14.literal("file_search_call"),
|
|
2787
|
+
id: z14.string(),
|
|
2788
|
+
queries: z14.array(z14.string()),
|
|
2789
|
+
results: z14.array(
|
|
2790
|
+
z14.object({
|
|
2791
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2792
|
+
file_id: z14.string(),
|
|
2793
|
+
filename: z14.string(),
|
|
2794
|
+
score: z14.number(),
|
|
2795
|
+
text: z14.string()
|
|
2796
|
+
})
|
|
2797
|
+
).nullish()
|
|
2798
|
+
}),
|
|
2799
|
+
z14.object({
|
|
2800
|
+
type: z14.literal("code_interpreter_call"),
|
|
2801
|
+
id: z14.string(),
|
|
2802
|
+
code: z14.string().nullable(),
|
|
2803
|
+
container_id: z14.string(),
|
|
2804
|
+
outputs: z14.array(
|
|
2805
|
+
z14.discriminatedUnion("type", [
|
|
2806
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2807
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2808
|
+
])
|
|
2809
|
+
).nullable()
|
|
2810
|
+
}),
|
|
2811
|
+
z14.object({
|
|
2812
|
+
type: z14.literal("image_generation_call"),
|
|
2813
|
+
id: z14.string(),
|
|
2814
|
+
result: z14.string()
|
|
2815
|
+
}),
|
|
2816
|
+
z14.object({
|
|
2817
|
+
type: z14.literal("local_shell_call"),
|
|
2818
|
+
id: z14.string(),
|
|
2819
|
+
call_id: z14.string(),
|
|
2820
|
+
action: z14.object({
|
|
2821
|
+
type: z14.literal("exec"),
|
|
2822
|
+
command: z14.array(z14.string()),
|
|
2823
|
+
timeout_ms: z14.number().optional(),
|
|
2824
|
+
user: z14.string().optional(),
|
|
2825
|
+
working_directory: z14.string().optional(),
|
|
2826
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2827
|
+
})
|
|
2828
|
+
}),
|
|
2829
|
+
z14.object({
|
|
2830
|
+
type: z14.literal("function_call"),
|
|
2831
|
+
call_id: z14.string(),
|
|
2832
|
+
name: z14.string(),
|
|
2833
|
+
arguments: z14.string(),
|
|
2834
|
+
id: z14.string()
|
|
2835
|
+
}),
|
|
2836
|
+
z14.object({
|
|
2837
|
+
type: z14.literal("computer_call"),
|
|
2838
|
+
id: z14.string(),
|
|
2839
|
+
status: z14.string().optional()
|
|
2840
|
+
}),
|
|
2841
|
+
z14.object({
|
|
2842
|
+
type: z14.literal("reasoning"),
|
|
2843
|
+
id: z14.string(),
|
|
2844
|
+
encrypted_content: z14.string().nullish(),
|
|
2845
|
+
summary: z14.array(
|
|
2846
|
+
z14.object({
|
|
2847
|
+
type: z14.literal("summary_text"),
|
|
2848
|
+
text: z14.string()
|
|
2849
|
+
})
|
|
2850
|
+
)
|
|
2851
|
+
})
|
|
2852
|
+
])
|
|
2853
|
+
),
|
|
2854
|
+
service_tier: z14.string().nullish(),
|
|
2855
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2856
|
+
usage: z14.object({
|
|
2857
|
+
input_tokens: z14.number(),
|
|
2858
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2859
|
+
output_tokens: z14.number(),
|
|
2860
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2861
|
+
})
|
|
2862
|
+
})
|
|
2863
|
+
)
|
|
2864
|
+
);
|
|
2865
|
+
|
|
2866
|
+
// src/responses/openai-responses-options.ts
|
|
2867
|
+
var import_provider_utils22 = require("@ai-sdk/provider-utils");
|
|
2868
|
+
var z15 = __toESM(require("zod/v4"));
|
|
2869
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2870
|
+
var openaiResponsesReasoningModelIds = [
|
|
2871
|
+
"o1",
|
|
2872
|
+
"o1-2024-12-17",
|
|
2873
|
+
"o3-mini",
|
|
2874
|
+
"o3-mini-2025-01-31",
|
|
2875
|
+
"o3",
|
|
2876
|
+
"o3-2025-04-16",
|
|
2877
|
+
"o4-mini",
|
|
2878
|
+
"o4-mini-2025-04-16",
|
|
2879
|
+
"codex-mini-latest",
|
|
2880
|
+
"computer-use-preview",
|
|
2881
|
+
"gpt-5",
|
|
2882
|
+
"gpt-5-2025-08-07",
|
|
2883
|
+
"gpt-5-codex",
|
|
2884
|
+
"gpt-5-mini",
|
|
2885
|
+
"gpt-5-mini-2025-08-07",
|
|
2886
|
+
"gpt-5-nano",
|
|
2887
|
+
"gpt-5-nano-2025-08-07",
|
|
2888
|
+
"gpt-5-pro",
|
|
2889
|
+
"gpt-5-pro-2025-10-06"
|
|
2890
|
+
];
|
|
2891
|
+
var openaiResponsesModelIds = [
|
|
2892
|
+
"gpt-4.1",
|
|
2893
|
+
"gpt-4.1-2025-04-14",
|
|
2894
|
+
"gpt-4.1-mini",
|
|
2895
|
+
"gpt-4.1-mini-2025-04-14",
|
|
2896
|
+
"gpt-4.1-nano",
|
|
2897
|
+
"gpt-4.1-nano-2025-04-14",
|
|
2898
|
+
"gpt-4o",
|
|
2899
|
+
"gpt-4o-2024-05-13",
|
|
2900
|
+
"gpt-4o-2024-08-06",
|
|
2901
|
+
"gpt-4o-2024-11-20",
|
|
2902
|
+
"gpt-4o-audio-preview",
|
|
2903
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
2904
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
2905
|
+
"gpt-4o-search-preview",
|
|
2906
|
+
"gpt-4o-search-preview-2025-03-11",
|
|
2907
|
+
"gpt-4o-mini-search-preview",
|
|
2908
|
+
"gpt-4o-mini-search-preview-2025-03-11",
|
|
2909
|
+
"gpt-4o-mini",
|
|
2910
|
+
"gpt-4o-mini-2024-07-18",
|
|
2911
|
+
"gpt-4-turbo",
|
|
2912
|
+
"gpt-4-turbo-2024-04-09",
|
|
2913
|
+
"gpt-4-turbo-preview",
|
|
2914
|
+
"gpt-4-0125-preview",
|
|
2915
|
+
"gpt-4-1106-preview",
|
|
2916
|
+
"gpt-4",
|
|
2917
|
+
"gpt-4-0613",
|
|
2918
|
+
"gpt-4.5-preview",
|
|
2919
|
+
"gpt-4.5-preview-2025-02-27",
|
|
2920
|
+
"gpt-3.5-turbo-0125",
|
|
2921
|
+
"gpt-3.5-turbo",
|
|
2922
|
+
"gpt-3.5-turbo-1106",
|
|
2923
|
+
"chatgpt-4o-latest",
|
|
2924
|
+
"gpt-5-chat-latest",
|
|
2925
|
+
...openaiResponsesReasoningModelIds
|
|
2926
|
+
];
|
|
2927
|
+
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
|
|
2928
|
+
() => (0, import_provider_utils22.zodSchema)(
|
|
2929
|
+
z15.object({
|
|
2930
|
+
include: z15.array(
|
|
2931
|
+
z15.enum([
|
|
2932
|
+
"reasoning.encrypted_content",
|
|
2933
|
+
"file_search_call.results",
|
|
2934
|
+
"message.output_text.logprobs"
|
|
2935
|
+
])
|
|
2936
|
+
).nullish(),
|
|
2937
|
+
instructions: z15.string().nullish(),
|
|
2938
|
+
/**
|
|
2939
|
+
* Return the log probabilities of the tokens.
|
|
2940
|
+
*
|
|
2941
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
2942
|
+
* were generated.
|
|
2943
|
+
*
|
|
2944
|
+
* Setting to a number will return the log probabilities of the top n
|
|
2945
|
+
* tokens that were generated.
|
|
2946
|
+
*
|
|
2947
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
2948
|
+
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
2949
|
+
*/
|
|
2950
|
+
logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
2951
|
+
/**
|
|
2952
|
+
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
2953
|
+
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
2954
|
+
* Any further attempts to call a tool by the model will be ignored.
|
|
2955
|
+
*/
|
|
2956
|
+
maxToolCalls: z15.number().nullish(),
|
|
2957
|
+
metadata: z15.any().nullish(),
|
|
2958
|
+
parallelToolCalls: z15.boolean().nullish(),
|
|
2959
|
+
previousResponseId: z15.string().nullish(),
|
|
2960
|
+
promptCacheKey: z15.string().nullish(),
|
|
2961
|
+
reasoningEffort: z15.string().nullish(),
|
|
2962
|
+
reasoningSummary: z15.string().nullish(),
|
|
2963
|
+
safetyIdentifier: z15.string().nullish(),
|
|
2964
|
+
serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
|
|
2965
|
+
store: z15.boolean().nullish(),
|
|
2966
|
+
strictJsonSchema: z15.boolean().nullish(),
|
|
2967
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
|
|
2968
|
+
user: z15.string().nullish()
|
|
2969
|
+
})
|
|
2970
|
+
)
|
|
2971
|
+
);
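openaiResponsesProviderOptionsSchema is what validates the providerOptions.openai object on Responses API calls: logprobs (a boolean or a top-n count capped at TOP_LOGPROBS_MAX = 20), serviceTier, textVerbosity, include, and the other fields listed above. As a rough usage sketch, assuming the usual generateText entry point from the ai package and that openai(modelId) resolves to the responses language model (both assumptions, neither is part of this diff):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-5-mini'),
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      // Must satisfy openaiResponsesProviderOptionsSchema:
      logprobs: 5,                               // top-5 logprobs, max 20
      serviceTier: 'flex',                       // 'auto' | 'flex' | 'priority'
      textVerbosity: 'low',
      include: ['message.output_text.logprobs'],
    },
  },
});

console.log(text);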
|
|
2972
|
+
|
|
2320
2973
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2321
2974
|
var import_provider7 = require("@ai-sdk/provider");
|
|
2322
2975
|
|
|
2323
2976
|
// src/tool/code-interpreter.ts
|
|
2324
|
-
var
|
|
2325
|
-
var
|
|
2326
|
-
var codeInterpreterInputSchema =
|
|
2327
|
-
|
|
2328
|
-
|
|
2329
|
-
|
|
2330
|
-
|
|
2331
|
-
outputs: import_v414.z.array(
|
|
2332
|
-
import_v414.z.discriminatedUnion("type", [
|
|
2333
|
-
import_v414.z.object({ type: import_v414.z.literal("logs"), logs: import_v414.z.string() }),
|
|
2334
|
-
import_v414.z.object({ type: import_v414.z.literal("image"), url: import_v414.z.string() })
|
|
2335
|
-
])
|
|
2336
|
-
).nullish()
|
|
2337
|
-
});
|
|
2338
|
-
var codeInterpreterArgsSchema = import_v414.z.object({
|
|
2339
|
-
container: import_v414.z.union([
|
|
2340
|
-
import_v414.z.string(),
|
|
2341
|
-
import_v414.z.object({
|
|
2342
|
-
fileIds: import_v414.z.array(import_v414.z.string()).optional()
|
|
2977
|
+
var import_provider_utils23 = require("@ai-sdk/provider-utils");
|
|
2978
|
+
var z16 = __toESM(require("zod/v4"));
|
|
2979
|
+
var codeInterpreterInputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2980
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2981
|
+
z16.object({
|
|
2982
|
+
code: z16.string().nullish(),
|
|
2983
|
+
containerId: z16.string()
|
|
2343
2984
|
})
|
|
2344
|
-
|
|
2345
|
-
|
|
2346
|
-
var
|
|
2985
|
+
)
|
|
2986
|
+
);
|
|
2987
|
+
var codeInterpreterOutputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2988
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2989
|
+
z16.object({
|
|
2990
|
+
outputs: z16.array(
|
|
2991
|
+
z16.discriminatedUnion("type", [
|
|
2992
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2993
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2994
|
+
])
|
|
2995
|
+
).nullish()
|
|
2996
|
+
})
|
|
2997
|
+
)
|
|
2998
|
+
);
|
|
2999
|
+
var codeInterpreterArgsSchema = (0, import_provider_utils23.lazySchema)(
|
|
3000
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
3001
|
+
z16.object({
|
|
3002
|
+
container: z16.union([
|
|
3003
|
+
z16.string(),
|
|
3004
|
+
z16.object({
|
|
3005
|
+
fileIds: z16.array(z16.string()).optional()
|
|
3006
|
+
})
|
|
3007
|
+
]).optional()
|
|
3008
|
+
})
|
|
3009
|
+
)
|
|
3010
|
+
);
|
|
3011
|
+
var codeInterpreterToolFactory = (0, import_provider_utils23.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2347
3012
|
id: "openai.code_interpreter",
|
|
2348
3013
|
name: "code_interpreter",
|
|
2349
3014
|
inputSchema: codeInterpreterInputSchema,
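codeInterpreterArgsSchema above accepts either a container id string or { fileIds }, and prepareResponsesTools later maps that to the API's container field ({ type: 'auto', file_ids } when no id is given). A hedged configuration sketch, assuming the factory is exposed on the provider's tools namespace as openai.tools.codeInterpreter (the attachment point is an assumption; only the factory itself appears in this diff), with illustrative ids:

import { openai } from '@ai-sdk/openai';

// Reuse an existing container by id...
const byId = openai.tools.codeInterpreter({ container: 'cntr_abc123' });

// ...or let the API create an auto container seeded with files.
const autoContainer = openai.tools.codeInterpreter({
  container: { fileIds: ['file-1', 'file-2'] },
});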
|
|
@@ -2354,173 +3019,209 @@ var codeInterpreter = (args = {}) => {
|
|
|
2354
3019
|
};
|
|
2355
3020
|
|
|
2356
3021
|
// src/tool/file-search.ts
|
|
2357
|
-
var
|
|
2358
|
-
var
|
|
2359
|
-
var comparisonFilterSchema =
|
|
2360
|
-
key:
|
|
2361
|
-
type:
|
|
2362
|
-
value:
|
|
3022
|
+
var import_provider_utils24 = require("@ai-sdk/provider-utils");
|
|
3023
|
+
var z17 = __toESM(require("zod/v4"));
|
|
3024
|
+
var comparisonFilterSchema = z17.object({
|
|
3025
|
+
key: z17.string(),
|
|
3026
|
+
type: z17.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
|
3027
|
+
value: z17.union([z17.string(), z17.number(), z17.boolean()])
|
|
2363
3028
|
});
|
|
2364
|
-
var compoundFilterSchema =
|
|
2365
|
-
type:
|
|
2366
|
-
filters:
|
|
2367
|
-
|
|
3029
|
+
var compoundFilterSchema = z17.object({
|
|
3030
|
+
type: z17.enum(["and", "or"]),
|
|
3031
|
+
filters: z17.array(
|
|
3032
|
+
z17.union([comparisonFilterSchema, z17.lazy(() => compoundFilterSchema)])
|
|
2368
3033
|
)
|
|
2369
3034
|
});
|
|
2370
|
-
var fileSearchArgsSchema =
|
|
2371
|
-
|
|
2372
|
-
|
|
2373
|
-
|
|
2374
|
-
|
|
2375
|
-
|
|
2376
|
-
|
|
2377
|
-
|
|
2378
|
-
})
|
|
2379
|
-
|
|
2380
|
-
queries: import_v415.z.array(import_v415.z.string()),
|
|
2381
|
-
results: import_v415.z.array(
|
|
2382
|
-
import_v415.z.object({
|
|
2383
|
-
attributes: import_v415.z.record(import_v415.z.string(), import_v415.z.unknown()),
|
|
2384
|
-
fileId: import_v415.z.string(),
|
|
2385
|
-
filename: import_v415.z.string(),
|
|
2386
|
-
score: import_v415.z.number(),
|
|
2387
|
-
text: import_v415.z.string()
|
|
3035
|
+
var fileSearchArgsSchema = (0, import_provider_utils24.lazySchema)(
|
|
3036
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3037
|
+
z17.object({
|
|
3038
|
+
vectorStoreIds: z17.array(z17.string()),
|
|
3039
|
+
maxNumResults: z17.number().optional(),
|
|
3040
|
+
ranking: z17.object({
|
|
3041
|
+
ranker: z17.string().optional(),
|
|
3042
|
+
scoreThreshold: z17.number().optional()
|
|
3043
|
+
}).optional(),
|
|
3044
|
+
filters: z17.union([comparisonFilterSchema, compoundFilterSchema]).optional()
|
|
2388
3045
|
})
|
|
2389
|
-
)
|
|
2390
|
-
|
|
2391
|
-
var
|
|
3046
|
+
)
|
|
3047
|
+
);
|
|
3048
|
+
var fileSearchOutputSchema = (0, import_provider_utils24.lazySchema)(
|
|
3049
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3050
|
+
z17.object({
|
|
3051
|
+
queries: z17.array(z17.string()),
|
|
3052
|
+
results: z17.array(
|
|
3053
|
+
z17.object({
|
|
3054
|
+
attributes: z17.record(z17.string(), z17.unknown()),
|
|
3055
|
+
fileId: z17.string(),
|
|
3056
|
+
filename: z17.string(),
|
|
3057
|
+
score: z17.number(),
|
|
3058
|
+
text: z17.string()
|
|
3059
|
+
})
|
|
3060
|
+
).nullable()
|
|
3061
|
+
})
|
|
3062
|
+
)
|
|
3063
|
+
);
|
|
3064
|
+
var fileSearch = (0, import_provider_utils24.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2392
3065
|
id: "openai.file_search",
|
|
2393
3066
|
name: "file_search",
|
|
2394
|
-
inputSchema:
|
|
3067
|
+
inputSchema: z17.object({}),
|
|
2395
3068
|
outputSchema: fileSearchOutputSchema
|
|
2396
3069
|
});
|
|
2397
3070
|
|
|
2398
3071
|
// src/tool/web-search.ts
|
|
2399
|
-
var
|
|
2400
|
-
var
|
|
2401
|
-
var webSearchArgsSchema =
|
|
2402
|
-
|
|
2403
|
-
|
|
2404
|
-
|
|
2405
|
-
|
|
2406
|
-
|
|
2407
|
-
|
|
2408
|
-
|
|
2409
|
-
|
|
2410
|
-
|
|
2411
|
-
|
|
2412
|
-
|
|
2413
|
-
|
|
2414
|
-
|
|
3072
|
+
var import_provider_utils25 = require("@ai-sdk/provider-utils");
|
|
3073
|
+
var z18 = __toESM(require("zod/v4"));
|
|
3074
|
+
var webSearchArgsSchema = (0, import_provider_utils25.lazySchema)(
|
|
3075
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3076
|
+
z18.object({
|
|
3077
|
+
filters: z18.object({
|
|
3078
|
+
allowedDomains: z18.array(z18.string()).optional()
|
|
3079
|
+
}).optional(),
|
|
3080
|
+
searchContextSize: z18.enum(["low", "medium", "high"]).optional(),
|
|
3081
|
+
userLocation: z18.object({
|
|
3082
|
+
type: z18.literal("approximate"),
|
|
3083
|
+
country: z18.string().optional(),
|
|
3084
|
+
city: z18.string().optional(),
|
|
3085
|
+
region: z18.string().optional(),
|
|
3086
|
+
timezone: z18.string().optional()
|
|
3087
|
+
}).optional()
|
|
3088
|
+
})
|
|
3089
|
+
)
|
|
3090
|
+
);
|
|
3091
|
+
var webSearchInputSchema = (0, import_provider_utils25.lazySchema)(
|
|
3092
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3093
|
+
z18.object({
|
|
3094
|
+
action: z18.discriminatedUnion("type", [
|
|
3095
|
+
z18.object({
|
|
3096
|
+
type: z18.literal("search"),
|
|
3097
|
+
query: z18.string().nullish()
|
|
3098
|
+
}),
|
|
3099
|
+
z18.object({
|
|
3100
|
+
type: z18.literal("open_page"),
|
|
3101
|
+
url: z18.string()
|
|
3102
|
+
}),
|
|
3103
|
+
z18.object({
|
|
3104
|
+
type: z18.literal("find"),
|
|
3105
|
+
url: z18.string(),
|
|
3106
|
+
pattern: z18.string()
|
|
3107
|
+
})
|
|
3108
|
+
]).nullish()
|
|
3109
|
+
})
|
|
3110
|
+
)
|
|
3111
|
+
);
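webSearchArgsSchema and webSearchInputSchema above cover both sides of the built-in web search tool: its configuration (filters.allowedDomains, searchContextSize, an approximate userLocation) and the streamed call input (search, open_page, and find actions). A short configuration sketch under the same caveat that the openai.tools.webSearch attachment point is assumed rather than shown in this diff:

import { openai } from '@ai-sdk/openai';

const webSearch = openai.tools.webSearch({
  searchContextSize: 'medium',
  filters: { allowedDomains: ['docs.example.com'] }, // illustrative domain
  userLocation: { type: 'approximate', country: 'US', city: 'Minneapolis' },
});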
|
|
3112
|
+
var webSearchToolFactory = (0, import_provider_utils25.createProviderDefinedToolFactory)({
|
|
2415
3113
|
id: "openai.web_search",
|
|
2416
3114
|
name: "web_search",
|
|
2417
|
-
inputSchema:
|
|
2418
|
-
action: import_v416.z.discriminatedUnion("type", [
|
|
2419
|
-
import_v416.z.object({
|
|
2420
|
-
type: import_v416.z.literal("search"),
|
|
2421
|
-
query: import_v416.z.string().nullish()
|
|
2422
|
-
}),
|
|
2423
|
-
import_v416.z.object({
|
|
2424
|
-
type: import_v416.z.literal("open_page"),
|
|
2425
|
-
url: import_v416.z.string()
|
|
2426
|
-
}),
|
|
2427
|
-
import_v416.z.object({
|
|
2428
|
-
type: import_v416.z.literal("find"),
|
|
2429
|
-
url: import_v416.z.string(),
|
|
2430
|
-
pattern: import_v416.z.string()
|
|
2431
|
-
})
|
|
2432
|
-
]).nullish()
|
|
2433
|
-
})
|
|
3115
|
+
inputSchema: webSearchInputSchema
|
|
2434
3116
|
});
|
|
2435
3117
|
|
|
2436
3118
|
// src/tool/web-search-preview.ts
|
|
2437
|
-
var
|
|
2438
|
-
var
|
|
2439
|
-
var webSearchPreviewArgsSchema =
|
|
2440
|
-
|
|
2441
|
-
|
|
2442
|
-
|
|
2443
|
-
|
|
2444
|
-
|
|
2445
|
-
|
|
2446
|
-
|
|
2447
|
-
|
|
2448
|
-
|
|
2449
|
-
|
|
2450
|
-
|
|
2451
|
-
|
|
2452
|
-
|
|
2453
|
-
|
|
2454
|
-
|
|
2455
|
-
|
|
2456
|
-
|
|
2457
|
-
|
|
2458
|
-
|
|
2459
|
-
|
|
2460
|
-
|
|
2461
|
-
|
|
2462
|
-
|
|
2463
|
-
|
|
2464
|
-
|
|
2465
|
-
|
|
2466
|
-
|
|
2467
|
-
|
|
2468
|
-
|
|
2469
|
-
|
|
2470
|
-
|
|
2471
|
-
|
|
2472
|
-
|
|
2473
|
-
|
|
3119
|
+
var import_provider_utils26 = require("@ai-sdk/provider-utils");
|
|
3120
|
+
var z19 = __toESM(require("zod/v4"));
|
|
3121
|
+
var webSearchPreviewArgsSchema = (0, import_provider_utils26.lazySchema)(
|
|
3122
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3123
|
+
z19.object({
|
|
3124
|
+
/**
|
|
3125
|
+
* Search context size to use for the web search.
|
|
3126
|
+
* - high: Most comprehensive context, highest cost, slower response
|
|
3127
|
+
* - medium: Balanced context, cost, and latency (default)
|
|
3128
|
+
* - low: Least context, lowest cost, fastest response
|
|
3129
|
+
*/
|
|
3130
|
+
searchContextSize: z19.enum(["low", "medium", "high"]).optional(),
|
|
3131
|
+
/**
|
|
3132
|
+
* User location information to provide geographically relevant search results.
|
|
3133
|
+
*/
|
|
3134
|
+
userLocation: z19.object({
|
|
3135
|
+
/**
|
|
3136
|
+
* Type of location (always 'approximate')
|
|
3137
|
+
*/
|
|
3138
|
+
type: z19.literal("approximate"),
|
|
3139
|
+
/**
|
|
3140
|
+
* Two-letter ISO country code (e.g., 'US', 'GB')
|
|
3141
|
+
*/
|
|
3142
|
+
country: z19.string().optional(),
|
|
3143
|
+
/**
|
|
3144
|
+
* City name (free text, e.g., 'Minneapolis')
|
|
3145
|
+
*/
|
|
3146
|
+
city: z19.string().optional(),
|
|
3147
|
+
/**
|
|
3148
|
+
* Region name (free text, e.g., 'Minnesota')
|
|
3149
|
+
*/
|
|
3150
|
+
region: z19.string().optional(),
|
|
3151
|
+
/**
|
|
3152
|
+
* IANA timezone (e.g., 'America/Chicago')
|
|
3153
|
+
*/
|
|
3154
|
+
timezone: z19.string().optional()
|
|
3155
|
+
}).optional()
|
|
3156
|
+
})
|
|
3157
|
+
)
|
|
3158
|
+
);
|
|
3159
|
+
var webSearchPreviewInputSchema = (0, import_provider_utils26.lazySchema)(
|
|
3160
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3161
|
+
z19.object({
|
|
3162
|
+
action: z19.discriminatedUnion("type", [
|
|
3163
|
+
z19.object({
|
|
3164
|
+
type: z19.literal("search"),
|
|
3165
|
+
query: z19.string().nullish()
|
|
3166
|
+
}),
|
|
3167
|
+
z19.object({
|
|
3168
|
+
type: z19.literal("open_page"),
|
|
3169
|
+
url: z19.string()
|
|
3170
|
+
}),
|
|
3171
|
+
z19.object({
|
|
3172
|
+
type: z19.literal("find"),
|
|
3173
|
+
url: z19.string(),
|
|
3174
|
+
pattern: z19.string()
|
|
3175
|
+
})
|
|
3176
|
+
]).nullish()
|
|
3177
|
+
})
|
|
3178
|
+
)
|
|
3179
|
+
);
|
|
3180
|
+
var webSearchPreview = (0, import_provider_utils26.createProviderDefinedToolFactory)({
|
|
2474
3181
|
id: "openai.web_search_preview",
|
|
2475
3182
|
name: "web_search_preview",
|
|
2476
|
-
inputSchema:
|
|
2477
|
-
action: import_v417.z.discriminatedUnion("type", [
|
|
2478
|
-
import_v417.z.object({
|
|
2479
|
-
type: import_v417.z.literal("search"),
|
|
2480
|
-
query: import_v417.z.string().nullish()
|
|
2481
|
-
}),
|
|
2482
|
-
import_v417.z.object({
|
|
2483
|
-
type: import_v417.z.literal("open_page"),
|
|
2484
|
-
url: import_v417.z.string()
|
|
2485
|
-
}),
|
|
2486
|
-
import_v417.z.object({
|
|
2487
|
-
type: import_v417.z.literal("find"),
|
|
2488
|
-
url: import_v417.z.string(),
|
|
2489
|
-
pattern: import_v417.z.string()
|
|
2490
|
-
})
|
|
2491
|
-
]).nullish()
|
|
2492
|
-
})
|
|
3183
|
+
inputSchema: webSearchPreviewInputSchema
|
|
2493
3184
|
});
|
|
2494
3185
|
|
|
2495
3186
|
// src/tool/image-generation.ts
|
|
2496
|
-
var
|
|
2497
|
-
var
|
|
2498
|
-
var imageGenerationArgsSchema =
|
|
2499
|
-
|
|
2500
|
-
|
|
2501
|
-
|
|
2502
|
-
|
|
2503
|
-
|
|
2504
|
-
|
|
2505
|
-
|
|
2506
|
-
|
|
2507
|
-
|
|
2508
|
-
|
|
2509
|
-
|
|
2510
|
-
|
|
2511
|
-
|
|
2512
|
-
|
|
2513
|
-
|
|
2514
|
-
})
|
|
2515
|
-
|
|
3187
|
+
var import_provider_utils27 = require("@ai-sdk/provider-utils");
|
|
3188
|
+
var z20 = __toESM(require("zod/v4"));
|
|
3189
|
+
var imageGenerationArgsSchema = (0, import_provider_utils27.lazySchema)(
|
|
3190
|
+
() => (0, import_provider_utils27.zodSchema)(
|
|
3191
|
+
z20.object({
|
|
3192
|
+
background: z20.enum(["auto", "opaque", "transparent"]).optional(),
|
|
3193
|
+
inputFidelity: z20.enum(["low", "high"]).optional(),
|
|
3194
|
+
inputImageMask: z20.object({
|
|
3195
|
+
fileId: z20.string().optional(),
|
|
3196
|
+
imageUrl: z20.string().optional()
|
|
3197
|
+
}).optional(),
|
|
3198
|
+
model: z20.string().optional(),
|
|
3199
|
+
moderation: z20.enum(["auto"]).optional(),
|
|
3200
|
+
outputCompression: z20.number().int().min(0).max(100).optional(),
|
|
3201
|
+
outputFormat: z20.enum(["png", "jpeg", "webp"]).optional(),
|
|
3202
|
+
partialImages: z20.number().int().min(0).max(3).optional(),
|
|
3203
|
+
quality: z20.enum(["auto", "low", "medium", "high"]).optional(),
|
|
3204
|
+
size: z20.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
|
|
3205
|
+
}).strict()
|
|
3206
|
+
)
|
|
3207
|
+
);
|
|
3208
|
+
var imageGenerationInputSchema = (0, import_provider_utils27.lazySchema)(() => (0, import_provider_utils27.zodSchema)(z20.object({})));
|
|
3209
|
+
var imageGenerationOutputSchema = (0, import_provider_utils27.lazySchema)(
|
|
3210
|
+
() => (0, import_provider_utils27.zodSchema)(z20.object({ result: z20.string() }))
|
|
3211
|
+
);
|
|
3212
|
+
var imageGenerationToolFactory = (0, import_provider_utils27.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2516
3213
|
id: "openai.image_generation",
|
|
2517
3214
|
name: "image_generation",
|
|
2518
|
-
inputSchema:
|
|
3215
|
+
inputSchema: imageGenerationInputSchema,
|
|
2519
3216
|
outputSchema: imageGenerationOutputSchema
|
|
2520
3217
|
});
|
|
3218
|
+
var imageGeneration = (args = {}) => {
|
|
3219
|
+
return imageGenerationToolFactory(args);
|
|
3220
|
+
};
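The imageGeneration export above is new in this major: it wraps imageGenerationToolFactory so the tool can be configured with or without arguments, and its args schema is strict (background, inputFidelity, partialImages 0-3, outputCompression 0-100, outputFormat, quality, size). A hedged usage sketch, again assuming the provider tools namespace as the public attachment point:

import { openai } from '@ai-sdk/openai';

const imageGeneration = openai.tools.imageGeneration({
  size: '1024x1024',
  quality: 'high',
  outputFormat: 'webp',
  outputCompression: 80, // 0-100; applies to jpeg and webp output
  partialImages: 2,      // stream up to 2 preliminary partial images
});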
|
|
2521
3221
|
|
|
2522
3222
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2523
|
-
|
|
3223
|
+
var import_provider_utils28 = require("@ai-sdk/provider-utils");
|
|
3224
|
+
async function prepareResponsesTools({
|
|
2524
3225
|
tools,
|
|
2525
3226
|
toolChoice,
|
|
2526
3227
|
strictJsonSchema
|
|
@@ -2545,7 +3246,10 @@ function prepareResponsesTools({
|
|
|
2545
3246
|
case "provider-defined": {
|
|
2546
3247
|
switch (tool.id) {
|
|
2547
3248
|
case "openai.file_search": {
|
|
2548
|
-
const args =
|
|
3249
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3250
|
+
value: tool.args,
|
|
3251
|
+
schema: fileSearchArgsSchema
|
|
3252
|
+
});
|
|
2549
3253
|
openaiTools.push({
|
|
2550
3254
|
type: "file_search",
|
|
2551
3255
|
vector_store_ids: args.vectorStoreIds,
|
|
@@ -2565,7 +3269,10 @@ function prepareResponsesTools({
|
|
|
2565
3269
|
break;
|
|
2566
3270
|
}
|
|
2567
3271
|
case "openai.web_search_preview": {
|
|
2568
|
-
const args =
|
|
3272
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3273
|
+
value: tool.args,
|
|
3274
|
+
schema: webSearchPreviewArgsSchema
|
|
3275
|
+
});
|
|
2569
3276
|
openaiTools.push({
|
|
2570
3277
|
type: "web_search_preview",
|
|
2571
3278
|
search_context_size: args.searchContextSize,
|
|
@@ -2574,7 +3281,10 @@ function prepareResponsesTools({
|
|
|
2574
3281
|
break;
|
|
2575
3282
|
}
|
|
2576
3283
|
case "openai.web_search": {
|
|
2577
|
-
const args =
|
|
3284
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3285
|
+
value: tool.args,
|
|
3286
|
+
schema: webSearchArgsSchema
|
|
3287
|
+
});
|
|
2578
3288
|
openaiTools.push({
|
|
2579
3289
|
type: "web_search",
|
|
2580
3290
|
filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
|
|
@@ -2584,7 +3294,10 @@ function prepareResponsesTools({
|
|
|
2584
3294
|
break;
|
|
2585
3295
|
}
|
|
2586
3296
|
case "openai.code_interpreter": {
|
|
2587
|
-
const args =
|
|
3297
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3298
|
+
value: tool.args,
|
|
3299
|
+
schema: codeInterpreterArgsSchema
|
|
3300
|
+
});
|
|
2588
3301
|
openaiTools.push({
|
|
2589
3302
|
type: "code_interpreter",
|
|
2590
3303
|
container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
|
|
@@ -2592,7 +3305,10 @@ function prepareResponsesTools({
|
|
|
2592
3305
|
break;
|
|
2593
3306
|
}
|
|
2594
3307
|
case "openai.image_generation": {
|
|
2595
|
-
const args =
|
|
3308
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3309
|
+
value: tool.args,
|
|
3310
|
+
schema: imageGenerationArgsSchema
|
|
3311
|
+
});
|
|
2596
3312
|
openaiTools.push({
|
|
2597
3313
|
type: "image_generation",
|
|
2598
3314
|
background: args.background,
|
|
@@ -2602,11 +3318,12 @@ function prepareResponsesTools({
|
|
|
2602
3318
|
image_url: args.inputImageMask.imageUrl
|
|
2603
3319
|
} : void 0,
|
|
2604
3320
|
model: args.model,
|
|
2605
|
-
size: args.size,
|
|
2606
|
-
quality: args.quality,
|
|
2607
3321
|
moderation: args.moderation,
|
|
3322
|
+
partial_images: args.partialImages,
|
|
3323
|
+
quality: args.quality,
|
|
3324
|
+
output_compression: args.outputCompression,
|
|
2608
3325
|
output_format: args.outputFormat,
|
|
2609
|
-
|
|
3326
|
+
size: args.size
|
|
2610
3327
|
});
|
|
2611
3328
|
break;
|
|
2612
3329
|
}
|
|
@@ -2643,83 +3360,6 @@ function prepareResponsesTools({
|
|
|
2643
3360
|
}
|
|
2644
3361
|
|
|
2645
3362
|
// src/responses/openai-responses-language-model.ts
|
|
2646
|
-
var webSearchCallItem = import_v419.z.object({
|
|
2647
|
-
type: import_v419.z.literal("web_search_call"),
|
|
2648
|
-
id: import_v419.z.string(),
|
|
2649
|
-
status: import_v419.z.string(),
|
|
2650
|
-
action: import_v419.z.discriminatedUnion("type", [
|
|
2651
|
-
import_v419.z.object({
|
|
2652
|
-
type: import_v419.z.literal("search"),
|
|
2653
|
-
query: import_v419.z.string().nullish()
|
|
2654
|
-
}),
|
|
2655
|
-
import_v419.z.object({
|
|
2656
|
-
type: import_v419.z.literal("open_page"),
|
|
2657
|
-
url: import_v419.z.string()
|
|
2658
|
-
}),
|
|
2659
|
-
import_v419.z.object({
|
|
2660
|
-
type: import_v419.z.literal("find"),
|
|
2661
|
-
url: import_v419.z.string(),
|
|
2662
|
-
pattern: import_v419.z.string()
|
|
2663
|
-
})
|
|
2664
|
-
]).nullish()
|
|
2665
|
-
});
|
|
2666
|
-
var fileSearchCallItem = import_v419.z.object({
|
|
2667
|
-
type: import_v419.z.literal("file_search_call"),
|
|
2668
|
-
id: import_v419.z.string(),
|
|
2669
|
-
queries: import_v419.z.array(import_v419.z.string()),
|
|
2670
|
-
results: import_v419.z.array(
|
|
2671
|
-
import_v419.z.object({
|
|
2672
|
-
attributes: import_v419.z.record(import_v419.z.string(), import_v419.z.unknown()),
|
|
2673
|
-
file_id: import_v419.z.string(),
|
|
2674
|
-
filename: import_v419.z.string(),
|
|
2675
|
-
score: import_v419.z.number(),
|
|
2676
|
-
text: import_v419.z.string()
|
|
2677
|
-
})
|
|
2678
|
-
).nullish()
|
|
2679
|
-
});
|
|
2680
|
-
var codeInterpreterCallItem = import_v419.z.object({
|
|
2681
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
2682
|
-
id: import_v419.z.string(),
|
|
2683
|
-
code: import_v419.z.string().nullable(),
|
|
2684
|
-
container_id: import_v419.z.string(),
|
|
2685
|
-
outputs: import_v419.z.array(
|
|
2686
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2687
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
2688
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
2689
|
-
])
|
|
2690
|
-
).nullable()
|
|
2691
|
-
});
|
|
2692
|
-
var localShellCallItem = import_v419.z.object({
|
|
2693
|
-
type: import_v419.z.literal("local_shell_call"),
|
|
2694
|
-
id: import_v419.z.string(),
|
|
2695
|
-
call_id: import_v419.z.string(),
|
|
2696
|
-
action: import_v419.z.object({
|
|
2697
|
-
type: import_v419.z.literal("exec"),
|
|
2698
|
-
command: import_v419.z.array(import_v419.z.string()),
|
|
2699
|
-
timeout_ms: import_v419.z.number().optional(),
|
|
2700
|
-
user: import_v419.z.string().optional(),
|
|
2701
|
-
working_directory: import_v419.z.string().optional(),
|
|
2702
|
-
env: import_v419.z.record(import_v419.z.string(), import_v419.z.string()).optional()
|
|
2703
|
-
})
|
|
2704
|
-
});
|
|
2705
|
-
var imageGenerationCallItem = import_v419.z.object({
|
|
2706
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
2707
|
-
id: import_v419.z.string(),
|
|
2708
|
-
result: import_v419.z.string()
|
|
2709
|
-
});
|
|
2710
|
-
var TOP_LOGPROBS_MAX = 20;
|
|
2711
|
-
var LOGPROBS_SCHEMA = import_v419.z.array(
|
|
2712
|
-
import_v419.z.object({
|
|
2713
|
-
token: import_v419.z.string(),
|
|
2714
|
-
logprob: import_v419.z.number(),
|
|
2715
|
-
top_logprobs: import_v419.z.array(
|
|
2716
|
-
import_v419.z.object({
|
|
2717
|
-
token: import_v419.z.string(),
|
|
2718
|
-
logprob: import_v419.z.number()
|
|
2719
|
-
})
|
|
2720
|
-
)
|
|
2721
|
-
})
|
|
2722
|
-
);
|
|
2723
3363
|
var OpenAIResponsesLanguageModel = class {
|
|
2724
3364
|
constructor(modelId, config) {
|
|
2725
3365
|
this.specificationVersion = "v3";
|
|
@@ -2772,7 +3412,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2772
3412
|
if (stopSequences != null) {
|
|
2773
3413
|
warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
|
|
2774
3414
|
}
|
|
2775
|
-
const openaiOptions = await (0,
|
|
3415
|
+
const openaiOptions = await (0, import_provider_utils29.parseProviderOptions)({
|
|
2776
3416
|
provider: "openai",
|
|
2777
3417
|
providerOptions,
|
|
2778
3418
|
schema: openaiResponsesProviderOptionsSchema
|
|
@@ -2911,7 +3551,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2911
3551
|
tools: openaiTools,
|
|
2912
3552
|
toolChoice: openaiToolChoice,
|
|
2913
3553
|
toolWarnings
|
|
2914
|
-
} = prepareResponsesTools({
|
|
3554
|
+
} = await prepareResponsesTools({
|
|
2915
3555
|
tools,
|
|
2916
3556
|
toolChoice,
|
|
2917
3557
|
strictJsonSchema
|
|
@@ -2941,91 +3581,13 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2941
3581
|
responseHeaders,
|
|
2942
3582
|
value: response,
|
|
2943
3583
|
rawValue: rawResponse
|
|
2944
|
-
} = await (0,
|
|
3584
|
+
} = await (0, import_provider_utils29.postJsonToApi)({
|
|
2945
3585
|
url,
|
|
2946
|
-
headers: (0,
|
|
3586
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
2947
3587
|
body,
|
|
2948
3588
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2949
|
-
successfulResponseHandler: (0,
|
|
2950
|
-
|
|
2951
|
-
id: import_v419.z.string(),
|
|
2952
|
-
created_at: import_v419.z.number(),
|
|
2953
|
-
error: import_v419.z.object({
|
|
2954
|
-
code: import_v419.z.string(),
|
|
2955
|
-
message: import_v419.z.string()
|
|
2956
|
-
}).nullish(),
|
|
2957
|
-
model: import_v419.z.string(),
|
|
2958
|
-
output: import_v419.z.array(
|
|
2959
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2960
|
-
import_v419.z.object({
|
|
2961
|
-
type: import_v419.z.literal("message"),
|
|
2962
|
-
role: import_v419.z.literal("assistant"),
|
|
2963
|
-
id: import_v419.z.string(),
|
|
2964
|
-
content: import_v419.z.array(
|
|
2965
|
-
import_v419.z.object({
|
|
2966
|
-
type: import_v419.z.literal("output_text"),
|
|
2967
|
-
text: import_v419.z.string(),
|
|
2968
|
-
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2969
|
-
annotations: import_v419.z.array(
|
|
2970
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2971
|
-
import_v419.z.object({
|
|
2972
|
-
type: import_v419.z.literal("url_citation"),
|
|
2973
|
-
start_index: import_v419.z.number(),
|
|
2974
|
-
end_index: import_v419.z.number(),
|
|
2975
|
-
url: import_v419.z.string(),
|
|
2976
|
-
title: import_v419.z.string()
|
|
2977
|
-
}),
|
|
2978
|
-
import_v419.z.object({
|
|
2979
|
-
type: import_v419.z.literal("file_citation"),
|
|
2980
|
-
file_id: import_v419.z.string(),
|
|
2981
|
-
filename: import_v419.z.string().nullish(),
|
|
2982
|
-
index: import_v419.z.number().nullish(),
|
|
2983
|
-
start_index: import_v419.z.number().nullish(),
|
|
2984
|
-
end_index: import_v419.z.number().nullish(),
|
|
2985
|
-
quote: import_v419.z.string().nullish()
|
|
2986
|
-
}),
|
|
2987
|
-
import_v419.z.object({
|
|
2988
|
-
type: import_v419.z.literal("container_file_citation")
|
|
2989
|
-
})
|
|
2990
|
-
])
|
|
2991
|
-
)
|
|
2992
|
-
})
|
|
2993
|
-
)
|
|
2994
|
-
}),
|
|
2995
|
-
webSearchCallItem,
|
|
2996
|
-
fileSearchCallItem,
|
|
2997
|
-
codeInterpreterCallItem,
|
|
2998
|
-
imageGenerationCallItem,
|
|
2999
|
-
localShellCallItem,
|
|
3000
|
-
import_v419.z.object({
|
|
3001
|
-
type: import_v419.z.literal("function_call"),
|
|
3002
|
-
call_id: import_v419.z.string(),
|
|
3003
|
-
name: import_v419.z.string(),
|
|
3004
|
-
arguments: import_v419.z.string(),
|
|
3005
|
-
id: import_v419.z.string()
|
|
3006
|
-
}),
|
|
3007
|
-
import_v419.z.object({
|
|
3008
|
-
type: import_v419.z.literal("computer_call"),
|
|
3009
|
-
id: import_v419.z.string(),
|
|
3010
|
-
status: import_v419.z.string().optional()
|
|
3011
|
-
}),
|
|
3012
|
-
import_v419.z.object({
|
|
3013
|
-
type: import_v419.z.literal("reasoning"),
|
|
3014
|
-
id: import_v419.z.string(),
|
|
3015
|
-
encrypted_content: import_v419.z.string().nullish(),
|
|
3016
|
-
summary: import_v419.z.array(
|
|
3017
|
-
import_v419.z.object({
|
|
3018
|
-
type: import_v419.z.literal("summary_text"),
|
|
3019
|
-
text: import_v419.z.string()
|
|
3020
|
-
})
|
|
3021
|
-
)
|
|
3022
|
-
})
|
|
3023
|
-
])
|
|
3024
|
-
),
|
|
3025
|
-
service_tier: import_v419.z.string().nullish(),
|
|
3026
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3027
|
-
usage: usageSchema2
|
|
3028
|
-
})
|
|
3589
|
+
successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
|
|
3590
|
+
openaiResponsesResponseSchema
|
|
3029
3591
|
),
|
|
3030
3592
|
abortSignal: options.abortSignal,
|
|
3031
3593
|
fetch: this.config.fetch
|
|
@@ -3088,7 +3650,9 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3088
3650
|
type: "tool-call",
|
|
3089
3651
|
toolCallId: part.call_id,
|
|
3090
3652
|
toolName: "local_shell",
|
|
3091
|
-
input: JSON.stringify({
|
|
3653
|
+
input: JSON.stringify({
|
|
3654
|
+
action: part.action
|
|
3655
|
+
}),
|
|
3092
3656
|
providerMetadata: {
|
|
3093
3657
|
openai: {
|
|
3094
3658
|
itemId: part.id
|
|
@@ -3116,7 +3680,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3116
3680
|
content.push({
|
|
3117
3681
|
type: "source",
|
|
3118
3682
|
sourceType: "url",
|
|
3119
|
-
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
|
|
3683
|
+
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils29.generateId)(),
|
|
3120
3684
|
url: annotation.url,
|
|
3121
3685
|
title: annotation.title
|
|
3122
3686
|
});
|
|
@@ -3124,7 +3688,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3124
3688
|
content.push({
|
|
3125
3689
|
type: "source",
|
|
3126
3690
|
sourceType: "document",
|
|
3127
|
-
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
|
|
3691
|
+
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils29.generateId)(),
|
|
3128
3692
|
mediaType: "text/plain",
|
|
3129
3693
|
title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
|
|
3130
3694
|
filename: (_l = annotation.filename) != null ? _l : annotation.file_id
|
|
@@ -3276,18 +3840,18 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3276
3840
|
warnings,
|
|
3277
3841
|
webSearchToolName
|
|
3278
3842
|
} = await this.getArgs(options);
|
|
3279
|
-
const { responseHeaders, value: response } = await (0,
|
|
3843
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils29.postJsonToApi)({
|
|
3280
3844
|
url: this.config.url({
|
|
3281
3845
|
path: "/responses",
|
|
3282
3846
|
modelId: this.modelId
|
|
3283
3847
|
}),
|
|
3284
|
-
headers: (0,
|
|
3848
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
3285
3849
|
body: {
|
|
3286
3850
|
...body,
|
|
3287
3851
|
stream: true
|
|
3288
3852
|
},
|
|
3289
3853
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
3290
|
-
successfulResponseHandler: (0,
|
|
3854
|
+
successfulResponseHandler: (0, import_provider_utils29.createEventSourceResponseHandler)(
|
|
3291
3855
|
openaiResponsesChunkSchema
|
|
3292
3856
|
),
|
|
3293
3857
|
abortSignal: options.abortSignal,
|
|
@@ -3342,7 +3906,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3342
3906
|
controller.enqueue({
|
|
3343
3907
|
type: "tool-input-start",
|
|
3344
3908
|
id: value.item.id,
|
|
3345
|
-
toolName: webSearchToolName != null ? webSearchToolName : "web_search"
|
|
3909
|
+
toolName: webSearchToolName != null ? webSearchToolName : "web_search",
|
|
3910
|
+
providerExecuted: true
|
|
3346
3911
|
});
|
|
3347
3912
|
} else if (value.item.type === "computer_call") {
|
|
3348
3913
|
ongoingToolCalls[value.output_index] = {
|
|
@@ -3352,7 +3917,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3352
3917
|
controller.enqueue({
|
|
3353
3918
|
type: "tool-input-start",
|
|
3354
3919
|
id: value.item.id,
|
|
3355
|
-
toolName: "computer_use"
|
|
3920
|
+
toolName: "computer_use",
|
|
3921
|
+
providerExecuted: true
|
|
3356
3922
|
});
|
|
3357
3923
|
} else if (value.item.type === "code_interpreter_call") {
|
|
3358
3924
|
ongoingToolCalls[value.output_index] = {
|
|
@@ -3365,7 +3931,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3365
3931
|
controller.enqueue({
|
|
3366
3932
|
type: "tool-input-start",
|
|
3367
3933
|
id: value.item.id,
|
|
3368
|
-
toolName: "code_interpreter"
|
|
3934
|
+
toolName: "code_interpreter",
|
|
3935
|
+
providerExecuted: true
|
|
3369
3936
|
});
|
|
3370
3937
|
controller.enqueue({
|
|
3371
3938
|
type: "tool-input-delta",
|
|
@@ -3565,6 +4132,17 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3565
4132
|
delta: value.delta
|
|
3566
4133
|
});
|
|
3567
4134
|
}
|
|
4135
|
+
} else if (isResponseImageGenerationCallPartialImageChunk(value)) {
|
|
4136
|
+
controller.enqueue({
|
|
4137
|
+
type: "tool-result",
|
|
4138
|
+
toolCallId: value.item_id,
|
|
4139
|
+
toolName: "image_generation",
|
|
4140
|
+
result: {
|
|
4141
|
+
result: value.partial_image_b64
|
|
4142
|
+
},
|
|
4143
|
+
providerExecuted: true,
|
|
4144
|
+
preliminary: true
|
|
4145
|
+
});
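Two streaming behaviours change in this block and the surrounding hunks: provider-executed tools (web_search, computer_use, code_interpreter) now emit tool-input-start with providerExecuted: true, and partial image events become preliminary tool-result parts carrying partial_image_b64, so consumers can render progressive previews before the final image_generation result arrives. A sketch of how a stream consumer might treat those parts, assuming streamText from the ai package and that the preliminary flag is surfaced on fullStream tool results (both assumptions; the diff only shows the provider-side stream parts):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-5-mini'),
  prompt: 'Create a small logo and describe it.',
  tools: {
    image_generation: openai.tools.imageGeneration({ partialImages: 2 }),
  },
});

for await (const part of result.fullStream) {
  if (part.type === 'tool-result' && part.toolName === 'image_generation') {
    // The preliminary flag originates from the provider stream shown above.
    const isPreliminary = (part as { preliminary?: boolean }).preliminary === true;
    console.log(isPreliminary ? 'partial image received' : 'final image received');
  }
}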
|
|
3568
4146
|
} else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
|
|
3569
4147
|
const toolCall = ongoingToolCalls[value.output_index];
|
|
3570
4148
|
if (toolCall != null) {
|
|
@@ -3661,7 +4239,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3661
4239
|
controller.enqueue({
|
|
3662
4240
|
type: "source",
|
|
3663
4241
|
sourceType: "url",
|
|
3664
|
-
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
|
|
4242
|
+
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils29.generateId)(),
|
|
3665
4243
|
url: value.annotation.url,
|
|
3666
4244
|
title: value.annotation.title
|
|
3667
4245
|
});
|
|
@@ -3669,7 +4247,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3669
4247
|
controller.enqueue({
|
|
3670
4248
|
type: "source",
|
|
3671
4249
|
sourceType: "document",
|
|
3672
|
-
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
|
|
4250
|
+
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils29.generateId)(),
|
|
3673
4251
|
mediaType: "text/plain",
|
|
3674
4252
|
title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
|
|
3675
4253
|
filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
|
|
@@ -3705,196 +4283,6 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema2 = import_v419.z.object({
-  input_tokens: import_v419.z.number(),
-  input_tokens_details: import_v419.z.object({ cached_tokens: import_v419.z.number().nullish() }).nullish(),
-  output_tokens: import_v419.z.number(),
-  output_tokens_details: import_v419.z.object({ reasoning_tokens: import_v419.z.number().nullish() }).nullish()
-});
-var textDeltaChunkSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_text.delta"),
-  item_id: import_v419.z.string(),
-  delta: import_v419.z.string(),
-  logprobs: LOGPROBS_SCHEMA.nullish()
-});
-var errorChunkSchema = import_v419.z.object({
-  type: import_v419.z.literal("error"),
-  code: import_v419.z.string(),
-  message: import_v419.z.string(),
-  param: import_v419.z.string().nullish(),
-  sequence_number: import_v419.z.number()
-});
-var responseFinishedChunkSchema = import_v419.z.object({
-  type: import_v419.z.enum(["response.completed", "response.incomplete"]),
-  response: import_v419.z.object({
-    incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
-    usage: usageSchema2,
-    service_tier: import_v419.z.string().nullish()
-  })
-});
-var responseCreatedChunkSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.created"),
-  response: import_v419.z.object({
-    id: import_v419.z.string(),
-    created_at: import_v419.z.number(),
-    model: import_v419.z.string(),
-    service_tier: import_v419.z.string().nullish()
-  })
-});
-var responseOutputItemAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_item.added"),
-  output_index: import_v419.z.number(),
-  item: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("message"),
-      id: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("reasoning"),
-      id: import_v419.z.string(),
-      encrypted_content: import_v419.z.string().nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("function_call"),
-      id: import_v419.z.string(),
-      call_id: import_v419.z.string(),
-      name: import_v419.z.string(),
-      arguments: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("web_search_call"),
-      id: import_v419.z.string(),
-      status: import_v419.z.string(),
-      action: import_v419.z.object({
-        type: import_v419.z.literal("search"),
-        query: import_v419.z.string().optional()
-      }).nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("computer_call"),
-      id: import_v419.z.string(),
-      status: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("file_search_call"),
-      id: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("image_generation_call"),
-      id: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("code_interpreter_call"),
-      id: import_v419.z.string(),
-      container_id: import_v419.z.string(),
-      code: import_v419.z.string().nullable(),
-      outputs: import_v419.z.array(
-        import_v419.z.discriminatedUnion("type", [
-          import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
-          import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
-        ])
-      ).nullable(),
-      status: import_v419.z.string()
-    })
-  ])
-});
-var responseOutputItemDoneSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_item.done"),
-  output_index: import_v419.z.number(),
-  item: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("message"),
-      id: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("reasoning"),
-      id: import_v419.z.string(),
-      encrypted_content: import_v419.z.string().nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("function_call"),
-      id: import_v419.z.string(),
-      call_id: import_v419.z.string(),
-      name: import_v419.z.string(),
-      arguments: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    }),
-    codeInterpreterCallItem,
-    imageGenerationCallItem,
-    webSearchCallItem,
-    fileSearchCallItem,
-    localShellCallItem,
-    import_v419.z.object({
-      type: import_v419.z.literal("computer_call"),
-      id: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    })
-  ])
-});
-var responseFunctionCallArgumentsDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.function_call_arguments.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDoneSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.done"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  code: import_v419.z.string()
-});
-var responseAnnotationAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_text.annotation.added"),
-  annotation: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("url_citation"),
-      url: import_v419.z.string(),
-      title: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("file_citation"),
-      file_id: import_v419.z.string(),
-      filename: import_v419.z.string().nullish(),
-      index: import_v419.z.number().nullish(),
-      start_index: import_v419.z.number().nullish(),
-      end_index: import_v419.z.number().nullish(),
-      quote: import_v419.z.string().nullish()
-    })
-  ])
-});
-var responseReasoningSummaryPartAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_part.added"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number()
-});
-var responseReasoningSummaryTextDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var openaiResponsesChunkSchema = import_v419.z.union([
-  textDeltaChunkSchema,
-  responseFinishedChunkSchema,
-  responseCreatedChunkSchema,
-  responseOutputItemAddedSchema,
-  responseOutputItemDoneSchema,
-  responseFunctionCallArgumentsDeltaSchema,
-  responseCodeInterpreterCallCodeDeltaSchema,
-  responseCodeInterpreterCallCodeDoneSchema,
-  responseAnnotationAddedSchema,
-  responseReasoningSummaryPartAddedSchema,
-  responseReasoningSummaryTextDeltaSchema,
-  errorChunkSchema,
-  import_v419.z.object({ type: import_v419.z.string() }).loose()
-  // fallback for unknown chunks
-]);
 function isTextDeltaChunk(chunk) {
   return chunk.type === "response.output_text.delta";
 }
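Note: the removed block is the 2.x set of inline zod schemas for streamed Responses chunks, which no longer appears at this point in the 3.0 bundle (the chunk type guards that follow are unchanged). The pattern worth noting is the trailing loose fallback on `openaiResponsesChunkSchema`, which lets unknown chunk types parse instead of throwing. A standalone sketch of that pattern, not the package's actual schema:

```ts
import { z } from 'zod/v4';

// One known chunk shape plus a loose { type: string } fallback, mirroring the
// removed openaiResponsesChunkSchema's "fallback for unknown chunks" entry.
const textDelta = z.object({
  type: z.literal('response.output_text.delta'),
  item_id: z.string(),
  delta: z.string(),
});
const unknownChunk = z.object({ type: z.string() }).loose();

const chunkSchema = z.union([textDelta, unknownChunk]);

// Known chunks keep their narrow shape; unrecognized ones still parse.
chunkSchema.parse({ type: 'response.output_text.delta', item_id: 'i1', delta: 'Hi' });
chunkSchema.parse({ type: 'response.some_future_event', payload: 42 });
```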
@@ -3913,6 +4301,9 @@ function isResponseCreatedChunk(chunk) {
 function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
   return chunk.type === "response.function_call_arguments.delta";
 }
+function isResponseImageGenerationCallPartialImageChunk(chunk) {
+  return chunk.type === "response.image_generation_call.partial_image";
+}
 function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
   return chunk.type === "response.code_interpreter_call_code.delta";
 }
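Note: the added guard follows the same string-discriminant pattern as its neighbours. A minimal sketch, with hypothetical local types, of how such a predicate narrows a chunk union:

```ts
type PartialImageChunk = {
  type: 'response.image_generation_call.partial_image';
  item_id: string;
  partial_image_b64: string; // field name taken from the handler added earlier in this diff
};
type Chunk = PartialImageChunk | { type: string };

function isPartialImageChunk(chunk: Chunk): chunk is PartialImageChunk {
  return chunk.type === 'response.image_generation_call.partial_image';
}

declare const chunk: Chunk;
if (isPartialImageChunk(chunk)) {
  chunk.partial_image_b64; // narrowed: the base64 payload is available here
}
```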
@@ -3971,47 +4362,6 @@ function getResponsesModelConfig(modelId) {
     isReasoningModel: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_v419.z.object({
-  include: import_v419.z.array(
-    import_v419.z.enum([
-      "reasoning.encrypted_content",
-      "file_search_call.results",
-      "message.output_text.logprobs"
-    ])
-  ).nullish(),
-  instructions: import_v419.z.string().nullish(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   *
-   * @see https://platform.openai.com/docs/api-reference/responses/create
-   * @see https://cookbook.openai.com/examples/using_logprobs
-   */
-  logprobs: import_v419.z.union([import_v419.z.boolean(), import_v419.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
-  /**
-   * The maximum number of total calls to built-in tools that can be processed in a response.
-   * This maximum number applies across all built-in tool calls, not per individual tool.
-   * Any further attempts to call a tool by the model will be ignored.
-   */
-  maxToolCalls: import_v419.z.number().nullish(),
-  metadata: import_v419.z.any().nullish(),
-  parallelToolCalls: import_v419.z.boolean().nullish(),
-  previousResponseId: import_v419.z.string().nullish(),
-  promptCacheKey: import_v419.z.string().nullish(),
-  reasoningEffort: import_v419.z.string().nullish(),
-  reasoningSummary: import_v419.z.string().nullish(),
-  safetyIdentifier: import_v419.z.string().nullish(),
-  serviceTier: import_v419.z.enum(["auto", "flex", "priority"]).nullish(),
-  store: import_v419.z.boolean().nullish(),
-  strictJsonSchema: import_v419.z.boolean().nullish(),
-  textVerbosity: import_v419.z.enum(["low", "medium", "high"]).nullish(),
-  user: import_v419.z.string().nullish()
-});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
@@ -4030,10 +4380,14 @@ var openaiResponsesProviderOptionsSchema = import_v419.z.object({
   fileSearchArgsSchema,
   fileSearchOutputSchema,
   hasDefaultResponseFormat,
+  imageGeneration,
+  imageGenerationArgsSchema,
+  imageGenerationOutputSchema,
   modelMaxImagesPerCall,
   openAITranscriptionProviderOptions,
   openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions,
+  openaiSpeechProviderOptionsSchema
 });
 //# sourceMappingURL=index.js.map
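Note: the net effect of this file's export changes is that `imageGeneration`, `imageGenerationArgsSchema`, `imageGenerationOutputSchema`, and `openaiSpeechProviderOptionsSchema` join the internal export surface, while the old inline `openaiResponsesProviderOptionsSchema` definition no longer appears at this point in the bundle. A sketch only, assuming the package exposes these through an `./internal` subpath; these are internal APIs and may change without notice:

```ts
// Assumed import path; internal surface, not covered by semver guarantees.
import {
  imageGeneration,                    // newly exported
  imageGenerationArgsSchema,          // newly exported
  imageGenerationOutputSchema,        // newly exported
  openaiSpeechProviderOptionsSchema,  // newly exported
  openaiEmbeddingProviderOptions,     // still exported
} from '@ai-sdk/openai/internal';
```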