@ai-sdk/openai 3.0.0-beta.17 → 3.0.0-beta.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1341 -1033
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1295 -942
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -183
- package/dist/internal/index.d.ts +101 -183
- package/dist/internal/index.js +1338 -1028
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1307 -953
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
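The dominant change visible throughout the compiled output below is that the provider's Zod schemas are no longer built eagerly at module load: each schema definition moves into a dedicated *-api.ts / *-options.ts module and is wrapped in lazyValidator / lazySchema with a zodSchema factory from @ai-sdk/provider-utils, while zod/v4 is now pulled in through the generated __toESM interop helper. A minimal TypeScript sketch of that pattern follows; it only assumes that lazyValidator defers the factory until the validator is first used, which is suggested but not spelled out by this diff:

  import { lazyValidator, zodSchema } from '@ai-sdk/provider-utils';
  import * as z from 'zod/v4';

  // Schema construction is wrapped in a factory so it runs when the
  // validator is first needed rather than at import time.
  const exampleResponseSchema = lazyValidator(() =>
    zodSchema(
      z.object({
        id: z.string().nullish(),
        choices: z.array(z.object({ finish_reason: z.string().nullish() })),
      }),
    ),
  );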
package/dist/internal/index.js
CHANGED
@@ -1,7 +1,9 @@
 "use strict";
+var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
 // src/internal/index.ts
@@ -43,27 +53,27 @@ __export(internal_exports, {
   openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
   openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+  openaiSpeechProviderOptionsSchema: () => openaiSpeechProviderOptionsSchema
 });
 module.exports = __toCommonJS(internal_exports);
 
 // src/chat/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
-var
-var import_v43 = require("zod/v4");
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
 
 // src/openai-error.ts
-var
+var z = __toESM(require("zod/v4"));
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema =
-  error:
-    message:
+var openaiErrorDataSchema = z.object({
+  error: z.object({
+    message: z.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type:
-    param:
-    code:
+    type: z.string().nullish(),
+    param: z.any().nullish(),
+    code: z.union([z.string(), z.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -283,95 +293,240 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }
 
+// src/chat/openai-chat-api.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var z2 = __toESM(require("zod/v4"));
+var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+  () => (0, import_provider_utils3.zodSchema)(
+    z2.object({
+      id: z2.string().nullish(),
+      created: z2.number().nullish(),
+      model: z2.string().nullish(),
+      choices: z2.array(
+        z2.object({
+          message: z2.object({
+            role: z2.literal("assistant").nullish(),
+            content: z2.string().nullish(),
+            tool_calls: z2.array(
+              z2.object({
+                id: z2.string().nullish(),
+                type: z2.literal("function"),
+                function: z2.object({
+                  name: z2.string(),
+                  arguments: z2.string()
+                })
+              })
+            ).nullish(),
+            annotations: z2.array(
+              z2.object({
+                type: z2.literal("url_citation"),
+                start_index: z2.number(),
+                end_index: z2.number(),
+                url: z2.string(),
+                title: z2.string()
+              })
+            ).nullish()
+          }),
+          index: z2.number(),
+          logprobs: z2.object({
+            content: z2.array(
+              z2.object({
+                token: z2.string(),
+                logprob: z2.number(),
+                top_logprobs: z2.array(
+                  z2.object({
+                    token: z2.string(),
+                    logprob: z2.number()
+                  })
+                )
+              })
+            ).nullish()
+          }).nullish(),
+          finish_reason: z2.string().nullish()
+        })
+      ),
+      usage: z2.object({
+        prompt_tokens: z2.number().nullish(),
+        completion_tokens: z2.number().nullish(),
+        total_tokens: z2.number().nullish(),
+        prompt_tokens_details: z2.object({
+          cached_tokens: z2.number().nullish()
+        }).nullish(),
+        completion_tokens_details: z2.object({
+          reasoning_tokens: z2.number().nullish(),
+          accepted_prediction_tokens: z2.number().nullish(),
+          rejected_prediction_tokens: z2.number().nullish()
+        }).nullish()
+      }).nullish()
+    })
+  )
+);
+var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+  () => (0, import_provider_utils3.zodSchema)(
+    z2.union([
+      z2.object({
+        id: z2.string().nullish(),
+        created: z2.number().nullish(),
+        model: z2.string().nullish(),
+        choices: z2.array(
+          z2.object({
+            delta: z2.object({
+              role: z2.enum(["assistant"]).nullish(),
+              content: z2.string().nullish(),
+              tool_calls: z2.array(
+                z2.object({
+                  index: z2.number(),
+                  id: z2.string().nullish(),
+                  type: z2.literal("function").nullish(),
+                  function: z2.object({
+                    name: z2.string().nullish(),
+                    arguments: z2.string().nullish()
+                  })
+                })
+              ).nullish(),
+              annotations: z2.array(
+                z2.object({
+                  type: z2.literal("url_citation"),
+                  start_index: z2.number(),
+                  end_index: z2.number(),
+                  url: z2.string(),
+                  title: z2.string()
+                })
+              ).nullish()
+            }).nullish(),
+            logprobs: z2.object({
+              content: z2.array(
+                z2.object({
+                  token: z2.string(),
+                  logprob: z2.number(),
+                  top_logprobs: z2.array(
+                    z2.object({
+                      token: z2.string(),
+                      logprob: z2.number()
+                    })
+                  )
+                })
+              ).nullish()
+            }).nullish(),
+            finish_reason: z2.string().nullish(),
+            index: z2.number()
+          })
+        ),
+        usage: z2.object({
+          prompt_tokens: z2.number().nullish(),
+          completion_tokens: z2.number().nullish(),
+          total_tokens: z2.number().nullish(),
+          prompt_tokens_details: z2.object({
+            cached_tokens: z2.number().nullish()
+          }).nullish(),
+          completion_tokens_details: z2.object({
+            reasoning_tokens: z2.number().nullish(),
+            accepted_prediction_tokens: z2.number().nullish(),
+            rejected_prediction_tokens: z2.number().nullish()
+          }).nullish()
+        }).nullish()
+      }),
+      openaiErrorDataSchema
+    ])
+  )
+);
+
 // src/chat/openai-chat-options.ts
-var
-var
- [old lines 289-374 not rendered in the source diff]
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var z3 = __toESM(require("zod/v4"));
+var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
+  () => (0, import_provider_utils4.zodSchema)(
+    z3.object({
+      /**
+       * Modify the likelihood of specified tokens appearing in the completion.
+       *
+       * Accepts a JSON object that maps tokens (specified by their token ID in
+       * the GPT tokenizer) to an associated bias value from -100 to 100.
+       */
+      logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
+      /**
+       * Return the log probabilities of the tokens.
+       *
+       * Setting to true will return the log probabilities of the tokens that
+       * were generated.
+       *
+       * Setting to a number will return the log probabilities of the top n
+       * tokens that were generated.
+       */
+      logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
+      /**
+       * Whether to enable parallel function calling during tool use. Default to true.
+       */
+      parallelToolCalls: z3.boolean().optional(),
+      /**
+       * A unique identifier representing your end-user, which can help OpenAI to
+       * monitor and detect abuse.
+       */
+      user: z3.string().optional(),
+      /**
+       * Reasoning effort for reasoning models. Defaults to `medium`.
+       */
+      reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+      /**
+       * Maximum number of completion tokens to generate. Useful for reasoning models.
+       */
+      maxCompletionTokens: z3.number().optional(),
+      /**
+       * Whether to enable persistence in responses API.
+       */
+      store: z3.boolean().optional(),
+      /**
+       * Metadata to associate with the request.
+       */
+      metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
+      /**
+       * Parameters for prediction mode.
+       */
+      prediction: z3.record(z3.string(), z3.any()).optional(),
+      /**
+       * Whether to use structured outputs.
+       *
+       * @default true
+       */
+      structuredOutputs: z3.boolean().optional(),
+      /**
+       * Service tier for the request.
+       * - 'auto': Default service tier. The request will be processed with the service tier configured in the
+       *   Project settings. Unless otherwise configured, the Project will use 'default'.
+       * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+       * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+       * - 'default': The request will be processed with the standard pricing and performance for the selected model.
+       *
+       * @default 'auto'
+       */
+      serviceTier: z3.enum(["auto", "flex", "priority", "default"]).optional(),
+      /**
+       * Whether to use strict JSON schema validation.
+       *
+       * @default false
+       */
+      strictJsonSchema: z3.boolean().optional(),
+      /**
+       * Controls the verbosity of the model's responses.
+       * Lower values will result in more concise responses, while higher values will result in more verbose responses.
+       */
+      textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
+      /**
+       * A cache key for prompt caching. Allows manual control over prompt caching behavior.
+       * Useful for improving cache hit rates and working around automatic caching issues.
+       */
+      promptCacheKey: z3.string().optional(),
+      /**
+       * A stable identifier used to help detect users of your application
+       * that may be violating OpenAI's usage policies. The IDs should be a
+       * string that uniquely identifies each user. We recommend hashing their
+       * username or email address, in order to avoid sending us any identifying
+       * information.
+       */
+      safetyIdentifier: z3.string().optional()
+    })
+  )
+);
 
 // src/chat/openai-chat-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
@@ -464,7 +619,7 @@ var OpenAIChatLanguageModel = class {
   }) {
     var _a, _b, _c, _d;
     const warnings = [];
-    const openaiOptions = (_a = await (0,
+    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiChatLanguageModelOptions
@@ -643,15 +798,15 @@ var OpenAIChatLanguageModel = class {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils5.postJsonToApi)({
       url: this.config.url({
         path: "/chat/completions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
        openaiChatResponseSchema
       ),
       abortSignal: options.abortSignal,
@@ -666,7 +821,7 @@ var OpenAIChatLanguageModel = class {
     for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
       content.push({
         type: "tool-call",
-        toolCallId: (_b = toolCall.id) != null ? _b : (0,
+        toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
         toolName: toolCall.function.name,
         input: toolCall.function.arguments
       });
@@ -675,7 +830,7 @@ var OpenAIChatLanguageModel = class {
       content.push({
         type: "source",
         sourceType: "url",
-        id: (0,
+        id: (0, import_provider_utils5.generateId)(),
        url: annotation.url,
        title: annotation.title
       });
@@ -721,15 +876,15 @@ var OpenAIChatLanguageModel = class {
         include_usage: true
       }
     };
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
       url: this.config.url({
         path: "/chat/completions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
        openaiChatChunkSchema
       ),
       abortSignal: options.abortSignal,
@@ -854,14 +1009,14 @@ var OpenAIChatLanguageModel = class {
                 delta: toolCall2.function.arguments
               });
             }
-            if ((0,
+            if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
               controller.enqueue({
                 type: "tool-input-end",
                 id: toolCall2.id
               });
               controller.enqueue({
                 type: "tool-call",
-                toolCallId: (_q = toolCall2.id) != null ? _q : (0,
+                toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
                 toolName: toolCall2.function.name,
                 input: toolCall2.function.arguments
               });
@@ -882,14 +1037,14 @@ var OpenAIChatLanguageModel = class {
               id: toolCall.id,
               delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
             });
-            if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
+            if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
              controller.enqueue({
                type: "tool-input-end",
                id: toolCall.id
              });
              controller.enqueue({
                type: "tool-call",
-               toolCallId: (_x = toolCall.id) != null ? _x : (0,
+               toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
               toolName: toolCall.function.name,
               input: toolCall.function.arguments
              });
@@ -902,7 +1057,7 @@ var OpenAIChatLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "url",
-              id: (0,
+              id: (0, import_provider_utils5.generateId)(),
              url: annotation.url,
              title: annotation.title
             });
@@ -927,121 +1082,6 @@ var OpenAIChatLanguageModel = class {
 };
 }
 };
-var openaiTokenUsageSchema = import_v43.z.object({
-  prompt_tokens: import_v43.z.number().nullish(),
-  completion_tokens: import_v43.z.number().nullish(),
-  total_tokens: import_v43.z.number().nullish(),
-  prompt_tokens_details: import_v43.z.object({
-    cached_tokens: import_v43.z.number().nullish()
-  }).nullish(),
-  completion_tokens_details: import_v43.z.object({
-    reasoning_tokens: import_v43.z.number().nullish(),
-    accepted_prediction_tokens: import_v43.z.number().nullish(),
-    rejected_prediction_tokens: import_v43.z.number().nullish()
-  }).nullish()
-}).nullish();
-var openaiChatResponseSchema = import_v43.z.object({
-  id: import_v43.z.string().nullish(),
-  created: import_v43.z.number().nullish(),
-  model: import_v43.z.string().nullish(),
-  choices: import_v43.z.array(
-    import_v43.z.object({
-      message: import_v43.z.object({
-        role: import_v43.z.literal("assistant").nullish(),
-        content: import_v43.z.string().nullish(),
-        tool_calls: import_v43.z.array(
-          import_v43.z.object({
-            id: import_v43.z.string().nullish(),
-            type: import_v43.z.literal("function"),
-            function: import_v43.z.object({
-              name: import_v43.z.string(),
-              arguments: import_v43.z.string()
-            })
-          })
-        ).nullish(),
-        annotations: import_v43.z.array(
-          import_v43.z.object({
-            type: import_v43.z.literal("url_citation"),
-            start_index: import_v43.z.number(),
-            end_index: import_v43.z.number(),
-            url: import_v43.z.string(),
-            title: import_v43.z.string()
-          })
-        ).nullish()
-      }),
-      index: import_v43.z.number(),
-      logprobs: import_v43.z.object({
-        content: import_v43.z.array(
-          import_v43.z.object({
-            token: import_v43.z.string(),
-            logprob: import_v43.z.number(),
-            top_logprobs: import_v43.z.array(
-              import_v43.z.object({
-                token: import_v43.z.string(),
-                logprob: import_v43.z.number()
-              })
-            )
-          })
-        ).nullish()
-      }).nullish(),
-      finish_reason: import_v43.z.string().nullish()
-    })
-  ),
-  usage: openaiTokenUsageSchema
-});
-var openaiChatChunkSchema = import_v43.z.union([
-  import_v43.z.object({
-    id: import_v43.z.string().nullish(),
-    created: import_v43.z.number().nullish(),
-    model: import_v43.z.string().nullish(),
-    choices: import_v43.z.array(
-      import_v43.z.object({
-        delta: import_v43.z.object({
-          role: import_v43.z.enum(["assistant"]).nullish(),
-          content: import_v43.z.string().nullish(),
-          tool_calls: import_v43.z.array(
-            import_v43.z.object({
-              index: import_v43.z.number(),
-              id: import_v43.z.string().nullish(),
-              type: import_v43.z.literal("function").nullish(),
-              function: import_v43.z.object({
-                name: import_v43.z.string().nullish(),
-                arguments: import_v43.z.string().nullish()
-              })
-            })
-          ).nullish(),
-          annotations: import_v43.z.array(
-            import_v43.z.object({
-              type: import_v43.z.literal("url_citation"),
-              start_index: import_v43.z.number(),
-              end_index: import_v43.z.number(),
-              url: import_v43.z.string(),
-              title: import_v43.z.string()
-            })
-          ).nullish()
-        }).nullish(),
-        logprobs: import_v43.z.object({
-          content: import_v43.z.array(
-            import_v43.z.object({
-              token: import_v43.z.string(),
-              logprob: import_v43.z.number(),
-              top_logprobs: import_v43.z.array(
-                import_v43.z.object({
-                  token: import_v43.z.string(),
-                  logprob: import_v43.z.number()
-                })
-              )
-            })
-          ).nullish()
-        }).nullish(),
-        finish_reason: import_v43.z.string().nullish(),
-        index: import_v43.z.number()
-      })
-    ),
-    usage: openaiTokenUsageSchema
-  }),
-  openaiErrorDataSchema
-]);
 function isReasoningModel(modelId) {
   return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
@@ -1092,8 +1132,7 @@ var reasoningModels = {
 };
 
 // src/completion/openai-completion-language-model.ts
-var
-var import_v45 = require("zod/v4");
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
 
 // src/completion/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -1200,48 +1239,111 @@ function mapOpenAIFinishReason2(finishReason) {
   }
 }
 
+// src/completion/openai-completion-api.ts
+var z4 = __toESM(require("zod/v4"));
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
+  () => (0, import_provider_utils6.zodSchema)(
+    z4.object({
+      id: z4.string().nullish(),
+      created: z4.number().nullish(),
+      model: z4.string().nullish(),
+      choices: z4.array(
+        z4.object({
+          text: z4.string(),
+          finish_reason: z4.string(),
+          logprobs: z4.object({
+            tokens: z4.array(z4.string()),
+            token_logprobs: z4.array(z4.number()),
+            top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+          }).nullish()
+        })
+      ),
+      usage: z4.object({
+        prompt_tokens: z4.number(),
+        completion_tokens: z4.number(),
+        total_tokens: z4.number()
+      }).nullish()
+    })
+  )
+);
+var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
+  () => (0, import_provider_utils6.zodSchema)(
+    z4.union([
+      z4.object({
+        id: z4.string().nullish(),
+        created: z4.number().nullish(),
+        model: z4.string().nullish(),
+        choices: z4.array(
+          z4.object({
+            text: z4.string(),
+            finish_reason: z4.string().nullish(),
+            index: z4.number(),
+            logprobs: z4.object({
+              tokens: z4.array(z4.string()),
+              token_logprobs: z4.array(z4.number()),
+              top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+            }).nullish()
+          })
+        ),
+        usage: z4.object({
+          prompt_tokens: z4.number(),
+          completion_tokens: z4.number(),
+          total_tokens: z4.number()
+        }).nullish()
+      }),
+      openaiErrorDataSchema
+    ])
+  )
+);
+
 // src/completion/openai-completion-options.ts
-var
-var
- [old lines 1206-1244 not rendered in the source diff]
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var z5 = __toESM(require("zod/v4"));
+var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
+  () => (0, import_provider_utils7.zodSchema)(
+    z5.object({
+      /**
+      Echo back the prompt in addition to the completion.
+      */
+      echo: z5.boolean().optional(),
+      /**
+      Modify the likelihood of specified tokens appearing in the completion.
+
+      Accepts a JSON object that maps tokens (specified by their token ID in
+      the GPT tokenizer) to an associated bias value from -100 to 100. You
+      can use this tokenizer tool to convert text to token IDs. Mathematically,
+      the bias is added to the logits generated by the model prior to sampling.
+      The exact effect will vary per model, but values between -1 and 1 should
+      decrease or increase likelihood of selection; values like -100 or 100
+      should result in a ban or exclusive selection of the relevant token.
+
+      As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+      token from being generated.
+      */
+      logitBias: z5.record(z5.string(), z5.number()).optional(),
+      /**
+      The suffix that comes after a completion of inserted text.
+      */
+      suffix: z5.string().optional(),
+      /**
+      A unique identifier representing your end-user, which can help OpenAI to
+      monitor and detect abuse. Learn more.
+      */
+      user: z5.string().optional(),
+      /**
+      Return the log probabilities of the tokens. Including logprobs will increase
+      the response size and can slow down response times. However, it can
+      be useful to better understand how the model is behaving.
+      Setting to true will return the log probabilities of the tokens that
+      were generated.
+      Setting to a number will return the log probabilities of the top n
+      tokens that were generated.
+      */
+      logprobs: z5.union([z5.boolean(), z5.number()]).optional()
+    })
+  )
+);
 
 // src/completion/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
@@ -1276,12 +1378,12 @@ var OpenAICompletionLanguageModel = class {
   }) {
     const warnings = [];
     const openaiOptions = {
-      ...await (0,
+      ...await (0, import_provider_utils8.parseProviderOptions)({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
       }),
-      ...await (0,
+      ...await (0, import_provider_utils8.parseProviderOptions)({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
@@ -1337,15 +1439,15 @@ var OpenAICompletionLanguageModel = class {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils8.postJsonToApi)({
       url: this.config.url({
         path: "/completions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
       body: args,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
        openaiCompletionResponseSchema
       ),
       abortSignal: options.abortSignal,
@@ -1383,15 +1485,15 @@ var OpenAICompletionLanguageModel = class {
         include_usage: true
       }
     };
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
       url: this.config.url({
         path: "/completions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
       ),
       abortSignal: options.abortSignal,
@@ -1472,69 +1574,42 @@ var OpenAICompletionLanguageModel = class {
   };
  }
 };
-var usageSchema = import_v45.z.object({
-  prompt_tokens: import_v45.z.number(),
-  completion_tokens: import_v45.z.number(),
-  total_tokens: import_v45.z.number()
-});
-var openaiCompletionResponseSchema = import_v45.z.object({
-  id: import_v45.z.string().nullish(),
-  created: import_v45.z.number().nullish(),
-  model: import_v45.z.string().nullish(),
-  choices: import_v45.z.array(
-    import_v45.z.object({
-      text: import_v45.z.string(),
-      finish_reason: import_v45.z.string(),
-      logprobs: import_v45.z.object({
-        tokens: import_v45.z.array(import_v45.z.string()),
-        token_logprobs: import_v45.z.array(import_v45.z.number()),
-        top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-      }).nullish()
-    })
-  ),
-  usage: usageSchema.nullish()
-});
-var openaiCompletionChunkSchema = import_v45.z.union([
-  import_v45.z.object({
-    id: import_v45.z.string().nullish(),
-    created: import_v45.z.number().nullish(),
-    model: import_v45.z.string().nullish(),
-    choices: import_v45.z.array(
-      import_v45.z.object({
-        text: import_v45.z.string(),
-        finish_reason: import_v45.z.string().nullish(),
-        index: import_v45.z.number(),
-        logprobs: import_v45.z.object({
-          tokens: import_v45.z.array(import_v45.z.string()),
-          token_logprobs: import_v45.z.array(import_v45.z.number()),
-          top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-        }).nullish()
-      })
-    ),
-    usage: usageSchema.nullish()
-  }),
-  openaiErrorDataSchema
-]);
 
 // src/embedding/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
-var
-var import_v47 = require("zod/v4");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 
 // src/embedding/openai-embedding-options.ts
-var
-var
- [old lines 1527-1537 not rendered in the source diff]
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var z6 = __toESM(require("zod/v4"));
+var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
+  () => (0, import_provider_utils9.zodSchema)(
+    z6.object({
+      /**
+      The number of dimensions the resulting output embeddings should have.
+      Only supported in text-embedding-3 and later models.
+      */
+      dimensions: z6.number().optional(),
+      /**
+      A unique identifier representing your end-user, which can help OpenAI to
+      monitor and detect abuse. Learn more.
+      */
+      user: z6.string().optional()
+    })
+  )
+);
+
+// src/embedding/openai-embedding-api.ts
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var z7 = __toESM(require("zod/v4"));
+var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
+  () => (0, import_provider_utils10.zodSchema)(
+    z7.object({
+      data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+      usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+    })
+  )
+);
 
 // src/embedding/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
@@ -1563,7 +1638,7 @@ var OpenAIEmbeddingModel = class {
       values
     });
    }
-    const openaiOptions = (_a = await (0,
+    const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
@@ -1572,12 +1647,12 @@ var OpenAIEmbeddingModel = class {
       responseHeaders,
       value: response,
       rawValue
-    } = await (0,
+    } = await (0, import_provider_utils11.postJsonToApi)({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
       body: {
         model: this.modelId,
         input: values,
@@ -1586,7 +1661,7 @@ var OpenAIEmbeddingModel = class {
         user: openaiOptions.user
       },
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
       ),
       abortSignal,
@@ -1599,14 +1674,25 @@ var OpenAIEmbeddingModel = class {
   };
  }
 };
-var openaiTextEmbeddingResponseSchema = import_v47.z.object({
-  data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
-  usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
-});
 
 // src/image/openai-image-model.ts
-var
-
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
+
+// src/image/openai-image-api.ts
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var z8 = __toESM(require("zod/v4"));
+var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
+  () => (0, import_provider_utils12.zodSchema)(
+    z8.object({
+      data: z8.array(
+        z8.object({
+          b64_json: z8.string(),
+          revised_prompt: z8.string().optional()
+        })
+      )
+    })
+  )
+);
 
 // src/image/openai-image-options.ts
 var modelMaxImagesPerCall = {
@@ -1657,12 +1743,12 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported-setting", setting: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { value: response, responseHeaders } = await (0,
+    const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
       url: this.config.url({
         path: "/images/generations",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
       body: {
         model: this.modelId,
         prompt,
@@ -1672,7 +1758,7 @@ var OpenAIImageModel = class {
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
        openaiImageResponseSchema
       ),
       abortSignal,
@@ -1698,42 +1784,75 @@ var OpenAIImageModel = class {
   };
  }
 };
-var openaiImageResponseSchema = import_v48.z.object({
-  data: import_v48.z.array(
-    import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
-  )
-});
 
 // src/transcription/openai-transcription-model.ts
-var
-
+var import_provider_utils16 = require("@ai-sdk/provider-utils");
+
+// src/transcription/openai-transcription-api.ts
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var z9 = __toESM(require("zod/v4"));
+var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidator)(
+  () => (0, import_provider_utils14.zodSchema)(
+    z9.object({
+      text: z9.string(),
+      language: z9.string().nullish(),
+      duration: z9.number().nullish(),
+      words: z9.array(
+        z9.object({
+          word: z9.string(),
+          start: z9.number(),
+          end: z9.number()
+        })
+      ).nullish(),
+      segments: z9.array(
+        z9.object({
+          id: z9.number(),
+          seek: z9.number(),
+          start: z9.number(),
+          end: z9.number(),
+          text: z9.string(),
+          tokens: z9.array(z9.number()),
+          temperature: z9.number(),
+          avg_logprob: z9.number(),
+          compression_ratio: z9.number(),
+          no_speech_prob: z9.number()
+        })
+      ).nullish()
+    })
+  )
+);
 
 // src/transcription/openai-transcription-options.ts
-var
-var
- [old lines 1714-1736 not rendered in the source diff]
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var z10 = __toESM(require("zod/v4"));
+var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazyValidator)(
+  () => (0, import_provider_utils15.zodSchema)(
+    z10.object({
+      /**
+       * Additional information to include in the transcription response.
+       */
+      include: z10.array(z10.string()).optional(),
+      /**
+       * The language of the input audio in ISO-639-1 format.
+       */
+      language: z10.string().optional(),
+      /**
+       * An optional text to guide the model's style or continue a previous audio segment.
+       */
+      prompt: z10.string().optional(),
+      /**
+       * The sampling temperature, between 0 and 1.
+       * @default 0
+       */
+      temperature: z10.number().min(0).max(1).default(0).optional(),
+      /**
+       * The timestamp granularities to populate for this transcription.
+       * @default ['segment']
+       */
+      timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
+    })
+  )
+);
 
 // src/transcription/openai-transcription-model.ts
 var languageMap = {
@@ -1810,15 +1929,15 @@ var OpenAITranscriptionModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = await (0,
+    const openAIOptions = await (0, import_provider_utils16.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
     const formData = new FormData();
-    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils16.convertBase64ToUint8Array)(audio)]);
     formData.append("model", this.modelId);
-    const fileExtension = (0,
+    const fileExtension = (0, import_provider_utils16.mediaTypeToExtension)(mediaType);
     formData.append(
       "file",
       new File([blob], "audio", { type: mediaType }),
@@ -1863,15 +1982,15 @@ var OpenAITranscriptionModel = class {
       value: response,
       responseHeaders,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils16.postFormDataToApi)({
       url: this.config.url({
         path: "/audio/transcriptions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils16.combineHeaders)(this.config.headers(), options.headers),
       formData,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils16.createJsonResponseHandler)(
        openaiTranscriptionResponseSchema
       ),
       abortSignal: options.abortSignal,
@@ -1901,40 +2020,23 @@ var OpenAITranscriptionModel = class {
   };
  }
 };
- [old lines 1904-1915 not rendered in the source diff]
-    import_v410.z.object({
-      id: import_v410.z.number(),
-      seek: import_v410.z.number(),
-      start: import_v410.z.number(),
-      end: import_v410.z.number(),
-      text: import_v410.z.string(),
-      tokens: import_v410.z.array(import_v410.z.number()),
-      temperature: import_v410.z.number(),
-      avg_logprob: import_v410.z.number(),
-      compression_ratio: import_v410.z.number(),
-      no_speech_prob: import_v410.z.number()
+
+// src/speech/openai-speech-model.ts
+var import_provider_utils18 = require("@ai-sdk/provider-utils");
+
+// src/speech/openai-speech-options.ts
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
+var z11 = __toESM(require("zod/v4"));
+var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazyValidator)(
+  () => (0, import_provider_utils17.zodSchema)(
+    z11.object({
+      instructions: z11.string().nullish(),
+      speed: z11.number().min(0.25).max(4).default(1).nullish()
     })
-  )
-
+  )
+);
 
 // src/speech/openai-speech-model.ts
-var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_v411 = require("zod/v4");
-var OpenAIProviderOptionsSchema = import_v411.z.object({
-  instructions: import_v411.z.string().nullish(),
-  speed: import_v411.z.number().min(0.25).max(4).default(1).nullish()
-});
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
@@ -1954,10 +2056,10 @@ var OpenAISpeechModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = await (0,
+    const openAIOptions = await (0, import_provider_utils18.parseProviderOptions)({
       provider: "openai",
       providerOptions,
-      schema:
+      schema: openaiSpeechProviderOptionsSchema
     });
     const requestBody = {
       model: this.modelId,
@@ -2007,15 +2109,15 @@ var OpenAISpeechModel = class {
       value: audio,
       responseHeaders,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils18.postJsonToApi)({
       url: this.config.url({
         path: "/audio/speech",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils18.combineHeaders)(this.config.headers(), options.headers),
       body: requestBody,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils18.createBinaryResponseHandler)(),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
@@ -2037,31 +2139,34 @@ var OpenAISpeechModel = class {
 
 // src/responses/openai-responses-language-model.ts
 var import_provider8 = require("@ai-sdk/provider");
-var
-var import_v419 = require("zod/v4");
+var import_provider_utils29 = require("@ai-sdk/provider-utils");
 
 // src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
-var
-var
+var import_provider_utils20 = require("@ai-sdk/provider-utils");
+var z13 = __toESM(require("zod/v4"));
 
 // src/tool/local-shell.ts
-var
-var
-var localShellInputSchema =
- [old lines 2052-2064 not rendered in the source diff]
+var import_provider_utils19 = require("@ai-sdk/provider-utils");
+var z12 = __toESM(require("zod/v4"));
+var localShellInputSchema = (0, import_provider_utils19.lazySchema)(
+  () => (0, import_provider_utils19.zodSchema)(
+    z12.object({
+      action: z12.object({
+        type: z12.literal("exec"),
+        command: z12.array(z12.string()),
+        timeoutMs: z12.number().optional(),
+        user: z12.string().optional(),
+        workingDirectory: z12.string().optional(),
+        env: z12.record(z12.string(), z12.string()).optional()
+      })
+    })
+  )
+);
+var localShellOutputSchema = (0, import_provider_utils19.lazySchema)(
+  () => (0, import_provider_utils19.zodSchema)(z12.object({ output: z12.string() }))
+);
+var localShell = (0, import_provider_utils19.createProviderDefinedToolFactoryWithOutputSchema)({
   id: "openai.local_shell",
   name: "local_shell",
   inputSchema: localShellInputSchema,
@@ -2126,7 +2231,7 @@ async function convertToOpenAIResponsesInput({
             return {
               type: "input_image",
               ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
-                image_url: `data:${mediaType};base64,${(0,
+                image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
               },
               detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
             };
@@ -2141,7 +2246,7 @@ async function convertToOpenAIResponsesInput({
               type: "input_file",
               ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
                 filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
-                file_data: `data:application/pdf;base64,${(0,
+                file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
               }
             };
           } else {
@@ -2174,7 +2279,10 @@ async function convertToOpenAIResponsesInput({
           break;
         }
         if (hasLocalShellTool && part.toolName === "local_shell") {
-          const parsedInput =
+          const parsedInput = await (0, import_provider_utils20.validateTypes)({
+            value: part.input,
+            schema: localShellInputSchema
+          });
           input.push({
             type: "local_shell_call",
             call_id: part.toolCallId,
@@ -2212,7 +2320,7 @@ async function convertToOpenAIResponsesInput({
           break;
         }
         case "reasoning": {
-          const providerOptions = await (0,
+          const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
            provider: "openai",
            providerOptions: part.providerOptions,
            schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2270,10 +2378,14 @@ async function convertToOpenAIResponsesInput({
       for (const part of content) {
         const output = part.output;
         if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
+          const parsedOutput = await (0, import_provider_utils20.validateTypes)({
+            value: output.value,
+            schema: localShellOutputSchema
+          });
           input.push({
             type: "local_shell_call_output",
             call_id: part.toolCallId,
-            output:
+            output: parsedOutput.output
           });
           break;
         }
@@ -2308,9 +2420,9 @@ async function convertToOpenAIResponsesInput({
   }
   return { input, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema =
-  itemId:
-  reasoningEncryptedContent:
+var openaiResponsesReasoningProviderOptionsSchema = z13.object({
+  itemId: z13.string().nullish(),
+  reasoningEncryptedContent: z13.string().nullish()
 });
 
 // src/responses/map-openai-responses-finish-reason.ts
@@ -2331,33 +2443,574 @@ function mapOpenAIResponseFinishReason({
   }
 }
 
+// src/responses/openai-responses-api.ts
+var import_provider_utils21 = require("@ai-sdk/provider-utils");
+var z14 = __toESM(require("zod/v4"));
+var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
+  () => (0, import_provider_utils21.zodSchema)(
+    z14.union([
+      z14.object({
+        type: z14.literal("response.output_text.delta"),
+        item_id: z14.string(),
+        delta: z14.string(),
+        logprobs: z14.array(
+          z14.object({
+            token: z14.string(),
+            logprob: z14.number(),
+            top_logprobs: z14.array(
+              z14.object({
+                token: z14.string(),
+                logprob: z14.number()
+              })
+            )
+          })
+        ).nullish()
+      }),
+      z14.object({
+        type: z14.enum(["response.completed", "response.incomplete"]),
+        response: z14.object({
+          incomplete_details: z14.object({ reason: z14.string() }).nullish(),
+          usage: z14.object({
+            input_tokens: z14.number(),
+            input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
+            output_tokens: z14.number(),
+            output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
+          }),
+          service_tier: z14.string().nullish()
+        })
+      }),
+      z14.object({
+        type: z14.literal("response.created"),
+        response: z14.object({
+          id: z14.string(),
+          created_at: z14.number(),
+          model: z14.string(),
+          service_tier: z14.string().nullish()
+        })
+      }),
+      z14.object({
+        type: z14.literal("response.output_item.added"),
+        output_index: z14.number(),
+        item: z14.discriminatedUnion("type", [
+          z14.object({
+            type: z14.literal("message"),
+            id: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("reasoning"),
+            id: z14.string(),
+            encrypted_content: z14.string().nullish()
+          }),
+          z14.object({
+            type: z14.literal("function_call"),
+            id: z14.string(),
+            call_id: z14.string(),
+            name: z14.string(),
+            arguments: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("web_search_call"),
+            id: z14.string(),
+            status: z14.string(),
+            action: z14.object({
+              type: z14.literal("search"),
+              query: z14.string().optional()
+            }).nullish()
+          }),
+          z14.object({
+            type: z14.literal("computer_call"),
+            id: z14.string(),
+            status: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("file_search_call"),
+            id: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("image_generation_call"),
+            id: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("code_interpreter_call"),
+            id: z14.string(),
+            container_id: z14.string(),
+            code: z14.string().nullable(),
+            outputs: z14.array(
+              z14.discriminatedUnion("type", [
+                z14.object({ type: z14.literal("logs"), logs: z14.string() }),
+                z14.object({ type: z14.literal("image"), url: z14.string() })
+              ])
+            ).nullable(),
+            status: z14.string()
+          })
+        ])
+      }),
+      z14.object({
+        type: z14.literal("response.output_item.done"),
+        output_index: z14.number(),
+        item: z14.discriminatedUnion("type", [
+          z14.object({
+            type: z14.literal("message"),
+            id: z14.string()
+          }),
+          z14.object({
+            type: z14.literal("reasoning"),
+            id: z14.string(),
+            encrypted_content: z14.string().nullish()
+          }),
+          z14.object({
+            type: z14.literal("function_call"),
+            id: z14.string(),
+            call_id: z14.string(),
+            name: z14.string(),
+            arguments: z14.string(),
arguments: z14.string(),
|
|
2567
|
+
status: z14.literal("completed")
|
|
2568
|
+
}),
|
|
2569
|
+
z14.object({
|
|
2570
|
+
type: z14.literal("code_interpreter_call"),
|
|
2571
|
+
id: z14.string(),
|
|
2572
|
+
code: z14.string().nullable(),
|
|
2573
|
+
container_id: z14.string(),
|
|
2574
|
+
outputs: z14.array(
|
|
2575
|
+
z14.discriminatedUnion("type", [
|
|
2576
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2577
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2578
|
+
])
|
|
2579
|
+
).nullable()
|
|
2580
|
+
}),
|
|
2581
|
+
z14.object({
|
|
2582
|
+
type: z14.literal("image_generation_call"),
|
|
2583
|
+
id: z14.string(),
|
|
2584
|
+
result: z14.string()
|
|
2585
|
+
}),
|
|
2586
|
+
z14.object({
|
|
2587
|
+
type: z14.literal("web_search_call"),
|
|
2588
|
+
id: z14.string(),
|
|
2589
|
+
status: z14.string(),
|
|
2590
|
+
action: z14.discriminatedUnion("type", [
|
|
2591
|
+
z14.object({
|
|
2592
|
+
type: z14.literal("search"),
|
|
2593
|
+
query: z14.string().nullish()
|
|
2594
|
+
}),
|
|
2595
|
+
z14.object({
|
|
2596
|
+
type: z14.literal("open_page"),
|
|
2597
|
+
url: z14.string()
|
|
2598
|
+
}),
|
|
2599
|
+
z14.object({
|
|
2600
|
+
type: z14.literal("find"),
|
|
2601
|
+
url: z14.string(),
|
|
2602
|
+
pattern: z14.string()
|
|
2603
|
+
})
|
|
2604
|
+
]).nullish()
|
|
2605
|
+
}),
|
|
2606
|
+
z14.object({
|
|
2607
|
+
type: z14.literal("file_search_call"),
|
|
2608
|
+
id: z14.string(),
|
|
2609
|
+
queries: z14.array(z14.string()),
|
|
2610
|
+
results: z14.array(
|
|
2611
|
+
z14.object({
|
|
2612
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2613
|
+
file_id: z14.string(),
|
|
2614
|
+
filename: z14.string(),
|
|
2615
|
+
score: z14.number(),
|
|
2616
|
+
text: z14.string()
|
|
2617
|
+
})
|
|
2618
|
+
).nullish()
|
|
2619
|
+
}),
|
|
2620
|
+
z14.object({
|
|
2621
|
+
type: z14.literal("local_shell_call"),
|
|
2622
|
+
id: z14.string(),
|
|
2623
|
+
call_id: z14.string(),
|
|
2624
|
+
action: z14.object({
|
|
2625
|
+
type: z14.literal("exec"),
|
|
2626
|
+
command: z14.array(z14.string()),
|
|
2627
|
+
timeout_ms: z14.number().optional(),
|
|
2628
|
+
user: z14.string().optional(),
|
|
2629
|
+
working_directory: z14.string().optional(),
|
|
2630
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2631
|
+
})
|
|
2632
|
+
}),
|
|
2633
|
+
z14.object({
|
|
2634
|
+
type: z14.literal("computer_call"),
|
|
2635
|
+
id: z14.string(),
|
|
2636
|
+
status: z14.literal("completed")
|
|
2637
|
+
})
|
|
2638
|
+
])
|
|
2639
|
+
}),
|
|
2640
|
+
z14.object({
|
|
2641
|
+
type: z14.literal("response.function_call_arguments.delta"),
|
|
2642
|
+
item_id: z14.string(),
|
|
2643
|
+
output_index: z14.number(),
|
|
2644
|
+
delta: z14.string()
|
|
2645
|
+
}),
|
|
2646
|
+
z14.object({
|
|
2647
|
+
type: z14.literal("response.image_generation_call.partial_image"),
|
|
2648
|
+
item_id: z14.string(),
|
|
2649
|
+
output_index: z14.number(),
|
|
2650
|
+
partial_image_b64: z14.string()
|
|
2651
|
+
}),
|
|
2652
|
+
z14.object({
|
|
2653
|
+
type: z14.literal("response.code_interpreter_call_code.delta"),
|
|
2654
|
+
item_id: z14.string(),
|
|
2655
|
+
output_index: z14.number(),
|
|
2656
|
+
delta: z14.string()
|
|
2657
|
+
}),
|
|
2658
|
+
z14.object({
|
|
2659
|
+
type: z14.literal("response.code_interpreter_call_code.done"),
|
|
2660
|
+
item_id: z14.string(),
|
|
2661
|
+
output_index: z14.number(),
|
|
2662
|
+
code: z14.string()
|
|
2663
|
+
}),
|
|
2664
|
+
z14.object({
|
|
2665
|
+
type: z14.literal("response.output_text.annotation.added"),
|
|
2666
|
+
annotation: z14.discriminatedUnion("type", [
|
|
2667
|
+
z14.object({
|
|
2668
|
+
type: z14.literal("url_citation"),
|
|
2669
|
+
url: z14.string(),
|
|
2670
|
+
title: z14.string()
|
|
2671
|
+
}),
|
|
2672
|
+
z14.object({
|
|
2673
|
+
type: z14.literal("file_citation"),
|
|
2674
|
+
file_id: z14.string(),
|
|
2675
|
+
filename: z14.string().nullish(),
|
|
2676
|
+
index: z14.number().nullish(),
|
|
2677
|
+
start_index: z14.number().nullish(),
|
|
2678
|
+
end_index: z14.number().nullish(),
|
|
2679
|
+
quote: z14.string().nullish()
|
|
2680
|
+
})
|
|
2681
|
+
])
|
|
2682
|
+
}),
|
|
2683
|
+
z14.object({
|
|
2684
|
+
type: z14.literal("response.reasoning_summary_part.added"),
|
|
2685
|
+
item_id: z14.string(),
|
|
2686
|
+
summary_index: z14.number()
|
|
2687
|
+
}),
|
|
2688
|
+
z14.object({
|
|
2689
|
+
type: z14.literal("response.reasoning_summary_text.delta"),
|
|
2690
|
+
item_id: z14.string(),
|
|
2691
|
+
summary_index: z14.number(),
|
|
2692
|
+
delta: z14.string()
|
|
2693
|
+
}),
|
|
2694
|
+
z14.object({
|
|
2695
|
+
type: z14.literal("error"),
|
|
2696
|
+
code: z14.string(),
|
|
2697
|
+
message: z14.string(),
|
|
2698
|
+
param: z14.string().nullish(),
|
|
2699
|
+
sequence_number: z14.number()
|
|
2700
|
+
}),
|
|
2701
|
+
z14.object({ type: z14.string() }).loose().transform((value) => ({
|
|
2702
|
+
type: "unknown_chunk",
|
|
2703
|
+
message: value.type
|
|
2704
|
+
}))
|
|
2705
|
+
// fallback for unknown chunks
|
|
2706
|
+
])
|
|
2707
|
+
)
|
|
2708
|
+
);
|
|
2709
|
+
var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2710
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2711
|
+
z14.object({
|
|
2712
|
+
id: z14.string(),
|
|
2713
|
+
created_at: z14.number(),
|
|
2714
|
+
error: z14.object({
|
|
2715
|
+
code: z14.string(),
|
|
2716
|
+
message: z14.string()
|
|
2717
|
+
}).nullish(),
|
|
2718
|
+
model: z14.string(),
|
|
2719
|
+
output: z14.array(
|
|
2720
|
+
z14.discriminatedUnion("type", [
|
|
2721
|
+
z14.object({
|
|
2722
|
+
type: z14.literal("message"),
|
|
2723
|
+
role: z14.literal("assistant"),
|
|
2724
|
+
id: z14.string(),
|
|
2725
|
+
content: z14.array(
|
|
2726
|
+
z14.object({
|
|
2727
|
+
type: z14.literal("output_text"),
|
|
2728
|
+
text: z14.string(),
|
|
2729
|
+
logprobs: z14.array(
|
|
2730
|
+
z14.object({
|
|
2731
|
+
token: z14.string(),
|
|
2732
|
+
logprob: z14.number(),
|
|
2733
|
+
top_logprobs: z14.array(
|
|
2734
|
+
z14.object({
|
|
2735
|
+
token: z14.string(),
|
|
2736
|
+
logprob: z14.number()
|
|
2737
|
+
})
|
|
2738
|
+
)
|
|
2739
|
+
})
|
|
2740
|
+
).nullish(),
|
|
2741
|
+
annotations: z14.array(
|
|
2742
|
+
z14.discriminatedUnion("type", [
|
|
2743
|
+
z14.object({
|
|
2744
|
+
type: z14.literal("url_citation"),
|
|
2745
|
+
start_index: z14.number(),
|
|
2746
|
+
end_index: z14.number(),
|
|
2747
|
+
url: z14.string(),
|
|
2748
|
+
title: z14.string()
|
|
2749
|
+
}),
|
|
2750
|
+
z14.object({
|
|
2751
|
+
type: z14.literal("file_citation"),
|
|
2752
|
+
file_id: z14.string(),
|
|
2753
|
+
filename: z14.string().nullish(),
|
|
2754
|
+
index: z14.number().nullish(),
|
|
2755
|
+
start_index: z14.number().nullish(),
|
|
2756
|
+
end_index: z14.number().nullish(),
|
|
2757
|
+
quote: z14.string().nullish()
|
|
2758
|
+
}),
|
|
2759
|
+
z14.object({
|
|
2760
|
+
type: z14.literal("container_file_citation")
|
|
2761
|
+
})
|
|
2762
|
+
])
|
|
2763
|
+
)
|
|
2764
|
+
})
|
|
2765
|
+
)
|
|
2766
|
+
}),
|
|
2767
|
+
z14.object({
|
|
2768
|
+
type: z14.literal("web_search_call"),
|
|
2769
|
+
id: z14.string(),
|
|
2770
|
+
status: z14.string(),
|
|
2771
|
+
action: z14.discriminatedUnion("type", [
|
|
2772
|
+
z14.object({
|
|
2773
|
+
type: z14.literal("search"),
|
|
2774
|
+
query: z14.string().nullish()
|
|
2775
|
+
}),
|
|
2776
|
+
z14.object({
|
|
2777
|
+
type: z14.literal("open_page"),
|
|
2778
|
+
url: z14.string()
|
|
2779
|
+
}),
|
|
2780
|
+
z14.object({
|
|
2781
|
+
type: z14.literal("find"),
|
|
2782
|
+
url: z14.string(),
|
|
2783
|
+
pattern: z14.string()
|
|
2784
|
+
})
|
|
2785
|
+
]).nullish()
|
|
2786
|
+
}),
|
|
2787
|
+
z14.object({
|
|
2788
|
+
type: z14.literal("file_search_call"),
|
|
2789
|
+
id: z14.string(),
|
|
2790
|
+
queries: z14.array(z14.string()),
|
|
2791
|
+
results: z14.array(
|
|
2792
|
+
z14.object({
|
|
2793
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2794
|
+
file_id: z14.string(),
|
|
2795
|
+
filename: z14.string(),
|
|
2796
|
+
score: z14.number(),
|
|
2797
|
+
text: z14.string()
|
|
2798
|
+
})
|
|
2799
|
+
).nullish()
|
|
2800
|
+
}),
|
|
2801
|
+
z14.object({
|
|
2802
|
+
type: z14.literal("code_interpreter_call"),
|
|
2803
|
+
id: z14.string(),
|
|
2804
|
+
code: z14.string().nullable(),
|
|
2805
|
+
container_id: z14.string(),
|
|
2806
|
+
outputs: z14.array(
|
|
2807
|
+
z14.discriminatedUnion("type", [
|
|
2808
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2809
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2810
|
+
])
|
|
2811
|
+
).nullable()
|
|
2812
|
+
}),
|
|
2813
|
+
z14.object({
|
|
2814
|
+
type: z14.literal("image_generation_call"),
|
|
2815
|
+
id: z14.string(),
|
|
2816
|
+
result: z14.string()
|
|
2817
|
+
}),
|
|
2818
|
+
z14.object({
|
|
2819
|
+
type: z14.literal("local_shell_call"),
|
|
2820
|
+
id: z14.string(),
|
|
2821
|
+
call_id: z14.string(),
|
|
2822
|
+
action: z14.object({
|
|
2823
|
+
type: z14.literal("exec"),
|
|
2824
|
+
command: z14.array(z14.string()),
|
|
2825
|
+
timeout_ms: z14.number().optional(),
|
|
2826
|
+
user: z14.string().optional(),
|
|
2827
|
+
working_directory: z14.string().optional(),
|
|
2828
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2829
|
+
})
|
|
2830
|
+
}),
|
|
2831
|
+
z14.object({
|
|
2832
|
+
type: z14.literal("function_call"),
|
|
2833
|
+
call_id: z14.string(),
|
|
2834
|
+
name: z14.string(),
|
|
2835
|
+
arguments: z14.string(),
|
|
2836
|
+
id: z14.string()
|
|
2837
|
+
}),
|
|
2838
|
+
z14.object({
|
|
2839
|
+
type: z14.literal("computer_call"),
|
|
2840
|
+
id: z14.string(),
|
|
2841
|
+
status: z14.string().optional()
|
|
2842
|
+
}),
|
|
2843
|
+
z14.object({
|
|
2844
|
+
type: z14.literal("reasoning"),
|
|
2845
|
+
id: z14.string(),
|
|
2846
|
+
encrypted_content: z14.string().nullish(),
|
|
2847
|
+
summary: z14.array(
|
|
2848
|
+
z14.object({
|
|
2849
|
+
type: z14.literal("summary_text"),
|
|
2850
|
+
text: z14.string()
|
|
2851
|
+
})
|
|
2852
|
+
)
|
|
2853
|
+
})
|
|
2854
|
+
])
|
|
2855
|
+
),
|
|
2856
|
+
service_tier: z14.string().nullish(),
|
|
2857
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2858
|
+
usage: z14.object({
|
|
2859
|
+
input_tokens: z14.number(),
|
|
2860
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2861
|
+
output_tokens: z14.number(),
|
|
2862
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2863
|
+
})
|
|
2864
|
+
})
|
|
2865
|
+
)
|
|
2866
|
+
);
|
|
2867
|
+
|
|
2868
|
+
// src/responses/openai-responses-options.ts
|
|
2869
|
+
var import_provider_utils22 = require("@ai-sdk/provider-utils");
|
|
2870
|
+
var z15 = __toESM(require("zod/v4"));
|
|
2871
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2872
|
+
var openaiResponsesReasoningModelIds = [
|
|
2873
|
+
"o1",
|
|
2874
|
+
"o1-2024-12-17",
|
|
2875
|
+
"o3-mini",
|
|
2876
|
+
"o3-mini-2025-01-31",
|
|
2877
|
+
"o3",
|
|
2878
|
+
"o3-2025-04-16",
|
|
2879
|
+
"o4-mini",
|
|
2880
|
+
"o4-mini-2025-04-16",
|
|
2881
|
+
"codex-mini-latest",
|
|
2882
|
+
"computer-use-preview",
|
|
2883
|
+
"gpt-5",
|
|
2884
|
+
"gpt-5-2025-08-07",
|
|
2885
|
+
"gpt-5-codex",
|
|
2886
|
+
"gpt-5-mini",
|
|
2887
|
+
"gpt-5-mini-2025-08-07",
|
|
2888
|
+
"gpt-5-nano",
|
|
2889
|
+
"gpt-5-nano-2025-08-07",
|
|
2890
|
+
"gpt-5-pro",
|
|
2891
|
+
"gpt-5-pro-2025-10-06"
|
|
2892
|
+
];
|
|
2893
|
+
var openaiResponsesModelIds = [
|
|
2894
|
+
"gpt-4.1",
|
|
2895
|
+
"gpt-4.1-2025-04-14",
|
|
2896
|
+
"gpt-4.1-mini",
|
|
2897
|
+
"gpt-4.1-mini-2025-04-14",
|
|
2898
|
+
"gpt-4.1-nano",
|
|
2899
|
+
"gpt-4.1-nano-2025-04-14",
|
|
2900
|
+
"gpt-4o",
|
|
2901
|
+
"gpt-4o-2024-05-13",
|
|
2902
|
+
"gpt-4o-2024-08-06",
|
|
2903
|
+
"gpt-4o-2024-11-20",
|
|
2904
|
+
"gpt-4o-audio-preview",
|
|
2905
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
2906
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
2907
|
+
"gpt-4o-search-preview",
|
|
2908
|
+
"gpt-4o-search-preview-2025-03-11",
|
|
2909
|
+
"gpt-4o-mini-search-preview",
|
|
2910
|
+
"gpt-4o-mini-search-preview-2025-03-11",
|
|
2911
|
+
"gpt-4o-mini",
|
|
2912
|
+
"gpt-4o-mini-2024-07-18",
|
|
2913
|
+
"gpt-4-turbo",
|
|
2914
|
+
"gpt-4-turbo-2024-04-09",
|
|
2915
|
+
"gpt-4-turbo-preview",
|
|
2916
|
+
"gpt-4-0125-preview",
|
|
2917
|
+
"gpt-4-1106-preview",
|
|
2918
|
+
"gpt-4",
|
|
2919
|
+
"gpt-4-0613",
|
|
2920
|
+
"gpt-4.5-preview",
|
|
2921
|
+
"gpt-4.5-preview-2025-02-27",
|
|
2922
|
+
"gpt-3.5-turbo-0125",
|
|
2923
|
+
"gpt-3.5-turbo",
|
|
2924
|
+
"gpt-3.5-turbo-1106",
|
|
2925
|
+
"chatgpt-4o-latest",
|
|
2926
|
+
"gpt-5-chat-latest",
|
|
2927
|
+
...openaiResponsesReasoningModelIds
|
|
2928
|
+
];
|
|
2929
|
+
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
|
|
2930
|
+
() => (0, import_provider_utils22.zodSchema)(
|
|
2931
|
+
z15.object({
|
|
2932
|
+
include: z15.array(
|
|
2933
|
+
z15.enum([
|
|
2934
|
+
"reasoning.encrypted_content",
|
|
2935
|
+
"file_search_call.results",
|
|
2936
|
+
"message.output_text.logprobs"
|
|
2937
|
+
])
|
|
2938
|
+
).nullish(),
|
|
2939
|
+
instructions: z15.string().nullish(),
|
|
2940
|
+
/**
|
|
2941
|
+
* Return the log probabilities of the tokens.
|
|
2942
|
+
*
|
|
2943
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
2944
|
+
* were generated.
|
|
2945
|
+
*
|
|
2946
|
+
* Setting to a number will return the log probabilities of the top n
|
|
2947
|
+
* tokens that were generated.
|
|
2948
|
+
*
|
|
2949
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
2950
|
+
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
2951
|
+
*/
|
|
2952
|
+
logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
2953
|
+
/**
|
|
2954
|
+
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
2955
|
+
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
2956
|
+
* Any further attempts to call a tool by the model will be ignored.
|
|
2957
|
+
*/
|
|
2958
|
+
maxToolCalls: z15.number().nullish(),
|
|
2959
|
+
metadata: z15.any().nullish(),
|
|
2960
|
+
parallelToolCalls: z15.boolean().nullish(),
|
|
2961
|
+
previousResponseId: z15.string().nullish(),
|
|
2962
|
+
promptCacheKey: z15.string().nullish(),
|
|
2963
|
+
reasoningEffort: z15.string().nullish(),
|
|
2964
|
+
reasoningSummary: z15.string().nullish(),
|
|
2965
|
+
safetyIdentifier: z15.string().nullish(),
|
|
2966
|
+
serviceTier: z15.enum(["auto", "flex", "priority", "default"]).nullish(),
|
|
2967
|
+
store: z15.boolean().nullish(),
|
|
2968
|
+
strictJsonSchema: z15.boolean().nullish(),
|
|
2969
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
|
|
2970
|
+
user: z15.string().nullish()
|
|
2971
|
+
})
|
|
2972
|
+
)
|
|
2973
|
+
);
|
|
2974
|
+
|
|
2334
2975
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2335
2976
|
var import_provider7 = require("@ai-sdk/provider");
|
|
2336
2977
|
|
|
2337
2978
|
// src/tool/code-interpreter.ts
|
|
2338
|
-
var
|
|
2339
|
-
var
|
|
2340
|
-
var codeInterpreterInputSchema =
|
|
2341
|
-
|
|
2342
|
-
|
|
2343
|
-
|
|
2344
|
-
|
|
2345
|
-
outputs: import_v414.z.array(
|
|
2346
|
-
import_v414.z.discriminatedUnion("type", [
|
|
2347
|
-
import_v414.z.object({ type: import_v414.z.literal("logs"), logs: import_v414.z.string() }),
|
|
2348
|
-
import_v414.z.object({ type: import_v414.z.literal("image"), url: import_v414.z.string() })
|
|
2349
|
-
])
|
|
2350
|
-
).nullish()
|
|
2351
|
-
});
|
|
2352
|
-
var codeInterpreterArgsSchema = import_v414.z.object({
|
|
2353
|
-
container: import_v414.z.union([
|
|
2354
|
-
import_v414.z.string(),
|
|
2355
|
-
import_v414.z.object({
|
|
2356
|
-
fileIds: import_v414.z.array(import_v414.z.string()).optional()
|
|
2979
|
+
var import_provider_utils23 = require("@ai-sdk/provider-utils");
|
|
2980
|
+
var z16 = __toESM(require("zod/v4"));
|
|
2981
|
+
var codeInterpreterInputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2982
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2983
|
+
z16.object({
|
|
2984
|
+
code: z16.string().nullish(),
|
|
2985
|
+
containerId: z16.string()
|
|
2357
2986
|
})
|
|
2358
|
-
|
|
2359
|
-
|
|
2360
|
-
var
|
|
2987
|
+
)
|
|
2988
|
+
);
|
|
2989
|
+
var codeInterpreterOutputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2990
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2991
|
+
z16.object({
|
|
2992
|
+
outputs: z16.array(
|
|
2993
|
+
z16.discriminatedUnion("type", [
|
|
2994
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2995
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2996
|
+
])
|
|
2997
|
+
).nullish()
|
|
2998
|
+
})
|
|
2999
|
+
)
|
|
3000
|
+
);
|
|
3001
|
+
var codeInterpreterArgsSchema = (0, import_provider_utils23.lazySchema)(
|
|
3002
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
3003
|
+
z16.object({
|
|
3004
|
+
container: z16.union([
|
|
3005
|
+
z16.string(),
|
|
3006
|
+
z16.object({
|
|
3007
|
+
fileIds: z16.array(z16.string()).optional()
|
|
3008
|
+
})
|
|
3009
|
+
]).optional()
|
|
3010
|
+
})
|
|
3011
|
+
)
|
|
3012
|
+
);
|
|
3013
|
+
var codeInterpreterToolFactory = (0, import_provider_utils23.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2361
3014
|
id: "openai.code_interpreter",
|
|
2362
3015
|
name: "code_interpreter",
|
|
2363
3016
|
inputSchema: codeInterpreterInputSchema,
|
|
@@ -2368,169 +3021,200 @@ var codeInterpreter = (args = {}) => {
 };

 // src/tool/file-search.ts
-var
-var
-var comparisonFilterSchema =
-  key:
-  type:
-  value:
+var import_provider_utils24 = require("@ai-sdk/provider-utils");
+var z17 = __toESM(require("zod/v4"));
+var comparisonFilterSchema = z17.object({
+  key: z17.string(),
+  type: z17.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+  value: z17.union([z17.string(), z17.number(), z17.boolean()])
 });
-var compoundFilterSchema =
-  type:
-  filters:
-
+var compoundFilterSchema = z17.object({
+  type: z17.enum(["and", "or"]),
+  filters: z17.array(
+    z17.union([comparisonFilterSchema, z17.lazy(() => compoundFilterSchema)])
   )
 });
-var fileSearchArgsSchema =
-
-
-
-
-
-
-
-  })
-
-  queries: import_v415.z.array(import_v415.z.string()),
-  results: import_v415.z.array(
-    import_v415.z.object({
-      attributes: import_v415.z.record(import_v415.z.string(), import_v415.z.unknown()),
-      fileId: import_v415.z.string(),
-      filename: import_v415.z.string(),
-      score: import_v415.z.number(),
-      text: import_v415.z.string()
+var fileSearchArgsSchema = (0, import_provider_utils24.lazySchema)(
+  () => (0, import_provider_utils24.zodSchema)(
+    z17.object({
+      vectorStoreIds: z17.array(z17.string()),
+      maxNumResults: z17.number().optional(),
+      ranking: z17.object({
+        ranker: z17.string().optional(),
+        scoreThreshold: z17.number().optional()
+      }).optional(),
+      filters: z17.union([comparisonFilterSchema, compoundFilterSchema]).optional()
     })
-  )
-
-var
+  )
+);
+var fileSearchOutputSchema = (0, import_provider_utils24.lazySchema)(
+  () => (0, import_provider_utils24.zodSchema)(
+    z17.object({
+      queries: z17.array(z17.string()),
+      results: z17.array(
+        z17.object({
+          attributes: z17.record(z17.string(), z17.unknown()),
+          fileId: z17.string(),
+          filename: z17.string(),
+          score: z17.number(),
+          text: z17.string()
+        })
+      ).nullable()
+    })
+  )
+);
+var fileSearch = (0, import_provider_utils24.createProviderDefinedToolFactoryWithOutputSchema)({
   id: "openai.file_search",
   name: "file_search",
-  inputSchema:
+  inputSchema: z17.object({}),
   outputSchema: fileSearchOutputSchema
 });

 // src/tool/web-search.ts
-var
-var
-var webSearchArgsSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils25 = require("@ai-sdk/provider-utils");
+var z18 = __toESM(require("zod/v4"));
+var webSearchArgsSchema = (0, import_provider_utils25.lazySchema)(
+  () => (0, import_provider_utils25.zodSchema)(
+    z18.object({
+      filters: z18.object({
+        allowedDomains: z18.array(z18.string()).optional()
+      }).optional(),
+      searchContextSize: z18.enum(["low", "medium", "high"]).optional(),
+      userLocation: z18.object({
+        type: z18.literal("approximate"),
+        country: z18.string().optional(),
+        city: z18.string().optional(),
+        region: z18.string().optional(),
+        timezone: z18.string().optional()
+      }).optional()
+    })
+  )
+);
+var webSearchInputSchema = (0, import_provider_utils25.lazySchema)(
+  () => (0, import_provider_utils25.zodSchema)(
+    z18.object({
+      action: z18.discriminatedUnion("type", [
+        z18.object({
+          type: z18.literal("search"),
+          query: z18.string().nullish()
+        }),
+        z18.object({
+          type: z18.literal("open_page"),
+          url: z18.string()
+        }),
+        z18.object({
+          type: z18.literal("find"),
+          url: z18.string(),
+          pattern: z18.string()
+        })
+      ]).nullish()
+    })
+  )
+);
+var webSearchToolFactory = (0, import_provider_utils25.createProviderDefinedToolFactory)({
   id: "openai.web_search",
   name: "web_search",
-  inputSchema:
-    action: import_v416.z.discriminatedUnion("type", [
-      import_v416.z.object({
-        type: import_v416.z.literal("search"),
-        query: import_v416.z.string().nullish()
-      }),
-      import_v416.z.object({
-        type: import_v416.z.literal("open_page"),
-        url: import_v416.z.string()
-      }),
-      import_v416.z.object({
-        type: import_v416.z.literal("find"),
-        url: import_v416.z.string(),
-        pattern: import_v416.z.string()
-      })
-    ]).nullish()
-  })
+  inputSchema: webSearchInputSchema
 });

 // src/tool/web-search-preview.ts
-var
-var
-var webSearchPreviewArgsSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils26 = require("@ai-sdk/provider-utils");
+var z19 = __toESM(require("zod/v4"));
+var webSearchPreviewArgsSchema = (0, import_provider_utils26.lazySchema)(
+  () => (0, import_provider_utils26.zodSchema)(
+    z19.object({
+      /**
+       * Search context size to use for the web search.
+       * - high: Most comprehensive context, highest cost, slower response
+       * - medium: Balanced context, cost, and latency (default)
+       * - low: Least context, lowest cost, fastest response
+       */
+      searchContextSize: z19.enum(["low", "medium", "high"]).optional(),
+      /**
+       * User location information to provide geographically relevant search results.
+       */
+      userLocation: z19.object({
+        /**
+         * Type of location (always 'approximate')
+         */
+        type: z19.literal("approximate"),
+        /**
+         * Two-letter ISO country code (e.g., 'US', 'GB')
+         */
+        country: z19.string().optional(),
+        /**
+         * City name (free text, e.g., 'Minneapolis')
+         */
+        city: z19.string().optional(),
+        /**
+         * Region name (free text, e.g., 'Minnesota')
+         */
+        region: z19.string().optional(),
+        /**
+         * IANA timezone (e.g., 'America/Chicago')
+         */
+        timezone: z19.string().optional()
+      }).optional()
+    })
+  )
+);
+var webSearchPreviewInputSchema = (0, import_provider_utils26.lazySchema)(
+  () => (0, import_provider_utils26.zodSchema)(
+    z19.object({
+      action: z19.discriminatedUnion("type", [
+        z19.object({
+          type: z19.literal("search"),
+          query: z19.string().nullish()
+        }),
+        z19.object({
+          type: z19.literal("open_page"),
+          url: z19.string()
+        }),
+        z19.object({
+          type: z19.literal("find"),
+          url: z19.string(),
+          pattern: z19.string()
+        })
+      ]).nullish()
+    })
+  )
+);
+var webSearchPreview = (0, import_provider_utils26.createProviderDefinedToolFactory)({
   id: "openai.web_search_preview",
   name: "web_search_preview",
-  inputSchema:
-    action: import_v417.z.discriminatedUnion("type", [
-      import_v417.z.object({
-        type: import_v417.z.literal("search"),
-        query: import_v417.z.string().nullish()
-      }),
-      import_v417.z.object({
-        type: import_v417.z.literal("open_page"),
-        url: import_v417.z.string()
-      }),
-      import_v417.z.object({
-        type: import_v417.z.literal("find"),
-        url: import_v417.z.string(),
-        pattern: import_v417.z.string()
-      })
-    ]).nullish()
-  })
+  inputSchema: webSearchPreviewInputSchema
 });

 // src/tool/image-generation.ts
-var
-var
-var imageGenerationArgsSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils27 = require("@ai-sdk/provider-utils");
+var z20 = __toESM(require("zod/v4"));
+var imageGenerationArgsSchema = (0, import_provider_utils27.lazySchema)(
+  () => (0, import_provider_utils27.zodSchema)(
+    z20.object({
+      background: z20.enum(["auto", "opaque", "transparent"]).optional(),
+      inputFidelity: z20.enum(["low", "high"]).optional(),
+      inputImageMask: z20.object({
+        fileId: z20.string().optional(),
+        imageUrl: z20.string().optional()
+      }).optional(),
+      model: z20.string().optional(),
+      moderation: z20.enum(["auto"]).optional(),
+      outputCompression: z20.number().int().min(0).max(100).optional(),
+      outputFormat: z20.enum(["png", "jpeg", "webp"]).optional(),
+      partialImages: z20.number().int().min(0).max(3).optional(),
+      quality: z20.enum(["auto", "low", "medium", "high"]).optional(),
+      size: z20.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
+    }).strict()
+  )
+);
+var imageGenerationInputSchema = (0, import_provider_utils27.lazySchema)(() => (0, import_provider_utils27.zodSchema)(z20.object({})));
+var imageGenerationOutputSchema = (0, import_provider_utils27.lazySchema)(
+  () => (0, import_provider_utils27.zodSchema)(z20.object({ result: z20.string() }))
+);
+var imageGenerationToolFactory = (0, import_provider_utils27.createProviderDefinedToolFactoryWithOutputSchema)({
   id: "openai.image_generation",
   name: "image_generation",
-  inputSchema:
+  inputSchema: imageGenerationInputSchema,
   outputSchema: imageGenerationOutputSchema
 });
 var imageGeneration = (args = {}) => {
@@ -2538,7 +3222,8 @@ var imageGeneration = (args = {}) => {
 };

 // src/responses/openai-responses-prepare-tools.ts
-
+var import_provider_utils28 = require("@ai-sdk/provider-utils");
+async function prepareResponsesTools({
   tools,
   toolChoice,
   strictJsonSchema
@@ -2563,7 +3248,10 @@ function prepareResponsesTools({
       case "provider-defined": {
         switch (tool.id) {
           case "openai.file_search": {
-            const args =
+            const args = await (0, import_provider_utils28.validateTypes)({
+              value: tool.args,
+              schema: fileSearchArgsSchema
+            });
             openaiTools.push({
               type: "file_search",
               vector_store_ids: args.vectorStoreIds,
@@ -2583,7 +3271,10 @@ function prepareResponsesTools({
             break;
           }
           case "openai.web_search_preview": {
-            const args =
+            const args = await (0, import_provider_utils28.validateTypes)({
+              value: tool.args,
+              schema: webSearchPreviewArgsSchema
+            });
             openaiTools.push({
               type: "web_search_preview",
               search_context_size: args.searchContextSize,
@@ -2592,7 +3283,10 @@ function prepareResponsesTools({
             break;
           }
           case "openai.web_search": {
-            const args =
+            const args = await (0, import_provider_utils28.validateTypes)({
+              value: tool.args,
+              schema: webSearchArgsSchema
+            });
             openaiTools.push({
               type: "web_search",
               filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
@@ -2602,7 +3296,10 @@ function prepareResponsesTools({
             break;
           }
          case "openai.code_interpreter": {
-            const args =
+            const args = await (0, import_provider_utils28.validateTypes)({
+              value: tool.args,
+              schema: codeInterpreterArgsSchema
+            });
             openaiTools.push({
               type: "code_interpreter",
               container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
@@ -2610,7 +3307,10 @@ function prepareResponsesTools({
             break;
           }
           case "openai.image_generation": {
-            const args =
+            const args = await (0, import_provider_utils28.validateTypes)({
+              value: tool.args,
+              schema: imageGenerationArgsSchema
+            });
             openaiTools.push({
               type: "image_generation",
               background: args.background,
@@ -2662,83 +3362,6 @@ function prepareResponsesTools({
 }

 // src/responses/openai-responses-language-model.ts
-var webSearchCallItem = import_v419.z.object({
-  type: import_v419.z.literal("web_search_call"),
-  id: import_v419.z.string(),
-  status: import_v419.z.string(),
-  action: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("search"),
-      query: import_v419.z.string().nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("open_page"),
-      url: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("find"),
-      url: import_v419.z.string(),
-      pattern: import_v419.z.string()
-    })
-  ]).nullish()
-});
-var fileSearchCallItem = import_v419.z.object({
-  type: import_v419.z.literal("file_search_call"),
-  id: import_v419.z.string(),
-  queries: import_v419.z.array(import_v419.z.string()),
-  results: import_v419.z.array(
-    import_v419.z.object({
-      attributes: import_v419.z.record(import_v419.z.string(), import_v419.z.unknown()),
-      file_id: import_v419.z.string(),
-      filename: import_v419.z.string(),
-      score: import_v419.z.number(),
-      text: import_v419.z.string()
-    })
-  ).nullish()
-});
-var codeInterpreterCallItem = import_v419.z.object({
-  type: import_v419.z.literal("code_interpreter_call"),
-  id: import_v419.z.string(),
-  code: import_v419.z.string().nullable(),
-  container_id: import_v419.z.string(),
-  outputs: import_v419.z.array(
-    import_v419.z.discriminatedUnion("type", [
-      import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
-      import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
-    ])
-  ).nullable()
-});
-var localShellCallItem = import_v419.z.object({
-  type: import_v419.z.literal("local_shell_call"),
-  id: import_v419.z.string(),
-  call_id: import_v419.z.string(),
-  action: import_v419.z.object({
-    type: import_v419.z.literal("exec"),
-    command: import_v419.z.array(import_v419.z.string()),
-    timeout_ms: import_v419.z.number().optional(),
-    user: import_v419.z.string().optional(),
-    working_directory: import_v419.z.string().optional(),
-    env: import_v419.z.record(import_v419.z.string(), import_v419.z.string()).optional()
-  })
-});
-var imageGenerationCallItem = import_v419.z.object({
-  type: import_v419.z.literal("image_generation_call"),
-  id: import_v419.z.string(),
-  result: import_v419.z.string()
-});
-var TOP_LOGPROBS_MAX = 20;
-var LOGPROBS_SCHEMA = import_v419.z.array(
-  import_v419.z.object({
-    token: import_v419.z.string(),
-    logprob: import_v419.z.number(),
-    top_logprobs: import_v419.z.array(
-      import_v419.z.object({
-        token: import_v419.z.string(),
-        logprob: import_v419.z.number()
-      })
-    )
-  })
-);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v3";
@@ -2791,7 +3414,7 @@ var OpenAIResponsesLanguageModel = class {
     if (stopSequences != null) {
       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
-    const openaiOptions = await (0,
+    const openaiOptions = await (0, import_provider_utils29.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
@@ -2930,7 +3553,7 @@ var OpenAIResponsesLanguageModel = class {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
       toolWarnings
-    } = prepareResponsesTools({
+    } = await prepareResponsesTools({
       tools,
       toolChoice,
       strictJsonSchema
@@ -2960,91 +3583,13 @@ var OpenAIResponsesLanguageModel = class {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils29.postJsonToApi)({
       url,
-      headers: (0,
+      headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
-
-        id: import_v419.z.string(),
-        created_at: import_v419.z.number(),
-        error: import_v419.z.object({
-          code: import_v419.z.string(),
-          message: import_v419.z.string()
-        }).nullish(),
-        model: import_v419.z.string(),
-        output: import_v419.z.array(
-          import_v419.z.discriminatedUnion("type", [
-            import_v419.z.object({
-              type: import_v419.z.literal("message"),
-              role: import_v419.z.literal("assistant"),
-              id: import_v419.z.string(),
-              content: import_v419.z.array(
-                import_v419.z.object({
-                  type: import_v419.z.literal("output_text"),
-                  text: import_v419.z.string(),
-                  logprobs: LOGPROBS_SCHEMA.nullish(),
-                  annotations: import_v419.z.array(
-                    import_v419.z.discriminatedUnion("type", [
-                      import_v419.z.object({
-                        type: import_v419.z.literal("url_citation"),
-                        start_index: import_v419.z.number(),
-                        end_index: import_v419.z.number(),
-                        url: import_v419.z.string(),
-                        title: import_v419.z.string()
-                      }),
-                      import_v419.z.object({
-                        type: import_v419.z.literal("file_citation"),
-                        file_id: import_v419.z.string(),
-                        filename: import_v419.z.string().nullish(),
-                        index: import_v419.z.number().nullish(),
-                        start_index: import_v419.z.number().nullish(),
-                        end_index: import_v419.z.number().nullish(),
-                        quote: import_v419.z.string().nullish()
-                      }),
-                      import_v419.z.object({
-                        type: import_v419.z.literal("container_file_citation")
-                      })
-                    ])
-                  )
-                })
-              )
-            }),
-            webSearchCallItem,
-            fileSearchCallItem,
-            codeInterpreterCallItem,
-            imageGenerationCallItem,
-            localShellCallItem,
-            import_v419.z.object({
-              type: import_v419.z.literal("function_call"),
-              call_id: import_v419.z.string(),
-              name: import_v419.z.string(),
-              arguments: import_v419.z.string(),
-              id: import_v419.z.string()
-            }),
-            import_v419.z.object({
-              type: import_v419.z.literal("computer_call"),
-              id: import_v419.z.string(),
-              status: import_v419.z.string().optional()
-            }),
-            import_v419.z.object({
-              type: import_v419.z.literal("reasoning"),
-              id: import_v419.z.string(),
-              encrypted_content: import_v419.z.string().nullish(),
-              summary: import_v419.z.array(
-                import_v419.z.object({
-                  type: import_v419.z.literal("summary_text"),
-                  text: import_v419.z.string()
-                })
-              )
-            })
-          ])
-        ),
-        service_tier: import_v419.z.string().nullish(),
-        incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
-        usage: usageSchema2
-      })
+      successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
+        openaiResponsesResponseSchema
       ),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
@@ -3107,7 +3652,9 @@ var OpenAIResponsesLanguageModel = class {
             type: "tool-call",
             toolCallId: part.call_id,
             toolName: "local_shell",
-            input: JSON.stringify({
+            input: JSON.stringify({
+              action: part.action
+            }),
             providerMetadata: {
               openai: {
                 itemId: part.id
@@ -3135,7 +3682,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "url",
-              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
+              id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils29.generateId)(),
               url: annotation.url,
               title: annotation.title
             });
@@ -3143,7 +3690,7 @@ var OpenAIResponsesLanguageModel = class {
             content.push({
               type: "source",
               sourceType: "document",
-              id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
+              id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils29.generateId)(),
              mediaType: "text/plain",
              title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
              filename: (_l = annotation.filename) != null ? _l : annotation.file_id
@@ -3295,18 +3842,18 @@ var OpenAIResponsesLanguageModel = class {
       warnings,
       webSearchToolName
     } = await this.getArgs(options);
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils29.postJsonToApi)({
       url: this.config.url({
         path: "/responses",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
       body: {
         ...body,
         stream: true
       },
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils29.createEventSourceResponseHandler)(
        openaiResponsesChunkSchema
      ),
      abortSignal: options.abortSignal,
@@ -3694,7 +4241,7 @@ var OpenAIResponsesLanguageModel = class {
                 controller.enqueue({
                   type: "source",
                   sourceType: "url",
-                  id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
+                  id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils29.generateId)(),
                   url: value.annotation.url,
                   title: value.annotation.title
                 });
@@ -3702,7 +4249,7 @@ var OpenAIResponsesLanguageModel = class {
                 controller.enqueue({
                   type: "source",
                   sourceType: "document",
-                  id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
+                  id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils29.generateId)(),
                  mediaType: "text/plain",
                  title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
                  filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
@@ -3738,203 +4285,6 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3738
4285
|
};
|
|
3739
4286
|
}
|
|
3740
4287
|
};
|
|
3741
|
-
var usageSchema2 = import_v419.z.object({
|
|
3742
|
-
input_tokens: import_v419.z.number(),
|
|
3743
|
-
input_tokens_details: import_v419.z.object({ cached_tokens: import_v419.z.number().nullish() }).nullish(),
|
|
3744
|
-
output_tokens: import_v419.z.number(),
|
|
3745
|
-
output_tokens_details: import_v419.z.object({ reasoning_tokens: import_v419.z.number().nullish() }).nullish()
|
|
3746
|
-
});
|
|
3747
|
-
var textDeltaChunkSchema = import_v419.z.object({
|
|
3748
|
-
type: import_v419.z.literal("response.output_text.delta"),
|
|
3749
|
-
item_id: import_v419.z.string(),
|
|
3750
|
-
delta: import_v419.z.string(),
|
|
3751
|
-
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
3752
|
-
});
|
|
3753
|
-
var errorChunkSchema = import_v419.z.object({
|
|
3754
|
-
type: import_v419.z.literal("error"),
|
|
3755
|
-
code: import_v419.z.string(),
|
|
3756
|
-
message: import_v419.z.string(),
|
|
3757
|
-
param: import_v419.z.string().nullish(),
|
|
3758
|
-
sequence_number: import_v419.z.number()
|
|
3759
|
-
});
|
|
3760
|
-
var responseFinishedChunkSchema = import_v419.z.object({
|
|
3761
|
-
type: import_v419.z.enum(["response.completed", "response.incomplete"]),
|
|
3762
|
-
response: import_v419.z.object({
|
|
3763
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3764
|
-
usage: usageSchema2,
|
|
3765
|
-
service_tier: import_v419.z.string().nullish()
|
|
3766
|
-
})
|
|
3767
|
-
});
|
|
3768
|
-
var responseCreatedChunkSchema = import_v419.z.object({
|
|
3769
|
-
type: import_v419.z.literal("response.created"),
|
|
3770
|
-
response: import_v419.z.object({
|
|
3771
|
-
id: import_v419.z.string(),
|
|
3772
|
-
created_at: import_v419.z.number(),
|
|
3773
|
-
model: import_v419.z.string(),
|
|
3774
|
-
service_tier: import_v419.z.string().nullish()
|
|
3775
|
-
})
|
|
3776
|
-
});
|
|
3777
|
-
var responseOutputItemAddedSchema = import_v419.z.object({
|
|
3778
|
-
type: import_v419.z.literal("response.output_item.added"),
|
|
3779
|
-
output_index: import_v419.z.number(),
|
|
3780
|
-
item: import_v419.z.discriminatedUnion("type", [
|
|
3781
|
-
import_v419.z.object({
|
|
3782
|
-
type: import_v419.z.literal("message"),
|
|
3783
|
-
id: import_v419.z.string()
|
|
3784
|
-
}),
|
|
3785
|
-
import_v419.z.object({
|
|
3786
|
-
type: import_v419.z.literal("reasoning"),
|
|
3787
|
-
id: import_v419.z.string(),
|
|
3788
|
-
encrypted_content: import_v419.z.string().nullish()
|
|
3789
|
-
}),
|
|
3790
|
-
import_v419.z.object({
|
|
3791
|
-
type: import_v419.z.literal("function_call"),
|
|
3792
|
-
id: import_v419.z.string(),
|
|
3793
|
-
call_id: import_v419.z.string(),
|
|
3794
|
-
name: import_v419.z.string(),
|
|
3795
|
-
arguments: import_v419.z.string()
|
|
3796
|
-
}),
|
|
3797
|
-
import_v419.z.object({
|
|
3798
|
-
type: import_v419.z.literal("web_search_call"),
|
|
3799
|
-
id: import_v419.z.string(),
|
|
3800
|
-
status: import_v419.z.string(),
|
|
3801
|
-
action: import_v419.z.object({
|
|
3802
|
-
type: import_v419.z.literal("search"),
|
|
3803
|
-
query: import_v419.z.string().optional()
|
|
3804
|
-
}).nullish()
|
|
3805
|
-
}),
|
|
3806
|
-
import_v419.z.object({
|
|
3807
|
-
type: import_v419.z.literal("computer_call"),
|
|
3808
|
-
id: import_v419.z.string(),
|
|
3809
|
-
status: import_v419.z.string()
|
|
3810
|
-
}),
|
|
3811
|
-
import_v419.z.object({
|
|
3812
|
-
type: import_v419.z.literal("file_search_call"),
|
|
3813
|
-
id: import_v419.z.string()
|
|
3814
|
-
}),
|
|
3815
|
-
import_v419.z.object({
|
|
3816
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
3817
|
-
id: import_v419.z.string()
|
|
3818
|
-
}),
|
|
3819
|
-
import_v419.z.object({
|
|
3820
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
3821
|
-
id: import_v419.z.string(),
|
|
3822
|
-
container_id: import_v419.z.string(),
|
|
3823
|
-
code: import_v419.z.string().nullable(),
|
|
3824
|
-
outputs: import_v419.z.array(
|
|
3825
|
-
import_v419.z.discriminatedUnion("type", [
|
|
3826
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
3827
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
3828
|
-
])
|
|
3829
|
-
).nullable(),
|
|
3830
|
-
status: import_v419.z.string()
|
|
3831
|
-
})
|
|
3832
|
-
])
|
|
3833
|
-
});
|
|
3834
|
-
var responseOutputItemDoneSchema = import_v419.z.object({
|
|
3835
|
-
type: import_v419.z.literal("response.output_item.done"),
|
|
3836
|
-
output_index: import_v419.z.number(),
|
|
3837
-  item: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("message"),
-      id: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("reasoning"),
-      id: import_v419.z.string(),
-      encrypted_content: import_v419.z.string().nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("function_call"),
-      id: import_v419.z.string(),
-      call_id: import_v419.z.string(),
-      name: import_v419.z.string(),
-      arguments: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    }),
-    codeInterpreterCallItem,
-    imageGenerationCallItem,
-    webSearchCallItem,
-    fileSearchCallItem,
-    localShellCallItem,
-    import_v419.z.object({
-      type: import_v419.z.literal("computer_call"),
-      id: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    })
-  ])
-});
-var responseFunctionCallArgumentsDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.function_call_arguments.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseImageGenerationCallPartialImageSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.image_generation_call.partial_image"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  partial_image_b64: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDoneSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.done"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  code: import_v419.z.string()
-});
-var responseAnnotationAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_text.annotation.added"),
-  annotation: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("url_citation"),
-      url: import_v419.z.string(),
-      title: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("file_citation"),
-      file_id: import_v419.z.string(),
-      filename: import_v419.z.string().nullish(),
-      index: import_v419.z.number().nullish(),
-      start_index: import_v419.z.number().nullish(),
-      end_index: import_v419.z.number().nullish(),
-      quote: import_v419.z.string().nullish()
-    })
-  ])
-});
-var responseReasoningSummaryPartAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_part.added"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number()
-});
-var responseReasoningSummaryTextDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var openaiResponsesChunkSchema = import_v419.z.union([
-  textDeltaChunkSchema,
-  responseFinishedChunkSchema,
-  responseCreatedChunkSchema,
-  responseOutputItemAddedSchema,
-  responseOutputItemDoneSchema,
-  responseFunctionCallArgumentsDeltaSchema,
-  responseImageGenerationCallPartialImageSchema,
-  responseCodeInterpreterCallCodeDeltaSchema,
-  responseCodeInterpreterCallCodeDoneSchema,
-  responseAnnotationAddedSchema,
-  responseReasoningSummaryPartAddedSchema,
-  responseReasoningSummaryTextDeltaSchema,
-  errorChunkSchema,
-  import_v419.z.object({ type: import_v419.z.string() }).loose()
-  // fallback for unknown chunks
-]);
 function isTextDeltaChunk(chunk) {
   return chunk.type === "response.output_text.delta";
 }
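The block removed above defines Zod schemas for individual Responses API stream events and unions them into `openaiResponsesChunkSchema`, ending with a loose `{ type: string }` object so unrecognized event types still parse instead of failing validation. A minimal sketch of that pattern follows (illustrative only, not the package's actual implementation; the parsed event payload is made up):

```ts
// Sketch of the "strict schemas + loose fallback" chunk-parsing pattern,
// using zod/v4 as the bundle itself does.
import { z } from "zod/v4";

const functionCallArgumentsDelta = z.object({
  type: z.literal("response.function_call_arguments.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string(),
});

// Catch-all: any object with a string `type` passes, extra keys are kept.
const unknownChunk = z.object({ type: z.string() }).loose();

const chunkSchema = z.union([functionCallArgumentsDelta, unknownChunk]);

// An event type the union does not know about still validates via the fallback.
const parsed = chunkSchema.parse({ type: "response.completed", sequence_number: 7 });
console.log(parsed.type); // "response.completed"
```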
@@ -4014,47 +4364,6 @@ function getResponsesModelConfig(modelId) {
     isReasoningModel: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_v419.z.object({
-  include: import_v419.z.array(
-    import_v419.z.enum([
-      "reasoning.encrypted_content",
-      "file_search_call.results",
-      "message.output_text.logprobs"
-    ])
-  ).nullish(),
-  instructions: import_v419.z.string().nullish(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   *
-   * @see https://platform.openai.com/docs/api-reference/responses/create
-   * @see https://cookbook.openai.com/examples/using_logprobs
-   */
-  logprobs: import_v419.z.union([import_v419.z.boolean(), import_v419.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
-  /**
-   * The maximum number of total calls to built-in tools that can be processed in a response.
-   * This maximum number applies across all built-in tool calls, not per individual tool.
-   * Any further attempts to call a tool by the model will be ignored.
-   */
-  maxToolCalls: import_v419.z.number().nullish(),
-  metadata: import_v419.z.any().nullish(),
-  parallelToolCalls: import_v419.z.boolean().nullish(),
-  previousResponseId: import_v419.z.string().nullish(),
-  promptCacheKey: import_v419.z.string().nullish(),
-  reasoningEffort: import_v419.z.string().nullish(),
-  reasoningSummary: import_v419.z.string().nullish(),
-  safetyIdentifier: import_v419.z.string().nullish(),
-  serviceTier: import_v419.z.enum(["auto", "flex", "priority"]).nullish(),
-  store: import_v419.z.boolean().nullish(),
-  strictJsonSchema: import_v419.z.boolean().nullish(),
-  textVerbosity: import_v419.z.enum(["low", "medium", "high"]).nullish(),
-  user: import_v419.z.string().nullish()
-});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
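The removed `openaiResponsesProviderOptionsSchema` validates per-call provider options, declaring most fields `.nullish()` so a value, `null`, or an omitted key are all accepted, and constraining `serviceTier` and `textVerbosity` with enums. A small sketch of that validation behavior (the schema name below is hypothetical; the fields and allowed values are taken from the removed code):

```ts
// Sketch of how .nullish() provider-option fields behave under zod/v4 validation.
import { z } from "zod/v4";

const exampleProviderOptions = z.object({
  instructions: z.string().nullish(),
  maxToolCalls: z.number().nullish(),
  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
});

// All of these pass: keys may be present, null, or absent.
exampleProviderOptions.parse({});
exampleProviderOptions.parse({ serviceTier: null });
exampleProviderOptions.parse({ maxToolCalls: 3, textVerbosity: "low" });

// A value outside the enum fails validation.
const result = exampleProviderOptions.safeParse({ serviceTier: "bulk" });
console.log(result.success); // false
```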
@@ -4080,6 +4389,7 @@ var openaiResponsesProviderOptionsSchema = import_v419.z.object({
   openAITranscriptionProviderOptions,
   openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions,
+  openaiSpeechProviderOptionsSchema
 });
 //# sourceMappingURL=index.js.map
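The `0 && (module.exports = { ... })` expression near the end of the bundle is dead code emitted so that Node's CommonJS export-name detection can see the named exports, as the preceding comment states; beta.19 adds `openaiSpeechProviderOptionsSchema` to that list. Assuming the package exposes this bundle through an `@ai-sdk/openai/internal` entry point, a named ESM import would then resolve against those annotated names:

```ts
// Hypothetical consumer-side import; the subpath is an assumption, the export
// name is taken from the annotated export list above.
import { openaiEmbeddingProviderOptions } from "@ai-sdk/openai/internal";

console.log(typeof openaiEmbeddingProviderOptions);
```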