@ai-sdk/openai 3.0.0-beta.17 → 3.0.0-beta.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1339 -1033
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1293 -942
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -183
- package/dist/internal/index.d.ts +101 -183
- package/dist/internal/index.js +1336 -1028
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1305 -953
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/internal/index.js
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
2
3
|
var __defProp = Object.defineProperty;
|
|
3
4
|
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
5
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
8
|
var __export = (target, all) => {
|
|
7
9
|
for (var name in all)
|
|
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
15
17
|
}
|
|
16
18
|
return to;
|
|
17
19
|
};
|
|
20
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
21
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
22
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
23
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
24
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
25
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
26
|
+
mod
|
|
27
|
+
));
|
|
18
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
29
|
|
|
20
30
|
// src/internal/index.ts
|
|
@@ -43,27 +53,27 @@ __export(internal_exports, {
|
|
|
43
53
|
openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
|
|
44
54
|
openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
|
|
45
55
|
openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
|
|
46
|
-
openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
|
|
56
|
+
openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
|
|
57
|
+
openaiSpeechProviderOptionsSchema: () => openaiSpeechProviderOptionsSchema
|
|
47
58
|
});
|
|
48
59
|
module.exports = __toCommonJS(internal_exports);
|
|
49
60
|
|
|
50
61
|
// src/chat/openai-chat-language-model.ts
|
|
51
62
|
var import_provider3 = require("@ai-sdk/provider");
|
|
52
|
-
var
|
|
53
|
-
var import_v43 = require("zod/v4");
|
|
63
|
+
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
54
64
|
|
|
55
65
|
// src/openai-error.ts
|
|
56
|
-
var
|
|
66
|
+
var z = __toESM(require("zod/v4"));
|
|
57
67
|
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
58
|
-
var openaiErrorDataSchema =
|
|
59
|
-
error:
|
|
60
|
-
message:
|
|
68
|
+
var openaiErrorDataSchema = z.object({
|
|
69
|
+
error: z.object({
|
|
70
|
+
message: z.string(),
|
|
61
71
|
// The additional information below is handled loosely to support
|
|
62
72
|
// OpenAI-compatible providers that have slightly different error
|
|
63
73
|
// responses:
|
|
64
|
-
type:
|
|
65
|
-
param:
|
|
66
|
-
code:
|
|
74
|
+
type: z.string().nullish(),
|
|
75
|
+
param: z.any().nullish(),
|
|
76
|
+
code: z.union([z.string(), z.number()]).nullish()
|
|
67
77
|
})
|
|
68
78
|
});
|
|
69
79
|
var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
|
|
@@ -283,95 +293,238 @@ function mapOpenAIFinishReason(finishReason) {
|
|
|
283
293
|
}
|
|
284
294
|
}
|
|
285
295
|
|
|
296
|
+
// src/chat/openai-chat-api.ts
|
|
297
|
+
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
298
|
+
var z2 = __toESM(require("zod/v4"));
|
|
299
|
+
var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
|
|
300
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
301
|
+
z2.object({
|
|
302
|
+
id: z2.string().nullish(),
|
|
303
|
+
created: z2.number().nullish(),
|
|
304
|
+
model: z2.string().nullish(),
|
|
305
|
+
choices: z2.array(
|
|
306
|
+
z2.object({
|
|
307
|
+
message: z2.object({
|
|
308
|
+
role: z2.literal("assistant").nullish(),
|
|
309
|
+
content: z2.string().nullish(),
|
|
310
|
+
tool_calls: z2.array(
|
|
311
|
+
z2.object({
|
|
312
|
+
id: z2.string().nullish(),
|
|
313
|
+
type: z2.literal("function"),
|
|
314
|
+
function: z2.object({
|
|
315
|
+
name: z2.string(),
|
|
316
|
+
arguments: z2.string()
|
|
317
|
+
})
|
|
318
|
+
})
|
|
319
|
+
).nullish(),
|
|
320
|
+
annotations: z2.array(
|
|
321
|
+
z2.object({
|
|
322
|
+
type: z2.literal("url_citation"),
|
|
323
|
+
start_index: z2.number(),
|
|
324
|
+
end_index: z2.number(),
|
|
325
|
+
url: z2.string(),
|
|
326
|
+
title: z2.string()
|
|
327
|
+
})
|
|
328
|
+
).nullish()
|
|
329
|
+
}),
|
|
330
|
+
index: z2.number(),
|
|
331
|
+
logprobs: z2.object({
|
|
332
|
+
content: z2.array(
|
|
333
|
+
z2.object({
|
|
334
|
+
token: z2.string(),
|
|
335
|
+
logprob: z2.number(),
|
|
336
|
+
top_logprobs: z2.array(
|
|
337
|
+
z2.object({
|
|
338
|
+
token: z2.string(),
|
|
339
|
+
logprob: z2.number()
|
|
340
|
+
})
|
|
341
|
+
)
|
|
342
|
+
})
|
|
343
|
+
).nullish()
|
|
344
|
+
}).nullish(),
|
|
345
|
+
finish_reason: z2.string().nullish()
|
|
346
|
+
})
|
|
347
|
+
),
|
|
348
|
+
usage: z2.object({
|
|
349
|
+
prompt_tokens: z2.number().nullish(),
|
|
350
|
+
completion_tokens: z2.number().nullish(),
|
|
351
|
+
total_tokens: z2.number().nullish(),
|
|
352
|
+
prompt_tokens_details: z2.object({
|
|
353
|
+
cached_tokens: z2.number().nullish()
|
|
354
|
+
}).nullish(),
|
|
355
|
+
completion_tokens_details: z2.object({
|
|
356
|
+
reasoning_tokens: z2.number().nullish(),
|
|
357
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
358
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
359
|
+
}).nullish()
|
|
360
|
+
}).nullish()
|
|
361
|
+
})
|
|
362
|
+
)
|
|
363
|
+
);
|
|
364
|
+
var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
|
|
365
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
366
|
+
z2.union([
|
|
367
|
+
z2.object({
|
|
368
|
+
id: z2.string().nullish(),
|
|
369
|
+
created: z2.number().nullish(),
|
|
370
|
+
model: z2.string().nullish(),
|
|
371
|
+
choices: z2.array(
|
|
372
|
+
z2.object({
|
|
373
|
+
delta: z2.object({
|
|
374
|
+
role: z2.enum(["assistant"]).nullish(),
|
|
375
|
+
content: z2.string().nullish(),
|
|
376
|
+
tool_calls: z2.array(
|
|
377
|
+
z2.object({
|
|
378
|
+
index: z2.number(),
|
|
379
|
+
id: z2.string().nullish(),
|
|
380
|
+
type: z2.literal("function").nullish(),
|
|
381
|
+
function: z2.object({
|
|
382
|
+
name: z2.string().nullish(),
|
|
383
|
+
arguments: z2.string().nullish()
|
|
384
|
+
})
|
|
385
|
+
})
|
|
386
|
+
).nullish(),
|
|
387
|
+
annotations: z2.array(
|
|
388
|
+
z2.object({
|
|
389
|
+
type: z2.literal("url_citation"),
|
|
390
|
+
start_index: z2.number(),
|
|
391
|
+
end_index: z2.number(),
|
|
392
|
+
url: z2.string(),
|
|
393
|
+
title: z2.string()
|
|
394
|
+
})
|
|
395
|
+
).nullish()
|
|
396
|
+
}).nullish(),
|
|
397
|
+
logprobs: z2.object({
|
|
398
|
+
content: z2.array(
|
|
399
|
+
z2.object({
|
|
400
|
+
token: z2.string(),
|
|
401
|
+
logprob: z2.number(),
|
|
402
|
+
top_logprobs: z2.array(
|
|
403
|
+
z2.object({
|
|
404
|
+
token: z2.string(),
|
|
405
|
+
logprob: z2.number()
|
|
406
|
+
})
|
|
407
|
+
)
|
|
408
|
+
})
|
|
409
|
+
).nullish()
|
|
410
|
+
}).nullish(),
|
|
411
|
+
finish_reason: z2.string().nullish(),
|
|
412
|
+
index: z2.number()
|
|
413
|
+
})
|
|
414
|
+
),
|
|
415
|
+
usage: z2.object({
|
|
416
|
+
prompt_tokens: z2.number().nullish(),
|
|
417
|
+
completion_tokens: z2.number().nullish(),
|
|
418
|
+
total_tokens: z2.number().nullish(),
|
|
419
|
+
prompt_tokens_details: z2.object({
|
|
420
|
+
cached_tokens: z2.number().nullish()
|
|
421
|
+
}).nullish(),
|
|
422
|
+
completion_tokens_details: z2.object({
|
|
423
|
+
reasoning_tokens: z2.number().nullish(),
|
|
424
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
425
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
426
|
+
}).nullish()
|
|
427
|
+
}).nullish()
|
|
428
|
+
}),
|
|
429
|
+
openaiErrorDataSchema
|
|
430
|
+
])
|
|
431
|
+
)
|
|
432
|
+
);
|
|
433
|
+
|
|
286
434
|
// src/chat/openai-chat-options.ts
|
|
287
|
-
var
|
|
288
|
-
var
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
435
|
+
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
436
|
+
var z3 = __toESM(require("zod/v4"));
|
|
437
|
+
var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
438
|
+
() => (0, import_provider_utils4.zodSchema)(
|
|
439
|
+
z3.object({
|
|
440
|
+
/**
|
|
441
|
+
* Modify the likelihood of specified tokens appearing in the completion.
|
|
442
|
+
*
|
|
443
|
+
* Accepts a JSON object that maps tokens (specified by their token ID in
|
|
444
|
+
* the GPT tokenizer) to an associated bias value from -100 to 100.
|
|
445
|
+
*/
|
|
446
|
+
logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
|
|
447
|
+
/**
|
|
448
|
+
* Return the log probabilities of the tokens.
|
|
449
|
+
*
|
|
450
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
451
|
+
* were generated.
|
|
452
|
+
*
|
|
453
|
+
* Setting to a number will return the log probabilities of the top n
|
|
454
|
+
* tokens that were generated.
|
|
455
|
+
*/
|
|
456
|
+
logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
|
|
457
|
+
/**
|
|
458
|
+
* Whether to enable parallel function calling during tool use. Default to true.
|
|
459
|
+
*/
|
|
460
|
+
parallelToolCalls: z3.boolean().optional(),
|
|
461
|
+
/**
|
|
462
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
463
|
+
* monitor and detect abuse.
|
|
464
|
+
*/
|
|
465
|
+
user: z3.string().optional(),
|
|
466
|
+
/**
|
|
467
|
+
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
468
|
+
*/
|
|
469
|
+
reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
470
|
+
/**
|
|
471
|
+
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
472
|
+
*/
|
|
473
|
+
maxCompletionTokens: z3.number().optional(),
|
|
474
|
+
/**
|
|
475
|
+
* Whether to enable persistence in responses API.
|
|
476
|
+
*/
|
|
477
|
+
store: z3.boolean().optional(),
|
|
478
|
+
/**
|
|
479
|
+
* Metadata to associate with the request.
|
|
480
|
+
*/
|
|
481
|
+
metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
|
|
482
|
+
/**
|
|
483
|
+
* Parameters for prediction mode.
|
|
484
|
+
*/
|
|
485
|
+
prediction: z3.record(z3.string(), z3.any()).optional(),
|
|
486
|
+
/**
|
|
487
|
+
* Whether to use structured outputs.
|
|
488
|
+
*
|
|
489
|
+
* @default true
|
|
490
|
+
*/
|
|
491
|
+
structuredOutputs: z3.boolean().optional(),
|
|
492
|
+
/**
|
|
493
|
+
* Service tier for the request.
|
|
494
|
+
* - 'auto': Default service tier
|
|
495
|
+
* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
|
|
496
|
+
* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
|
|
497
|
+
*
|
|
498
|
+
* @default 'auto'
|
|
499
|
+
*/
|
|
500
|
+
serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
|
|
501
|
+
/**
|
|
502
|
+
* Whether to use strict JSON schema validation.
|
|
503
|
+
*
|
|
504
|
+
* @default false
|
|
505
|
+
*/
|
|
506
|
+
strictJsonSchema: z3.boolean().optional(),
|
|
507
|
+
/**
|
|
508
|
+
* Controls the verbosity of the model's responses.
|
|
509
|
+
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
510
|
+
*/
|
|
511
|
+
textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
|
|
512
|
+
/**
|
|
513
|
+
* A cache key for prompt caching. Allows manual control over prompt caching behavior.
|
|
514
|
+
* Useful for improving cache hit rates and working around automatic caching issues.
|
|
515
|
+
*/
|
|
516
|
+
promptCacheKey: z3.string().optional(),
|
|
517
|
+
/**
|
|
518
|
+
* A stable identifier used to help detect users of your application
|
|
519
|
+
* that may be violating OpenAI's usage policies. The IDs should be a
|
|
520
|
+
* string that uniquely identifies each user. We recommend hashing their
|
|
521
|
+
* username or email address, in order to avoid sending us any identifying
|
|
522
|
+
* information.
|
|
523
|
+
*/
|
|
524
|
+
safetyIdentifier: z3.string().optional()
|
|
525
|
+
})
|
|
526
|
+
)
|
|
527
|
+
);
|
|
375
528
|
|
|
376
529
|
// src/chat/openai-chat-prepare-tools.ts
|
|
377
530
|
var import_provider2 = require("@ai-sdk/provider");
|
|
@@ -464,7 +617,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
464
617
|
}) {
|
|
465
618
|
var _a, _b, _c, _d;
|
|
466
619
|
const warnings = [];
|
|
467
|
-
const openaiOptions = (_a = await (0,
|
|
620
|
+
const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
|
|
468
621
|
provider: "openai",
|
|
469
622
|
providerOptions,
|
|
470
623
|
schema: openaiChatLanguageModelOptions
|
|
@@ -643,15 +796,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
643
796
|
responseHeaders,
|
|
644
797
|
value: response,
|
|
645
798
|
rawValue: rawResponse
|
|
646
|
-
} = await (0,
|
|
799
|
+
} = await (0, import_provider_utils5.postJsonToApi)({
|
|
647
800
|
url: this.config.url({
|
|
648
801
|
path: "/chat/completions",
|
|
649
802
|
modelId: this.modelId
|
|
650
803
|
}),
|
|
651
|
-
headers: (0,
|
|
804
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
652
805
|
body,
|
|
653
806
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
654
|
-
successfulResponseHandler: (0,
|
|
807
|
+
successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
|
|
655
808
|
openaiChatResponseSchema
|
|
656
809
|
),
|
|
657
810
|
abortSignal: options.abortSignal,
|
|
@@ -666,7 +819,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
666
819
|
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
|
|
667
820
|
content.push({
|
|
668
821
|
type: "tool-call",
|
|
669
|
-
toolCallId: (_b = toolCall.id) != null ? _b : (0,
|
|
822
|
+
toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
|
|
670
823
|
toolName: toolCall.function.name,
|
|
671
824
|
input: toolCall.function.arguments
|
|
672
825
|
});
|
|
@@ -675,7 +828,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
675
828
|
content.push({
|
|
676
829
|
type: "source",
|
|
677
830
|
sourceType: "url",
|
|
678
|
-
id: (0,
|
|
831
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
679
832
|
url: annotation.url,
|
|
680
833
|
title: annotation.title
|
|
681
834
|
});
|
|
@@ -721,15 +874,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
721
874
|
include_usage: true
|
|
722
875
|
}
|
|
723
876
|
};
|
|
724
|
-
const { responseHeaders, value: response } = await (0,
|
|
877
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
|
|
725
878
|
url: this.config.url({
|
|
726
879
|
path: "/chat/completions",
|
|
727
880
|
modelId: this.modelId
|
|
728
881
|
}),
|
|
729
|
-
headers: (0,
|
|
882
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
730
883
|
body,
|
|
731
884
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
732
|
-
successfulResponseHandler: (0,
|
|
885
|
+
successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
|
|
733
886
|
openaiChatChunkSchema
|
|
734
887
|
),
|
|
735
888
|
abortSignal: options.abortSignal,
|
|
@@ -854,14 +1007,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
854
1007
|
delta: toolCall2.function.arguments
|
|
855
1008
|
});
|
|
856
1009
|
}
|
|
857
|
-
if ((0,
|
|
1010
|
+
if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
|
|
858
1011
|
controller.enqueue({
|
|
859
1012
|
type: "tool-input-end",
|
|
860
1013
|
id: toolCall2.id
|
|
861
1014
|
});
|
|
862
1015
|
controller.enqueue({
|
|
863
1016
|
type: "tool-call",
|
|
864
|
-
toolCallId: (_q = toolCall2.id) != null ? _q : (0,
|
|
1017
|
+
toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
|
|
865
1018
|
toolName: toolCall2.function.name,
|
|
866
1019
|
input: toolCall2.function.arguments
|
|
867
1020
|
});
|
|
@@ -882,14 +1035,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
882
1035
|
id: toolCall.id,
|
|
883
1036
|
delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
|
|
884
1037
|
});
|
|
885
|
-
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
|
|
1038
|
+
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
|
|
886
1039
|
controller.enqueue({
|
|
887
1040
|
type: "tool-input-end",
|
|
888
1041
|
id: toolCall.id
|
|
889
1042
|
});
|
|
890
1043
|
controller.enqueue({
|
|
891
1044
|
type: "tool-call",
|
|
892
|
-
toolCallId: (_x = toolCall.id) != null ? _x : (0,
|
|
1045
|
+
toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
|
|
893
1046
|
toolName: toolCall.function.name,
|
|
894
1047
|
input: toolCall.function.arguments
|
|
895
1048
|
});
|
|
@@ -902,7 +1055,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
902
1055
|
controller.enqueue({
|
|
903
1056
|
type: "source",
|
|
904
1057
|
sourceType: "url",
|
|
905
|
-
id: (0,
|
|
1058
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
906
1059
|
url: annotation.url,
|
|
907
1060
|
title: annotation.title
|
|
908
1061
|
});
|
|
@@ -927,121 +1080,6 @@ var OpenAIChatLanguageModel = class {
|
|
|
927
1080
|
};
|
|
928
1081
|
}
|
|
929
1082
|
};
|
|
930
|
-
var openaiTokenUsageSchema = import_v43.z.object({
|
|
931
|
-
prompt_tokens: import_v43.z.number().nullish(),
|
|
932
|
-
completion_tokens: import_v43.z.number().nullish(),
|
|
933
|
-
total_tokens: import_v43.z.number().nullish(),
|
|
934
|
-
prompt_tokens_details: import_v43.z.object({
|
|
935
|
-
cached_tokens: import_v43.z.number().nullish()
|
|
936
|
-
}).nullish(),
|
|
937
|
-
completion_tokens_details: import_v43.z.object({
|
|
938
|
-
reasoning_tokens: import_v43.z.number().nullish(),
|
|
939
|
-
accepted_prediction_tokens: import_v43.z.number().nullish(),
|
|
940
|
-
rejected_prediction_tokens: import_v43.z.number().nullish()
|
|
941
|
-
}).nullish()
|
|
942
|
-
}).nullish();
|
|
943
|
-
var openaiChatResponseSchema = import_v43.z.object({
|
|
944
|
-
id: import_v43.z.string().nullish(),
|
|
945
|
-
created: import_v43.z.number().nullish(),
|
|
946
|
-
model: import_v43.z.string().nullish(),
|
|
947
|
-
choices: import_v43.z.array(
|
|
948
|
-
import_v43.z.object({
|
|
949
|
-
message: import_v43.z.object({
|
|
950
|
-
role: import_v43.z.literal("assistant").nullish(),
|
|
951
|
-
content: import_v43.z.string().nullish(),
|
|
952
|
-
tool_calls: import_v43.z.array(
|
|
953
|
-
import_v43.z.object({
|
|
954
|
-
id: import_v43.z.string().nullish(),
|
|
955
|
-
type: import_v43.z.literal("function"),
|
|
956
|
-
function: import_v43.z.object({
|
|
957
|
-
name: import_v43.z.string(),
|
|
958
|
-
arguments: import_v43.z.string()
|
|
959
|
-
})
|
|
960
|
-
})
|
|
961
|
-
).nullish(),
|
|
962
|
-
annotations: import_v43.z.array(
|
|
963
|
-
import_v43.z.object({
|
|
964
|
-
type: import_v43.z.literal("url_citation"),
|
|
965
|
-
start_index: import_v43.z.number(),
|
|
966
|
-
end_index: import_v43.z.number(),
|
|
967
|
-
url: import_v43.z.string(),
|
|
968
|
-
title: import_v43.z.string()
|
|
969
|
-
})
|
|
970
|
-
).nullish()
|
|
971
|
-
}),
|
|
972
|
-
index: import_v43.z.number(),
|
|
973
|
-
logprobs: import_v43.z.object({
|
|
974
|
-
content: import_v43.z.array(
|
|
975
|
-
import_v43.z.object({
|
|
976
|
-
token: import_v43.z.string(),
|
|
977
|
-
logprob: import_v43.z.number(),
|
|
978
|
-
top_logprobs: import_v43.z.array(
|
|
979
|
-
import_v43.z.object({
|
|
980
|
-
token: import_v43.z.string(),
|
|
981
|
-
logprob: import_v43.z.number()
|
|
982
|
-
})
|
|
983
|
-
)
|
|
984
|
-
})
|
|
985
|
-
).nullish()
|
|
986
|
-
}).nullish(),
|
|
987
|
-
finish_reason: import_v43.z.string().nullish()
|
|
988
|
-
})
|
|
989
|
-
),
|
|
990
|
-
usage: openaiTokenUsageSchema
|
|
991
|
-
});
|
|
992
|
-
var openaiChatChunkSchema = import_v43.z.union([
|
|
993
|
-
import_v43.z.object({
|
|
994
|
-
id: import_v43.z.string().nullish(),
|
|
995
|
-
created: import_v43.z.number().nullish(),
|
|
996
|
-
model: import_v43.z.string().nullish(),
|
|
997
|
-
choices: import_v43.z.array(
|
|
998
|
-
import_v43.z.object({
|
|
999
|
-
delta: import_v43.z.object({
|
|
1000
|
-
role: import_v43.z.enum(["assistant"]).nullish(),
|
|
1001
|
-
content: import_v43.z.string().nullish(),
|
|
1002
|
-
tool_calls: import_v43.z.array(
|
|
1003
|
-
import_v43.z.object({
|
|
1004
|
-
index: import_v43.z.number(),
|
|
1005
|
-
id: import_v43.z.string().nullish(),
|
|
1006
|
-
type: import_v43.z.literal("function").nullish(),
|
|
1007
|
-
function: import_v43.z.object({
|
|
1008
|
-
name: import_v43.z.string().nullish(),
|
|
1009
|
-
arguments: import_v43.z.string().nullish()
|
|
1010
|
-
})
|
|
1011
|
-
})
|
|
1012
|
-
).nullish(),
|
|
1013
|
-
annotations: import_v43.z.array(
|
|
1014
|
-
import_v43.z.object({
|
|
1015
|
-
type: import_v43.z.literal("url_citation"),
|
|
1016
|
-
start_index: import_v43.z.number(),
|
|
1017
|
-
end_index: import_v43.z.number(),
|
|
1018
|
-
url: import_v43.z.string(),
|
|
1019
|
-
title: import_v43.z.string()
|
|
1020
|
-
})
|
|
1021
|
-
).nullish()
|
|
1022
|
-
}).nullish(),
|
|
1023
|
-
logprobs: import_v43.z.object({
|
|
1024
|
-
content: import_v43.z.array(
|
|
1025
|
-
import_v43.z.object({
|
|
1026
|
-
token: import_v43.z.string(),
|
|
1027
|
-
logprob: import_v43.z.number(),
|
|
1028
|
-
top_logprobs: import_v43.z.array(
|
|
1029
|
-
import_v43.z.object({
|
|
1030
|
-
token: import_v43.z.string(),
|
|
1031
|
-
logprob: import_v43.z.number()
|
|
1032
|
-
})
|
|
1033
|
-
)
|
|
1034
|
-
})
|
|
1035
|
-
).nullish()
|
|
1036
|
-
}).nullish(),
|
|
1037
|
-
finish_reason: import_v43.z.string().nullish(),
|
|
1038
|
-
index: import_v43.z.number()
|
|
1039
|
-
})
|
|
1040
|
-
),
|
|
1041
|
-
usage: openaiTokenUsageSchema
|
|
1042
|
-
}),
|
|
1043
|
-
openaiErrorDataSchema
|
|
1044
|
-
]);
|
|
1045
1083
|
function isReasoningModel(modelId) {
|
|
1046
1084
|
return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
|
|
1047
1085
|
}
|
|
@@ -1092,8 +1130,7 @@ var reasoningModels = {
|
|
|
1092
1130
|
};
|
|
1093
1131
|
|
|
1094
1132
|
// src/completion/openai-completion-language-model.ts
|
|
1095
|
-
var
|
|
1096
|
-
var import_v45 = require("zod/v4");
|
|
1133
|
+
var import_provider_utils8 = require("@ai-sdk/provider-utils");
|
|
1097
1134
|
|
|
1098
1135
|
// src/completion/convert-to-openai-completion-prompt.ts
|
|
1099
1136
|
var import_provider4 = require("@ai-sdk/provider");
|
|
@@ -1200,48 +1237,111 @@ function mapOpenAIFinishReason2(finishReason) {
|
|
|
1200
1237
|
}
|
|
1201
1238
|
}
|
|
1202
1239
|
|
|
1240
|
+
// src/completion/openai-completion-api.ts
|
|
1241
|
+
var z4 = __toESM(require("zod/v4"));
|
|
1242
|
+
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1243
|
+
var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1244
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1245
|
+
z4.object({
|
|
1246
|
+
id: z4.string().nullish(),
|
|
1247
|
+
created: z4.number().nullish(),
|
|
1248
|
+
model: z4.string().nullish(),
|
|
1249
|
+
choices: z4.array(
|
|
1250
|
+
z4.object({
|
|
1251
|
+
text: z4.string(),
|
|
1252
|
+
finish_reason: z4.string(),
|
|
1253
|
+
logprobs: z4.object({
|
|
1254
|
+
tokens: z4.array(z4.string()),
|
|
1255
|
+
token_logprobs: z4.array(z4.number()),
|
|
1256
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1257
|
+
}).nullish()
|
|
1258
|
+
})
|
|
1259
|
+
),
|
|
1260
|
+
usage: z4.object({
|
|
1261
|
+
prompt_tokens: z4.number(),
|
|
1262
|
+
completion_tokens: z4.number(),
|
|
1263
|
+
total_tokens: z4.number()
|
|
1264
|
+
}).nullish()
|
|
1265
|
+
})
|
|
1266
|
+
)
|
|
1267
|
+
);
|
|
1268
|
+
var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1269
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1270
|
+
z4.union([
|
|
1271
|
+
z4.object({
|
|
1272
|
+
id: z4.string().nullish(),
|
|
1273
|
+
created: z4.number().nullish(),
|
|
1274
|
+
model: z4.string().nullish(),
|
|
1275
|
+
choices: z4.array(
|
|
1276
|
+
z4.object({
|
|
1277
|
+
text: z4.string(),
|
|
1278
|
+
finish_reason: z4.string().nullish(),
|
|
1279
|
+
index: z4.number(),
|
|
1280
|
+
logprobs: z4.object({
|
|
1281
|
+
tokens: z4.array(z4.string()),
|
|
1282
|
+
token_logprobs: z4.array(z4.number()),
|
|
1283
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1284
|
+
}).nullish()
|
|
1285
|
+
})
|
|
1286
|
+
),
|
|
1287
|
+
usage: z4.object({
|
|
1288
|
+
prompt_tokens: z4.number(),
|
|
1289
|
+
completion_tokens: z4.number(),
|
|
1290
|
+
total_tokens: z4.number()
|
|
1291
|
+
}).nullish()
|
|
1292
|
+
}),
|
|
1293
|
+
openaiErrorDataSchema
|
|
1294
|
+
])
|
|
1295
|
+
)
|
|
1296
|
+
);
|
|
1297
|
+
|
|
1203
1298
|
// src/completion/openai-completion-options.ts
|
|
1204
|
-
var
|
|
1205
|
-
var
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1299
|
+
var import_provider_utils7 = require("@ai-sdk/provider-utils");
|
|
1300
|
+
var z5 = __toESM(require("zod/v4"));
|
|
1301
|
+
var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
|
|
1302
|
+
() => (0, import_provider_utils7.zodSchema)(
|
|
1303
|
+
z5.object({
|
|
1304
|
+
/**
|
|
1305
|
+
Echo back the prompt in addition to the completion.
|
|
1306
|
+
*/
|
|
1307
|
+
echo: z5.boolean().optional(),
|
|
1308
|
+
/**
|
|
1309
|
+
Modify the likelihood of specified tokens appearing in the completion.
|
|
1310
|
+
|
|
1311
|
+
Accepts a JSON object that maps tokens (specified by their token ID in
|
|
1312
|
+
the GPT tokenizer) to an associated bias value from -100 to 100. You
|
|
1313
|
+
can use this tokenizer tool to convert text to token IDs. Mathematically,
|
|
1314
|
+
the bias is added to the logits generated by the model prior to sampling.
|
|
1315
|
+
The exact effect will vary per model, but values between -1 and 1 should
|
|
1316
|
+
decrease or increase likelihood of selection; values like -100 or 100
|
|
1317
|
+
should result in a ban or exclusive selection of the relevant token.
|
|
1318
|
+
|
|
1319
|
+
As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
1320
|
+
token from being generated.
|
|
1321
|
+
*/
|
|
1322
|
+
logitBias: z5.record(z5.string(), z5.number()).optional(),
|
|
1323
|
+
/**
|
|
1324
|
+
The suffix that comes after a completion of inserted text.
|
|
1325
|
+
*/
|
|
1326
|
+
suffix: z5.string().optional(),
|
|
1327
|
+
/**
|
|
1328
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1329
|
+
monitor and detect abuse. Learn more.
|
|
1330
|
+
*/
|
|
1331
|
+
user: z5.string().optional(),
|
|
1332
|
+
/**
|
|
1333
|
+
Return the log probabilities of the tokens. Including logprobs will increase
|
|
1334
|
+
the response size and can slow down response times. However, it can
|
|
1335
|
+
be useful to better understand how the model is behaving.
|
|
1336
|
+
Setting to true will return the log probabilities of the tokens that
|
|
1337
|
+
were generated.
|
|
1338
|
+
Setting to a number will return the log probabilities of the top n
|
|
1339
|
+
tokens that were generated.
|
|
1340
|
+
*/
|
|
1341
|
+
logprobs: z5.union([z5.boolean(), z5.number()]).optional()
|
|
1342
|
+
})
|
|
1343
|
+
)
|
|
1344
|
+
);
|
|
1245
1345
|
|
|
1246
1346
|
// src/completion/openai-completion-language-model.ts
|
|
1247
1347
|
var OpenAICompletionLanguageModel = class {
|
|
@@ -1276,12 +1376,12 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1276
1376
|
}) {
|
|
1277
1377
|
const warnings = [];
|
|
1278
1378
|
const openaiOptions = {
|
|
1279
|
-
...await (0,
|
|
1379
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1280
1380
|
provider: "openai",
|
|
1281
1381
|
providerOptions,
|
|
1282
1382
|
schema: openaiCompletionProviderOptions
|
|
1283
1383
|
}),
|
|
1284
|
-
...await (0,
|
|
1384
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1285
1385
|
provider: this.providerOptionsName,
|
|
1286
1386
|
providerOptions,
|
|
1287
1387
|
schema: openaiCompletionProviderOptions
|
|
@@ -1337,15 +1437,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1337
1437
|
responseHeaders,
|
|
1338
1438
|
value: response,
|
|
1339
1439
|
rawValue: rawResponse
|
|
1340
|
-
} = await (0,
|
|
1440
|
+
} = await (0, import_provider_utils8.postJsonToApi)({
|
|
1341
1441
|
url: this.config.url({
|
|
1342
1442
|
path: "/completions",
|
|
1343
1443
|
modelId: this.modelId
|
|
1344
1444
|
}),
|
|
1345
|
-
headers: (0,
|
|
1445
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1346
1446
|
body: args,
|
|
1347
1447
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1348
|
-
successfulResponseHandler: (0,
|
|
1448
|
+
successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
|
|
1349
1449
|
openaiCompletionResponseSchema
|
|
1350
1450
|
),
|
|
1351
1451
|
abortSignal: options.abortSignal,
|
|
@@ -1383,15 +1483,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1383
1483
|
include_usage: true
|
|
1384
1484
|
}
|
|
1385
1485
|
};
|
|
1386
|
-
const { responseHeaders, value: response } = await (0,
|
|
1486
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
|
|
1387
1487
|
url: this.config.url({
|
|
1388
1488
|
path: "/completions",
|
|
1389
1489
|
modelId: this.modelId
|
|
1390
1490
|
}),
|
|
1391
|
-
headers: (0,
|
|
1491
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1392
1492
|
body,
|
|
1393
1493
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1394
|
-
successfulResponseHandler: (0,
|
|
1494
|
+
successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
|
|
1395
1495
|
openaiCompletionChunkSchema
|
|
1396
1496
|
),
|
|
1397
1497
|
abortSignal: options.abortSignal,
|
|
@@ -1472,69 +1572,42 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1472
1572
|
};
|
|
1473
1573
|
}
|
|
1474
1574
|
};
|
|
1475
|
-
var usageSchema = import_v45.z.object({
|
|
1476
|
-
prompt_tokens: import_v45.z.number(),
|
|
1477
|
-
completion_tokens: import_v45.z.number(),
|
|
1478
|
-
total_tokens: import_v45.z.number()
|
|
1479
|
-
});
|
|
1480
|
-
var openaiCompletionResponseSchema = import_v45.z.object({
|
|
1481
|
-
id: import_v45.z.string().nullish(),
|
|
1482
|
-
created: import_v45.z.number().nullish(),
|
|
1483
|
-
model: import_v45.z.string().nullish(),
|
|
1484
|
-
choices: import_v45.z.array(
|
|
1485
|
-
import_v45.z.object({
|
|
1486
|
-
text: import_v45.z.string(),
|
|
1487
|
-
finish_reason: import_v45.z.string(),
|
|
1488
|
-
logprobs: import_v45.z.object({
|
|
1489
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1490
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1491
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1492
|
-
}).nullish()
|
|
1493
|
-
})
|
|
1494
|
-
),
|
|
1495
|
-
usage: usageSchema.nullish()
|
|
1496
|
-
});
|
|
1497
|
-
var openaiCompletionChunkSchema = import_v45.z.union([
|
|
1498
|
-
import_v45.z.object({
|
|
1499
|
-
id: import_v45.z.string().nullish(),
|
|
1500
|
-
created: import_v45.z.number().nullish(),
|
|
1501
|
-
model: import_v45.z.string().nullish(),
|
|
1502
|
-
choices: import_v45.z.array(
|
|
1503
|
-
import_v45.z.object({
|
|
1504
|
-
text: import_v45.z.string(),
|
|
1505
|
-
finish_reason: import_v45.z.string().nullish(),
|
|
1506
|
-
index: import_v45.z.number(),
|
|
1507
|
-
logprobs: import_v45.z.object({
|
|
1508
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1509
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1510
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1511
|
-
}).nullish()
|
|
1512
|
-
})
|
|
1513
|
-
),
|
|
1514
|
-
usage: usageSchema.nullish()
|
|
1515
|
-
}),
|
|
1516
|
-
openaiErrorDataSchema
|
|
1517
|
-
]);
|
|
1518
1575
|
|
|
1519
1576
|
// src/embedding/openai-embedding-model.ts
|
|
1520
1577
|
var import_provider5 = require("@ai-sdk/provider");
|
|
1521
|
-
var
|
|
1522
|
-
var import_v47 = require("zod/v4");
|
|
1578
|
+
var import_provider_utils11 = require("@ai-sdk/provider-utils");
|
|
1523
1579
|
|
|
1524
1580
|
// src/embedding/openai-embedding-options.ts
|
|
1525
|
-
var
|
|
1526
|
-
var
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1581
|
+
var import_provider_utils9 = require("@ai-sdk/provider-utils");
|
|
1582
|
+
var z6 = __toESM(require("zod/v4"));
|
|
1583
|
+
var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
|
|
1584
|
+
() => (0, import_provider_utils9.zodSchema)(
|
|
1585
|
+
z6.object({
|
|
1586
|
+
/**
|
|
1587
|
+
The number of dimensions the resulting output embeddings should have.
|
|
1588
|
+
Only supported in text-embedding-3 and later models.
|
|
1589
|
+
*/
|
|
1590
|
+
dimensions: z6.number().optional(),
|
|
1591
|
+
/**
|
|
1592
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1593
|
+
monitor and detect abuse. Learn more.
|
|
1594
|
+
*/
|
|
1595
|
+
user: z6.string().optional()
|
|
1596
|
+
})
|
|
1597
|
+
)
|
|
1598
|
+
);
|
|
1599
|
+
|
|
1600
|
+
// src/embedding/openai-embedding-api.ts
|
|
1601
|
+
var import_provider_utils10 = require("@ai-sdk/provider-utils");
|
|
1602
|
+
var z7 = __toESM(require("zod/v4"));
|
|
1603
|
+
var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
|
|
1604
|
+
() => (0, import_provider_utils10.zodSchema)(
|
|
1605
|
+
z7.object({
|
|
1606
|
+
data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
|
|
1607
|
+
usage: z7.object({ prompt_tokens: z7.number() }).nullish()
|
|
1608
|
+
})
|
|
1609
|
+
)
|
|
1610
|
+
);
|
|
1538
1611
|
|
|
1539
1612
|
// src/embedding/openai-embedding-model.ts
|
|
1540
1613
|
var OpenAIEmbeddingModel = class {
|
|
@@ -1563,7 +1636,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1563
1636
|
values
|
|
1564
1637
|
});
|
|
1565
1638
|
}
|
|
1566
|
-
const openaiOptions = (_a = await (0,
|
|
1639
|
+
const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
|
|
1567
1640
|
provider: "openai",
|
|
1568
1641
|
providerOptions,
|
|
1569
1642
|
schema: openaiEmbeddingProviderOptions
|
|
@@ -1572,12 +1645,12 @@ var OpenAIEmbeddingModel = class {
|
|
|
1572
1645
|
responseHeaders,
|
|
1573
1646
|
value: response,
|
|
1574
1647
|
rawValue
|
|
1575
|
-
} = await (0,
|
|
1648
|
+
} = await (0, import_provider_utils11.postJsonToApi)({
|
|
1576
1649
|
url: this.config.url({
|
|
1577
1650
|
path: "/embeddings",
|
|
1578
1651
|
modelId: this.modelId
|
|
1579
1652
|
}),
|
|
1580
|
-
headers: (0,
|
|
1653
|
+
headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
|
|
1581
1654
|
body: {
|
|
1582
1655
|
model: this.modelId,
|
|
1583
1656
|
input: values,
|
|
@@ -1586,7 +1659,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1586
1659
|
user: openaiOptions.user
|
|
1587
1660
|
},
|
|
1588
1661
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1589
|
-
successfulResponseHandler: (0,
|
|
1662
|
+
successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
|
|
1590
1663
|
openaiTextEmbeddingResponseSchema
|
|
1591
1664
|
),
|
|
1592
1665
|
abortSignal,
|
|
@@ -1599,14 +1672,25 @@ var OpenAIEmbeddingModel = class {
|
|
|
1599
1672
|
};
|
|
1600
1673
|
}
|
|
1601
1674
|
};
|
|
1602
|
-
var openaiTextEmbeddingResponseSchema = import_v47.z.object({
|
|
1603
|
-
data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
|
|
1604
|
-
usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
|
|
1605
|
-
});
|
|
1606
1675
|
|
|
1607
1676
|
// src/image/openai-image-model.ts
|
|
1608
|
-
var
|
|
1609
|
-
|
|
1677
|
+
var import_provider_utils13 = require("@ai-sdk/provider-utils");
|
|
1678
|
+
|
|
1679
|
+
// src/image/openai-image-api.ts
|
|
1680
|
+
var import_provider_utils12 = require("@ai-sdk/provider-utils");
|
|
1681
|
+
var z8 = __toESM(require("zod/v4"));
|
|
1682
|
+
var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
|
|
1683
|
+
() => (0, import_provider_utils12.zodSchema)(
|
|
1684
|
+
z8.object({
|
|
1685
|
+
data: z8.array(
|
|
1686
|
+
z8.object({
|
|
1687
|
+
b64_json: z8.string(),
|
|
1688
|
+
revised_prompt: z8.string().optional()
|
|
1689
|
+
})
|
|
1690
|
+
)
|
|
1691
|
+
})
|
|
1692
|
+
)
|
|
1693
|
+
);
|
|
1610
1694
|
|
|
1611
1695
|
// src/image/openai-image-options.ts
|
|
1612
1696
|
var modelMaxImagesPerCall = {
|
|
@@ -1657,12 +1741,12 @@ var OpenAIImageModel = class {
|
|
|
1657
1741
|
warnings.push({ type: "unsupported-setting", setting: "seed" });
|
|
1658
1742
|
}
|
|
1659
1743
|
const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
|
|
1660
|
-
const { value: response, responseHeaders } = await (0,
|
|
1744
|
+
const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
|
|
1661
1745
|
url: this.config.url({
|
|
1662
1746
|
path: "/images/generations",
|
|
1663
1747
|
modelId: this.modelId
|
|
1664
1748
|
}),
|
|
1665
|
-
headers: (0,
|
|
1749
|
+
headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
|
|
1666
1750
|
body: {
|
|
1667
1751
|
model: this.modelId,
|
|
1668
1752
|
prompt,
|
|
@@ -1672,7 +1756,7 @@ var OpenAIImageModel = class {
|
|
|
1672
1756
|
...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
|
|
1673
1757
|
},
|
|
1674
1758
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1675
|
-
successfulResponseHandler: (0,
|
|
1759
|
+
successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
|
|
1676
1760
|
openaiImageResponseSchema
|
|
1677
1761
|
),
|
|
1678
1762
|
abortSignal,
|
|
@@ -1698,42 +1782,75 @@ var OpenAIImageModel = class {
|
|
|
1698
1782
|
};
|
|
1699
1783
|
}
|
|
1700
1784
|
};
|
|
1701
|
-
var openaiImageResponseSchema = import_v48.z.object({
|
|
1702
|
-
data: import_v48.z.array(
|
|
1703
|
-
import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
|
|
1704
|
-
)
|
|
1705
|
-
});
|
|
1706
1785
|
|
|
1707
1786
|
// src/transcription/openai-transcription-model.ts
|
|
1708
|
-
var
|
|
1709
|
-
|
|
1787
|
+
var import_provider_utils16 = require("@ai-sdk/provider-utils");
|
|
1788
|
+
|
|
1789
|
+
// src/transcription/openai-transcription-api.ts
|
|
1790
|
+
var import_provider_utils14 = require("@ai-sdk/provider-utils");
|
|
1791
|
+
var z9 = __toESM(require("zod/v4"));
|
|
1792
|
+
var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidator)(
|
|
1793
|
+
() => (0, import_provider_utils14.zodSchema)(
|
|
1794
|
+
z9.object({
|
|
1795
|
+
text: z9.string(),
|
|
1796
|
+
language: z9.string().nullish(),
|
|
1797
|
+
duration: z9.number().nullish(),
|
|
1798
|
+
words: z9.array(
|
|
1799
|
+
z9.object({
|
|
1800
|
+
word: z9.string(),
|
|
1801
|
+
start: z9.number(),
|
|
1802
|
+
end: z9.number()
|
|
1803
|
+
})
|
|
1804
|
+
).nullish(),
|
|
1805
|
+
segments: z9.array(
|
|
1806
|
+
z9.object({
|
|
1807
|
+
id: z9.number(),
|
|
1808
|
+
seek: z9.number(),
|
|
1809
|
+
start: z9.number(),
|
|
1810
|
+
end: z9.number(),
|
|
1811
|
+
text: z9.string(),
|
|
1812
|
+
tokens: z9.array(z9.number()),
|
|
1813
|
+
temperature: z9.number(),
|
|
1814
|
+
avg_logprob: z9.number(),
|
|
1815
|
+
compression_ratio: z9.number(),
|
|
1816
|
+
no_speech_prob: z9.number()
|
|
1817
|
+
})
|
|
1818
|
+
).nullish()
|
|
1819
|
+
})
|
|
1820
|
+
)
|
|
1821
|
+
);
|
|
1710
1822
|
|
|
1711
1823
|
// src/transcription/openai-transcription-options.ts
|
|
1712
|
-
var
|
|
1713
|
-
var
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1824
|
+
var import_provider_utils15 = require("@ai-sdk/provider-utils");
|
|
1825
|
+
var z10 = __toESM(require("zod/v4"));
|
|
1826
|
+
var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazyValidator)(
|
|
1827
|
+
() => (0, import_provider_utils15.zodSchema)(
|
|
1828
|
+
z10.object({
|
|
1829
|
+
/**
|
|
1830
|
+
* Additional information to include in the transcription response.
|
|
1831
|
+
*/
|
|
1832
|
+
include: z10.array(z10.string()).optional(),
|
|
1833
|
+
/**
|
|
1834
|
+
* The language of the input audio in ISO-639-1 format.
|
|
1835
|
+
*/
|
|
1836
|
+
language: z10.string().optional(),
|
|
1837
|
+
/**
|
|
1838
|
+
* An optional text to guide the model's style or continue a previous audio segment.
|
|
1839
|
+
*/
|
|
1840
|
+
prompt: z10.string().optional(),
|
|
1841
|
+
/**
|
|
1842
|
+
* The sampling temperature, between 0 and 1.
|
|
1843
|
+
* @default 0
|
|
1844
|
+
*/
|
|
1845
|
+
temperature: z10.number().min(0).max(1).default(0).optional(),
|
|
1846
|
+
/**
|
|
1847
|
+
* The timestamp granularities to populate for this transcription.
|
|
1848
|
+
* @default ['segment']
|
|
1849
|
+
*/
|
|
1850
|
+
timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
|
|
1851
|
+
})
|
|
1852
|
+
)
|
|
1853
|
+
);
|
|
1737
1854
|
|
|
1738
1855
|
// src/transcription/openai-transcription-model.ts
|
|
1739
1856
|
var languageMap = {
|
|
@@ -1810,15 +1927,15 @@ var OpenAITranscriptionModel = class {
|
|
|
1810
1927
|
providerOptions
|
|
1811
1928
|
}) {
|
|
1812
1929
|
const warnings = [];
|
|
1813
|
-
const openAIOptions = await (0,
|
|
1930
|
+
const openAIOptions = await (0, import_provider_utils16.parseProviderOptions)({
|
|
1814
1931
|
provider: "openai",
|
|
1815
1932
|
providerOptions,
|
|
1816
1933
|
schema: openAITranscriptionProviderOptions
|
|
1817
1934
|
});
|
|
1818
1935
|
const formData = new FormData();
|
|
1819
|
-
const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
|
|
1936
|
+
const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils16.convertBase64ToUint8Array)(audio)]);
|
|
1820
1937
|
formData.append("model", this.modelId);
|
|
1821
|
-
const fileExtension = (0,
|
|
1938
|
+
const fileExtension = (0, import_provider_utils16.mediaTypeToExtension)(mediaType);
|
|
1822
1939
|
formData.append(
|
|
1823
1940
|
"file",
|
|
1824
1941
|
new File([blob], "audio", { type: mediaType }),
|
|
@@ -1863,15 +1980,15 @@ var OpenAITranscriptionModel = class {
|
|
|
1863
1980
|
value: response,
|
|
1864
1981
|
responseHeaders,
|
|
1865
1982
|
rawValue: rawResponse
|
|
1866
|
-
} = await (0,
|
|
1983
|
+
} = await (0, import_provider_utils16.postFormDataToApi)({
|
|
1867
1984
|
url: this.config.url({
|
|
1868
1985
|
path: "/audio/transcriptions",
|
|
1869
1986
|
modelId: this.modelId
|
|
1870
1987
|
}),
|
|
1871
|
-
headers: (0,
|
|
1988
|
+
headers: (0, import_provider_utils16.combineHeaders)(this.config.headers(), options.headers),
|
|
1872
1989
|
formData,
|
|
1873
1990
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1874
|
-
successfulResponseHandler: (0,
|
|
1991
|
+
successfulResponseHandler: (0, import_provider_utils16.createJsonResponseHandler)(
|
|
1875
1992
|
openaiTranscriptionResponseSchema
|
|
1876
1993
|
),
|
|
1877
1994
|
abortSignal: options.abortSignal,
|
|
@@ -1901,40 +2018,23 @@ var OpenAITranscriptionModel = class {
|
|
|
1901
2018
|
};
|
|
1902
2019
|
}
|
|
1903
2020
|
};
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
|
|
1916
|
-
import_v410.z.object({
|
|
1917
|
-
id: import_v410.z.number(),
|
|
1918
|
-
seek: import_v410.z.number(),
|
|
1919
|
-
start: import_v410.z.number(),
|
|
1920
|
-
end: import_v410.z.number(),
|
|
1921
|
-
text: import_v410.z.string(),
|
|
1922
|
-
tokens: import_v410.z.array(import_v410.z.number()),
|
|
1923
|
-
temperature: import_v410.z.number(),
|
|
1924
|
-
avg_logprob: import_v410.z.number(),
|
|
1925
|
-
compression_ratio: import_v410.z.number(),
|
|
1926
|
-
no_speech_prob: import_v410.z.number()
|
|
2021
|
+
|
|
2022
|
+
// src/speech/openai-speech-model.ts
|
|
2023
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
|
2024
|
+
|
|
2025
|
+
// src/speech/openai-speech-options.ts
|
|
2026
|
+
var import_provider_utils17 = require("@ai-sdk/provider-utils");
|
|
2027
|
+
var z11 = __toESM(require("zod/v4"));
|
|
2028
|
+
var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazyValidator)(
|
|
2029
|
+
() => (0, import_provider_utils17.zodSchema)(
|
|
2030
|
+
z11.object({
|
|
2031
|
+
instructions: z11.string().nullish(),
|
|
2032
|
+
speed: z11.number().min(0.25).max(4).default(1).nullish()
|
|
1927
2033
|
})
|
|
1928
|
-
)
|
|
1929
|
-
|
|
2034
|
+
)
|
|
2035
|
+
);
|
|
1930
2036
|
|
|
1931
2037
|
// src/speech/openai-speech-model.ts
|
|
1932
|
-
var import_provider_utils8 = require("@ai-sdk/provider-utils");
|
|
1933
|
-
var import_v411 = require("zod/v4");
|
|
1934
|
-
var OpenAIProviderOptionsSchema = import_v411.z.object({
|
|
1935
|
-
instructions: import_v411.z.string().nullish(),
|
|
1936
|
-
speed: import_v411.z.number().min(0.25).max(4).default(1).nullish()
|
|
1937
|
-
});
|
|
1938
2038
|
var OpenAISpeechModel = class {
|
|
1939
2039
|
constructor(modelId, config) {
|
|
1940
2040
|
this.modelId = modelId;
|
|
@@ -1954,10 +2054,10 @@ var OpenAISpeechModel = class {
|
|
|
1954
2054
|
providerOptions
|
|
1955
2055
|
}) {
|
|
1956
2056
|
const warnings = [];
|
|
1957
|
-
const openAIOptions = await (0,
|
|
2057
|
+
const openAIOptions = await (0, import_provider_utils18.parseProviderOptions)({
|
|
1958
2058
|
provider: "openai",
|
|
1959
2059
|
providerOptions,
|
|
1960
|
-
schema:
|
|
2060
|
+
schema: openaiSpeechProviderOptionsSchema
|
|
1961
2061
|
});
|
|
1962
2062
|
const requestBody = {
|
|
1963
2063
|
model: this.modelId,
|
|
@@ -2007,15 +2107,15 @@ var OpenAISpeechModel = class {
|
|
|
2007
2107
|
value: audio,
|
|
2008
2108
|
responseHeaders,
|
|
2009
2109
|
rawValue: rawResponse
|
|
2010
|
-
} = await (0,
|
|
2110
|
+
} = await (0, import_provider_utils18.postJsonToApi)({
|
|
2011
2111
|
url: this.config.url({
|
|
2012
2112
|
path: "/audio/speech",
|
|
2013
2113
|
modelId: this.modelId
|
|
2014
2114
|
}),
|
|
2015
|
-
headers: (0,
|
|
2115
|
+
headers: (0, import_provider_utils18.combineHeaders)(this.config.headers(), options.headers),
|
|
2016
2116
|
body: requestBody,
|
|
2017
2117
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2018
|
-
successfulResponseHandler: (0,
|
|
2118
|
+
successfulResponseHandler: (0, import_provider_utils18.createBinaryResponseHandler)(),
|
|
2019
2119
|
abortSignal: options.abortSignal,
|
|
2020
2120
|
fetch: this.config.fetch
|
|
2021
2121
|
});
|
|
@@ -2037,31 +2137,34 @@ var OpenAISpeechModel = class {
|
|
|
2037
2137
|
|
|
2038
2138
|
// src/responses/openai-responses-language-model.ts
|
|
2039
2139
|
var import_provider8 = require("@ai-sdk/provider");
|
|
2040
|
-
var
|
|
2041
|
-
var import_v419 = require("zod/v4");
|
|
2140
|
+
var import_provider_utils29 = require("@ai-sdk/provider-utils");
|
|
2042
2141
|
|
|
2043
2142
|
// src/responses/convert-to-openai-responses-input.ts
|
|
2044
2143
|
var import_provider6 = require("@ai-sdk/provider");
|
|
2045
|
-
var
|
|
2046
|
-
var
|
|
2144
|
+
var import_provider_utils20 = require("@ai-sdk/provider-utils");
|
|
2145
|
+
var z13 = __toESM(require("zod/v4"));
|
|
2047
2146
|
|
|
2048
2147
|
// src/tool/local-shell.ts
|
|
2049
|
-
var
|
|
2050
|
-
var
|
|
2051
|
-
var localShellInputSchema =
|
|
2052
|
-
|
|
2053
|
-
|
|
2054
|
-
|
|
2055
|
-
|
|
2056
|
-
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
|
|
2148
|
+
var import_provider_utils19 = require("@ai-sdk/provider-utils");
|
|
2149
|
+
var z12 = __toESM(require("zod/v4"));
|
|
2150
|
+
var localShellInputSchema = (0, import_provider_utils19.lazySchema)(
|
|
2151
|
+
() => (0, import_provider_utils19.zodSchema)(
|
|
2152
|
+
z12.object({
|
|
2153
|
+
action: z12.object({
|
|
2154
|
+
type: z12.literal("exec"),
|
|
2155
|
+
command: z12.array(z12.string()),
|
|
2156
|
+
timeoutMs: z12.number().optional(),
|
|
2157
|
+
user: z12.string().optional(),
|
|
2158
|
+
workingDirectory: z12.string().optional(),
|
|
2159
|
+
env: z12.record(z12.string(), z12.string()).optional()
|
|
2160
|
+
})
|
|
2161
|
+
})
|
|
2162
|
+
)
|
|
2163
|
+
);
|
|
2164
|
+
var localShellOutputSchema = (0, import_provider_utils19.lazySchema)(
|
|
2165
|
+
() => (0, import_provider_utils19.zodSchema)(z12.object({ output: z12.string() }))
|
|
2166
|
+
);
|
|
2167
|
+
var localShell = (0, import_provider_utils19.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2065
2168
|
id: "openai.local_shell",
|
|
2066
2169
|
name: "local_shell",
|
|
2067
2170
|
inputSchema: localShellInputSchema,
|
|
@@ -2126,7 +2229,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2126
2229
|
return {
|
|
2127
2230
|
type: "input_image",
|
|
2128
2231
|
...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
2129
|
-
image_url: `data:${mediaType};base64,${(0,
|
|
2232
|
+
image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
|
|
2130
2233
|
},
|
|
2131
2234
|
detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
|
|
2132
2235
|
};
|
|
@@ -2141,7 +2244,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2141
2244
|
type: "input_file",
|
|
2142
2245
|
...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
2143
2246
|
filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
|
|
2144
|
-
file_data: `data:application/pdf;base64,${(0,
|
|
2247
|
+
file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
|
|
2145
2248
|
}
|
|
2146
2249
|
};
|
|
2147
2250
|
} else {
|
|
@@ -2174,7 +2277,10 @@ async function convertToOpenAIResponsesInput({
|
|
|
2174
2277
|
break;
|
|
2175
2278
|
}
|
|
2176
2279
|
if (hasLocalShellTool && part.toolName === "local_shell") {
|
|
2177
|
-
const parsedInput =
|
|
2280
|
+
const parsedInput = await (0, import_provider_utils20.validateTypes)({
|
|
2281
|
+
value: part.input,
|
|
2282
|
+
schema: localShellInputSchema
|
|
2283
|
+
});
|
|
2178
2284
|
input.push({
|
|
2179
2285
|
type: "local_shell_call",
|
|
2180
2286
|
call_id: part.toolCallId,
|
|
@@ -2212,7 +2318,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2212
2318
|
break;
|
|
2213
2319
|
}
|
|
2214
2320
|
case "reasoning": {
|
|
2215
|
-
const providerOptions = await (0,
|
|
2321
|
+
const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
|
|
2216
2322
|
provider: "openai",
|
|
2217
2323
|
providerOptions: part.providerOptions,
|
|
2218
2324
|
schema: openaiResponsesReasoningProviderOptionsSchema
|
|
@@ -2270,10 +2376,14 @@ async function convertToOpenAIResponsesInput({
|
|
|
2270
2376
|
for (const part of content) {
|
|
2271
2377
|
const output = part.output;
|
|
2272
2378
|
if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
|
|
2379
|
+
const parsedOutput = await (0, import_provider_utils20.validateTypes)({
|
|
2380
|
+
value: output.value,
|
|
2381
|
+
schema: localShellOutputSchema
|
|
2382
|
+
});
|
|
2273
2383
|
input.push({
|
|
2274
2384
|
type: "local_shell_call_output",
|
|
2275
2385
|
call_id: part.toolCallId,
|
|
2276
|
-
output:
|
|
2386
|
+
output: parsedOutput.output
|
|
2277
2387
|
});
|
|
2278
2388
|
break;
|
|
2279
2389
|
}
|
|
@@ -2308,9 +2418,9 @@ async function convertToOpenAIResponsesInput({
|
|
|
2308
2418
|
}
|
|
2309
2419
|
return { input, warnings };
|
|
2310
2420
|
}
|
|
2311
|
-
var openaiResponsesReasoningProviderOptionsSchema =
|
|
2312
|
-
itemId:
|
|
2313
|
-
reasoningEncryptedContent:
|
|
2421
|
+
var openaiResponsesReasoningProviderOptionsSchema = z13.object({
|
|
2422
|
+
itemId: z13.string().nullish(),
|
|
2423
|
+
reasoningEncryptedContent: z13.string().nullish()
|
|
2314
2424
|
});
|
|
2315
2425
|
|
|
2316
2426
|
// src/responses/map-openai-responses-finish-reason.ts
|
|
@@ -2331,33 +2441,574 @@ function mapOpenAIResponseFinishReason({
|
|
|
2331
2441
|
}
|
|
2332
2442
|
}
|
|
2333
2443
|
|
|
2444
|
+
// src/responses/openai-responses-api.ts
|
|
2445
|
+
var import_provider_utils21 = require("@ai-sdk/provider-utils");
|
|
2446
|
+
var z14 = __toESM(require("zod/v4"));
|
|
2447
|
+
var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2448
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2449
|
+
z14.union([
|
|
2450
|
+
z14.object({
|
|
2451
|
+
type: z14.literal("response.output_text.delta"),
|
|
2452
|
+
item_id: z14.string(),
|
|
2453
|
+
delta: z14.string(),
|
|
2454
|
+
logprobs: z14.array(
|
|
2455
|
+
z14.object({
|
|
2456
|
+
token: z14.string(),
|
|
2457
|
+
logprob: z14.number(),
|
|
2458
|
+
top_logprobs: z14.array(
|
|
2459
|
+
z14.object({
|
|
2460
|
+
token: z14.string(),
|
|
2461
|
+
logprob: z14.number()
|
|
2462
|
+
})
|
|
2463
|
+
)
|
|
2464
|
+
})
|
|
2465
|
+
).nullish()
|
|
2466
|
+
}),
|
|
2467
|
+
z14.object({
|
|
2468
|
+
type: z14.enum(["response.completed", "response.incomplete"]),
|
|
2469
|
+
response: z14.object({
|
|
2470
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2471
|
+
usage: z14.object({
|
|
2472
|
+
input_tokens: z14.number(),
|
|
2473
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2474
|
+
output_tokens: z14.number(),
|
|
2475
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2476
|
+
}),
|
|
2477
|
+
service_tier: z14.string().nullish()
|
|
2478
|
+
})
|
|
2479
|
+
}),
|
|
2480
|
+
z14.object({
|
|
2481
|
+
type: z14.literal("response.created"),
|
|
2482
|
+
response: z14.object({
|
|
2483
|
+
id: z14.string(),
|
|
2484
|
+
created_at: z14.number(),
|
|
2485
|
+
model: z14.string(),
|
|
2486
|
+
service_tier: z14.string().nullish()
|
|
2487
|
+
})
|
|
2488
|
+
}),
|
|
2489
|
+
z14.object({
|
|
2490
|
+
type: z14.literal("response.output_item.added"),
|
|
2491
|
+
output_index: z14.number(),
|
|
2492
|
+
item: z14.discriminatedUnion("type", [
|
|
2493
|
+
z14.object({
|
|
2494
|
+
type: z14.literal("message"),
|
|
2495
|
+
id: z14.string()
|
|
2496
|
+
}),
|
|
2497
|
+
z14.object({
|
|
2498
|
+
type: z14.literal("reasoning"),
|
|
2499
|
+
id: z14.string(),
|
|
2500
|
+
encrypted_content: z14.string().nullish()
|
|
2501
|
+
}),
|
|
2502
|
+
z14.object({
|
|
2503
|
+
type: z14.literal("function_call"),
|
|
2504
|
+
id: z14.string(),
|
|
2505
|
+
call_id: z14.string(),
|
|
2506
|
+
name: z14.string(),
|
|
2507
|
+
arguments: z14.string()
|
|
2508
|
+
}),
|
|
2509
|
+
z14.object({
|
|
2510
|
+
type: z14.literal("web_search_call"),
|
|
2511
|
+
id: z14.string(),
|
|
2512
|
+
status: z14.string(),
|
|
2513
|
+
action: z14.object({
|
|
2514
|
+
type: z14.literal("search"),
|
|
2515
|
+
query: z14.string().optional()
|
|
2516
|
+
}).nullish()
|
|
2517
|
+
}),
|
|
2518
|
+
z14.object({
|
|
2519
|
+
type: z14.literal("computer_call"),
|
|
2520
|
+
id: z14.string(),
|
|
2521
|
+
status: z14.string()
|
|
2522
|
+
}),
|
|
2523
|
+
z14.object({
|
|
2524
|
+
type: z14.literal("file_search_call"),
|
|
2525
|
+
id: z14.string()
|
|
2526
|
+
}),
|
|
2527
|
+
z14.object({
|
|
2528
|
+
type: z14.literal("image_generation_call"),
|
|
2529
|
+
id: z14.string()
|
|
2530
|
+
}),
|
|
2531
|
+
z14.object({
|
|
2532
|
+
type: z14.literal("code_interpreter_call"),
|
|
2533
|
+
id: z14.string(),
|
|
2534
|
+
container_id: z14.string(),
|
|
2535
|
+
code: z14.string().nullable(),
|
|
2536
|
+
outputs: z14.array(
|
|
2537
|
+
z14.discriminatedUnion("type", [
|
|
2538
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2539
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2540
|
+
])
|
|
2541
|
+
).nullable(),
|
|
2542
|
+
status: z14.string()
|
|
2543
|
+
})
|
|
2544
|
+
])
|
|
2545
|
+
}),
|
|
2546
|
+
z14.object({
|
|
2547
|
+
type: z14.literal("response.output_item.done"),
|
|
2548
|
+
output_index: z14.number(),
|
|
2549
|
+
item: z14.discriminatedUnion("type", [
|
|
2550
|
+
z14.object({
|
|
2551
|
+
type: z14.literal("message"),
|
|
2552
|
+
id: z14.string()
|
|
2553
|
+
}),
|
|
2554
|
+
z14.object({
|
|
2555
|
+
type: z14.literal("reasoning"),
|
|
2556
|
+
id: z14.string(),
|
|
2557
|
+
encrypted_content: z14.string().nullish()
|
|
2558
|
+
}),
|
|
2559
|
+
z14.object({
|
|
2560
|
+
type: z14.literal("function_call"),
|
|
2561
|
+
id: z14.string(),
|
|
2562
|
+
call_id: z14.string(),
|
|
2563
|
+
name: z14.string(),
|
|
2564
|
+
arguments: z14.string(),
|
|
2565
|
+
status: z14.literal("completed")
|
|
2566
|
+
}),
|
|
2567
|
+
z14.object({
|
|
2568
|
+
type: z14.literal("code_interpreter_call"),
|
|
2569
|
+
id: z14.string(),
|
|
2570
|
+
code: z14.string().nullable(),
|
|
2571
|
+
container_id: z14.string(),
|
|
2572
|
+
outputs: z14.array(
|
|
2573
|
+
z14.discriminatedUnion("type", [
|
|
2574
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2575
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2576
|
+
])
|
|
2577
|
+
).nullable()
|
|
2578
|
+
}),
|
|
2579
|
+
z14.object({
|
|
2580
|
+
type: z14.literal("image_generation_call"),
|
|
2581
|
+
id: z14.string(),
|
|
2582
|
+
result: z14.string()
|
|
2583
|
+
}),
|
|
2584
|
+
z14.object({
|
|
2585
|
+
type: z14.literal("web_search_call"),
|
|
2586
|
+
id: z14.string(),
|
|
2587
|
+
status: z14.string(),
|
|
2588
|
+
action: z14.discriminatedUnion("type", [
|
|
2589
|
+
z14.object({
|
|
2590
|
+
type: z14.literal("search"),
|
|
2591
|
+
query: z14.string().nullish()
|
|
2592
|
+
}),
|
|
2593
|
+
z14.object({
|
|
2594
|
+
type: z14.literal("open_page"),
|
|
2595
|
+
url: z14.string()
|
|
2596
|
+
}),
|
|
2597
|
+
z14.object({
|
|
2598
|
+
type: z14.literal("find"),
|
|
2599
|
+
url: z14.string(),
|
|
2600
|
+
pattern: z14.string()
|
|
2601
|
+
})
|
|
2602
|
+
]).nullish()
|
|
2603
|
+
}),
|
|
2604
|
+
z14.object({
|
|
2605
|
+
type: z14.literal("file_search_call"),
|
|
2606
|
+
id: z14.string(),
|
|
2607
|
+
queries: z14.array(z14.string()),
|
|
2608
|
+
results: z14.array(
|
|
2609
|
+
z14.object({
|
|
2610
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2611
|
+
file_id: z14.string(),
|
|
2612
|
+
filename: z14.string(),
|
|
2613
|
+
score: z14.number(),
|
|
2614
|
+
text: z14.string()
|
|
2615
|
+
})
|
|
2616
|
+
).nullish()
|
|
2617
|
+
}),
|
|
2618
|
+
z14.object({
|
|
2619
|
+
type: z14.literal("local_shell_call"),
|
|
2620
|
+
id: z14.string(),
|
|
2621
|
+
call_id: z14.string(),
|
|
2622
|
+
action: z14.object({
|
|
2623
|
+
type: z14.literal("exec"),
|
|
2624
|
+
command: z14.array(z14.string()),
|
|
2625
|
+
timeout_ms: z14.number().optional(),
|
|
2626
|
+
user: z14.string().optional(),
|
|
2627
|
+
working_directory: z14.string().optional(),
|
|
2628
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2629
|
+
})
|
|
2630
|
+
}),
|
|
2631
|
+
z14.object({
|
|
2632
|
+
type: z14.literal("computer_call"),
|
|
2633
|
+
id: z14.string(),
|
|
2634
|
+
status: z14.literal("completed")
|
|
2635
|
+
})
|
|
2636
|
+
])
|
|
2637
|
+
}),
|
|
2638
|
+
z14.object({
|
|
2639
|
+
type: z14.literal("response.function_call_arguments.delta"),
|
|
2640
|
+
item_id: z14.string(),
|
|
2641
|
+
output_index: z14.number(),
|
|
2642
|
+
delta: z14.string()
|
|
2643
|
+
}),
|
|
2644
|
+
z14.object({
|
|
2645
|
+
type: z14.literal("response.image_generation_call.partial_image"),
|
|
2646
|
+
item_id: z14.string(),
|
|
2647
|
+
output_index: z14.number(),
|
|
2648
|
+
partial_image_b64: z14.string()
|
|
2649
|
+
}),
|
|
2650
|
+
z14.object({
|
|
2651
|
+
type: z14.literal("response.code_interpreter_call_code.delta"),
|
|
2652
|
+
item_id: z14.string(),
|
|
2653
|
+
output_index: z14.number(),
|
|
2654
|
+
delta: z14.string()
|
|
2655
|
+
}),
|
|
2656
|
+
z14.object({
|
|
2657
|
+
type: z14.literal("response.code_interpreter_call_code.done"),
|
|
2658
|
+
item_id: z14.string(),
|
|
2659
|
+
output_index: z14.number(),
|
|
2660
|
+
code: z14.string()
|
|
2661
|
+
}),
|
|
2662
|
+
z14.object({
|
|
2663
|
+
type: z14.literal("response.output_text.annotation.added"),
|
|
2664
|
+
annotation: z14.discriminatedUnion("type", [
|
|
2665
|
+
z14.object({
|
|
2666
|
+
type: z14.literal("url_citation"),
|
|
2667
|
+
url: z14.string(),
|
|
2668
|
+
title: z14.string()
|
|
2669
|
+
}),
|
|
2670
|
+
z14.object({
|
|
2671
|
+
type: z14.literal("file_citation"),
|
|
2672
|
+
file_id: z14.string(),
|
|
2673
|
+
filename: z14.string().nullish(),
|
|
2674
|
+
index: z14.number().nullish(),
|
|
2675
|
+
start_index: z14.number().nullish(),
|
|
2676
|
+
end_index: z14.number().nullish(),
|
|
2677
|
+
quote: z14.string().nullish()
|
|
2678
|
+
})
|
|
2679
|
+
])
|
|
2680
|
+
}),
|
|
2681
|
+
z14.object({
|
|
2682
|
+
type: z14.literal("response.reasoning_summary_part.added"),
|
|
2683
|
+
item_id: z14.string(),
|
|
2684
|
+
summary_index: z14.number()
|
|
2685
|
+
}),
|
|
2686
|
+
z14.object({
|
|
2687
|
+
type: z14.literal("response.reasoning_summary_text.delta"),
|
|
2688
|
+
item_id: z14.string(),
|
|
2689
|
+
summary_index: z14.number(),
|
|
2690
|
+
delta: z14.string()
|
|
2691
|
+
}),
|
|
2692
|
+
z14.object({
|
|
2693
|
+
type: z14.literal("error"),
|
|
2694
|
+
code: z14.string(),
|
|
2695
|
+
message: z14.string(),
|
|
2696
|
+
param: z14.string().nullish(),
|
|
2697
|
+
sequence_number: z14.number()
|
|
2698
|
+
}),
|
|
2699
|
+
z14.object({ type: z14.string() }).loose().transform((value) => ({
|
|
2700
|
+
type: "unknown_chunk",
|
|
2701
|
+
message: value.type
|
|
2702
|
+
}))
|
|
2703
|
+
// fallback for unknown chunks
|
|
2704
|
+
])
|
|
2705
|
+
)
|
|
2706
|
+
);
|
|
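Editorial note: openaiResponsesChunkSchema is now wrapped in lazyValidator(() => zodSchema(...)), which defers building the large zod union above until a chunk is first validated instead of doing it at module load. A compact sketch of the wrapper and of how such a validator is consumed by the streaming path later in this file (the tiny schema stands in for the real union):

    import {
      createEventSourceResponseHandler,
      lazyValidator,
      zodSchema,
    } from '@ai-sdk/provider-utils';
    import * as z from 'zod/v4';

    // schema construction is deferred until the validator is first used
    const chunkValidatorSketch = lazyValidator(() =>
      zodSchema(z.object({ type: z.string() }).loose()),
    );

    // a validator of this kind is later handed to the SSE response handler,
    // as done for the real chunk schema further down
    const sseHandlerSketch = createEventSourceResponseHandler(chunkValidatorSketch);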
2707
|
+
var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2708
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2709
|
+
z14.object({
|
|
2710
|
+
id: z14.string(),
|
|
2711
|
+
created_at: z14.number(),
|
|
2712
|
+
error: z14.object({
|
|
2713
|
+
code: z14.string(),
|
|
2714
|
+
message: z14.string()
|
|
2715
|
+
}).nullish(),
|
|
2716
|
+
model: z14.string(),
|
|
2717
|
+
output: z14.array(
|
|
2718
|
+
z14.discriminatedUnion("type", [
|
|
2719
|
+
z14.object({
|
|
2720
|
+
type: z14.literal("message"),
|
|
2721
|
+
role: z14.literal("assistant"),
|
|
2722
|
+
id: z14.string(),
|
|
2723
|
+
content: z14.array(
|
|
2724
|
+
z14.object({
|
|
2725
|
+
type: z14.literal("output_text"),
|
|
2726
|
+
text: z14.string(),
|
|
2727
|
+
logprobs: z14.array(
|
|
2728
|
+
z14.object({
|
|
2729
|
+
token: z14.string(),
|
|
2730
|
+
logprob: z14.number(),
|
|
2731
|
+
top_logprobs: z14.array(
|
|
2732
|
+
z14.object({
|
|
2733
|
+
token: z14.string(),
|
|
2734
|
+
logprob: z14.number()
|
|
2735
|
+
})
|
|
2736
|
+
)
|
|
2737
|
+
})
|
|
2738
|
+
).nullish(),
|
|
2739
|
+
annotations: z14.array(
|
|
2740
|
+
z14.discriminatedUnion("type", [
|
|
2741
|
+
z14.object({
|
|
2742
|
+
type: z14.literal("url_citation"),
|
|
2743
|
+
start_index: z14.number(),
|
|
2744
|
+
end_index: z14.number(),
|
|
2745
|
+
url: z14.string(),
|
|
2746
|
+
title: z14.string()
|
|
2747
|
+
}),
|
|
2748
|
+
z14.object({
|
|
2749
|
+
type: z14.literal("file_citation"),
|
|
2750
|
+
file_id: z14.string(),
|
|
2751
|
+
filename: z14.string().nullish(),
|
|
2752
|
+
index: z14.number().nullish(),
|
|
2753
|
+
start_index: z14.number().nullish(),
|
|
2754
|
+
end_index: z14.number().nullish(),
|
|
2755
|
+
quote: z14.string().nullish()
|
|
2756
|
+
}),
|
|
2757
|
+
z14.object({
|
|
2758
|
+
type: z14.literal("container_file_citation")
|
|
2759
|
+
})
|
|
2760
|
+
])
|
|
2761
|
+
)
|
|
2762
|
+
})
|
|
2763
|
+
)
|
|
2764
|
+
}),
|
|
2765
|
+
z14.object({
|
|
2766
|
+
type: z14.literal("web_search_call"),
|
|
2767
|
+
id: z14.string(),
|
|
2768
|
+
status: z14.string(),
|
|
2769
|
+
action: z14.discriminatedUnion("type", [
|
|
2770
|
+
z14.object({
|
|
2771
|
+
type: z14.literal("search"),
|
|
2772
|
+
query: z14.string().nullish()
|
|
2773
|
+
}),
|
|
2774
|
+
z14.object({
|
|
2775
|
+
type: z14.literal("open_page"),
|
|
2776
|
+
url: z14.string()
|
|
2777
|
+
}),
|
|
2778
|
+
z14.object({
|
|
2779
|
+
type: z14.literal("find"),
|
|
2780
|
+
url: z14.string(),
|
|
2781
|
+
pattern: z14.string()
|
|
2782
|
+
})
|
|
2783
|
+
]).nullish()
|
|
2784
|
+
}),
|
|
2785
|
+
z14.object({
|
|
2786
|
+
type: z14.literal("file_search_call"),
|
|
2787
|
+
id: z14.string(),
|
|
2788
|
+
queries: z14.array(z14.string()),
|
|
2789
|
+
results: z14.array(
|
|
2790
|
+
z14.object({
|
|
2791
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2792
|
+
file_id: z14.string(),
|
|
2793
|
+
filename: z14.string(),
|
|
2794
|
+
score: z14.number(),
|
|
2795
|
+
text: z14.string()
|
|
2796
|
+
})
|
|
2797
|
+
).nullish()
|
|
2798
|
+
}),
|
|
2799
|
+
z14.object({
|
|
2800
|
+
type: z14.literal("code_interpreter_call"),
|
|
2801
|
+
id: z14.string(),
|
|
2802
|
+
code: z14.string().nullable(),
|
|
2803
|
+
container_id: z14.string(),
|
|
2804
|
+
outputs: z14.array(
|
|
2805
|
+
z14.discriminatedUnion("type", [
|
|
2806
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2807
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2808
|
+
])
|
|
2809
|
+
).nullable()
|
|
2810
|
+
}),
|
|
2811
|
+
z14.object({
|
|
2812
|
+
type: z14.literal("image_generation_call"),
|
|
2813
|
+
id: z14.string(),
|
|
2814
|
+
result: z14.string()
|
|
2815
|
+
}),
|
|
2816
|
+
z14.object({
|
|
2817
|
+
type: z14.literal("local_shell_call"),
|
|
2818
|
+
id: z14.string(),
|
|
2819
|
+
call_id: z14.string(),
|
|
2820
|
+
action: z14.object({
|
|
2821
|
+
type: z14.literal("exec"),
|
|
2822
|
+
command: z14.array(z14.string()),
|
|
2823
|
+
timeout_ms: z14.number().optional(),
|
|
2824
|
+
user: z14.string().optional(),
|
|
2825
|
+
working_directory: z14.string().optional(),
|
|
2826
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2827
|
+
})
|
|
2828
|
+
}),
|
|
2829
|
+
z14.object({
|
|
2830
|
+
type: z14.literal("function_call"),
|
|
2831
|
+
call_id: z14.string(),
|
|
2832
|
+
name: z14.string(),
|
|
2833
|
+
arguments: z14.string(),
|
|
2834
|
+
id: z14.string()
|
|
2835
|
+
}),
|
|
2836
|
+
z14.object({
|
|
2837
|
+
type: z14.literal("computer_call"),
|
|
2838
|
+
id: z14.string(),
|
|
2839
|
+
status: z14.string().optional()
|
|
2840
|
+
}),
|
|
2841
|
+
z14.object({
|
|
2842
|
+
type: z14.literal("reasoning"),
|
|
2843
|
+
id: z14.string(),
|
|
2844
|
+
encrypted_content: z14.string().nullish(),
|
|
2845
|
+
summary: z14.array(
|
|
2846
|
+
z14.object({
|
|
2847
|
+
type: z14.literal("summary_text"),
|
|
2848
|
+
text: z14.string()
|
|
2849
|
+
})
|
|
2850
|
+
)
|
|
2851
|
+
})
|
|
2852
|
+
])
|
|
2853
|
+
),
|
|
2854
|
+
service_tier: z14.string().nullish(),
|
|
2855
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2856
|
+
usage: z14.object({
|
|
2857
|
+
input_tokens: z14.number(),
|
|
2858
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2859
|
+
output_tokens: z14.number(),
|
|
2860
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2861
|
+
})
|
|
2862
|
+
})
|
|
2863
|
+
)
|
|
2864
|
+
);
|
|
2865
|
+
|
|
2866
|
+
// src/responses/openai-responses-options.ts
|
|
2867
|
+
var import_provider_utils22 = require("@ai-sdk/provider-utils");
|
|
2868
|
+
var z15 = __toESM(require("zod/v4"));
|
|
2869
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2870
|
+
var openaiResponsesReasoningModelIds = [
|
|
2871
|
+
"o1",
|
|
2872
|
+
"o1-2024-12-17",
|
|
2873
|
+
"o3-mini",
|
|
2874
|
+
"o3-mini-2025-01-31",
|
|
2875
|
+
"o3",
|
|
2876
|
+
"o3-2025-04-16",
|
|
2877
|
+
"o4-mini",
|
|
2878
|
+
"o4-mini-2025-04-16",
|
|
2879
|
+
"codex-mini-latest",
|
|
2880
|
+
"computer-use-preview",
|
|
2881
|
+
"gpt-5",
|
|
2882
|
+
"gpt-5-2025-08-07",
|
|
2883
|
+
"gpt-5-codex",
|
|
2884
|
+
"gpt-5-mini",
|
|
2885
|
+
"gpt-5-mini-2025-08-07",
|
|
2886
|
+
"gpt-5-nano",
|
|
2887
|
+
"gpt-5-nano-2025-08-07",
|
|
2888
|
+
"gpt-5-pro",
|
|
2889
|
+
"gpt-5-pro-2025-10-06"
|
|
2890
|
+
];
|
|
2891
|
+
var openaiResponsesModelIds = [
|
|
2892
|
+
"gpt-4.1",
|
|
2893
|
+
"gpt-4.1-2025-04-14",
|
|
2894
|
+
"gpt-4.1-mini",
|
|
2895
|
+
"gpt-4.1-mini-2025-04-14",
|
|
2896
|
+
"gpt-4.1-nano",
|
|
2897
|
+
"gpt-4.1-nano-2025-04-14",
|
|
2898
|
+
"gpt-4o",
|
|
2899
|
+
"gpt-4o-2024-05-13",
|
|
2900
|
+
"gpt-4o-2024-08-06",
|
|
2901
|
+
"gpt-4o-2024-11-20",
|
|
2902
|
+
"gpt-4o-audio-preview",
|
|
2903
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
2904
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
2905
|
+
"gpt-4o-search-preview",
|
|
2906
|
+
"gpt-4o-search-preview-2025-03-11",
|
|
2907
|
+
"gpt-4o-mini-search-preview",
|
|
2908
|
+
"gpt-4o-mini-search-preview-2025-03-11",
|
|
2909
|
+
"gpt-4o-mini",
|
|
2910
|
+
"gpt-4o-mini-2024-07-18",
|
|
2911
|
+
"gpt-4-turbo",
|
|
2912
|
+
"gpt-4-turbo-2024-04-09",
|
|
2913
|
+
"gpt-4-turbo-preview",
|
|
2914
|
+
"gpt-4-0125-preview",
|
|
2915
|
+
"gpt-4-1106-preview",
|
|
2916
|
+
"gpt-4",
|
|
2917
|
+
"gpt-4-0613",
|
|
2918
|
+
"gpt-4.5-preview",
|
|
2919
|
+
"gpt-4.5-preview-2025-02-27",
|
|
2920
|
+
"gpt-3.5-turbo-0125",
|
|
2921
|
+
"gpt-3.5-turbo",
|
|
2922
|
+
"gpt-3.5-turbo-1106",
|
|
2923
|
+
"chatgpt-4o-latest",
|
|
2924
|
+
"gpt-5-chat-latest",
|
|
2925
|
+
...openaiResponsesReasoningModelIds
|
|
2926
|
+
];
|
|
2927
|
+
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
|
|
2928
|
+
() => (0, import_provider_utils22.zodSchema)(
|
|
2929
|
+
z15.object({
|
|
2930
|
+
include: z15.array(
|
|
2931
|
+
z15.enum([
|
|
2932
|
+
"reasoning.encrypted_content",
|
|
2933
|
+
"file_search_call.results",
|
|
2934
|
+
"message.output_text.logprobs"
|
|
2935
|
+
])
|
|
2936
|
+
).nullish(),
|
|
2937
|
+
instructions: z15.string().nullish(),
|
|
2938
|
+
/**
|
|
2939
|
+
* Return the log probabilities of the tokens.
|
|
2940
|
+
*
|
|
2941
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
2942
|
+
* were generated.
|
|
2943
|
+
*
|
|
2944
|
+
* Setting to a number will return the log probabilities of the top n
|
|
2945
|
+
* tokens that were generated.
|
|
2946
|
+
*
|
|
2947
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
2948
|
+
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
2949
|
+
*/
|
|
2950
|
+
logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
2951
|
+
/**
|
|
2952
|
+
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
2953
|
+
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
2954
|
+
* Any further attempts to call a tool by the model will be ignored.
|
|
2955
|
+
*/
|
|
2956
|
+
maxToolCalls: z15.number().nullish(),
|
|
2957
|
+
metadata: z15.any().nullish(),
|
|
2958
|
+
parallelToolCalls: z15.boolean().nullish(),
|
|
2959
|
+
previousResponseId: z15.string().nullish(),
|
|
2960
|
+
promptCacheKey: z15.string().nullish(),
|
|
2961
|
+
reasoningEffort: z15.string().nullish(),
|
|
2962
|
+
reasoningSummary: z15.string().nullish(),
|
|
2963
|
+
safetyIdentifier: z15.string().nullish(),
|
|
2964
|
+
serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
|
|
2965
|
+
store: z15.boolean().nullish(),
|
|
2966
|
+
strictJsonSchema: z15.boolean().nullish(),
|
|
2967
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
|
|
2968
|
+
user: z15.string().nullish()
|
|
2969
|
+
})
|
|
2970
|
+
)
|
|
2971
|
+
);
|
|
2972
|
+
|
|
2334
2973
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2335
2974
|
var import_provider7 = require("@ai-sdk/provider");
|
|
2336
2975
|
|
|
2337
2976
|
// src/tool/code-interpreter.ts
|
|
2338
|
-
var
|
|
2339
|
-
var
|
|
2340
|
-
var codeInterpreterInputSchema =
|
|
2341
|
-
|
|
2342
|
-
|
|
2343
|
-
|
|
2344
|
-
|
|
2345
|
-
outputs: import_v414.z.array(
|
|
2346
|
-
import_v414.z.discriminatedUnion("type", [
|
|
2347
|
-
import_v414.z.object({ type: import_v414.z.literal("logs"), logs: import_v414.z.string() }),
|
|
2348
|
-
import_v414.z.object({ type: import_v414.z.literal("image"), url: import_v414.z.string() })
|
|
2349
|
-
])
|
|
2350
|
-
).nullish()
|
|
2351
|
-
});
|
|
2352
|
-
var codeInterpreterArgsSchema = import_v414.z.object({
|
|
2353
|
-
container: import_v414.z.union([
|
|
2354
|
-
import_v414.z.string(),
|
|
2355
|
-
import_v414.z.object({
|
|
2356
|
-
fileIds: import_v414.z.array(import_v414.z.string()).optional()
|
|
2977
|
+
var import_provider_utils23 = require("@ai-sdk/provider-utils");
|
|
2978
|
+
var z16 = __toESM(require("zod/v4"));
|
|
2979
|
+
var codeInterpreterInputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2980
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2981
|
+
z16.object({
|
|
2982
|
+
code: z16.string().nullish(),
|
|
2983
|
+
containerId: z16.string()
|
|
2357
2984
|
})
|
|
2358
|
-
|
|
2359
|
-
|
|
2360
|
-
var
|
|
2985
|
+
)
|
|
2986
|
+
);
|
|
2987
|
+
var codeInterpreterOutputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2988
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2989
|
+
z16.object({
|
|
2990
|
+
outputs: z16.array(
|
|
2991
|
+
z16.discriminatedUnion("type", [
|
|
2992
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2993
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2994
|
+
])
|
|
2995
|
+
).nullish()
|
|
2996
|
+
})
|
|
2997
|
+
)
|
|
2998
|
+
);
|
|
2999
|
+
var codeInterpreterArgsSchema = (0, import_provider_utils23.lazySchema)(
|
|
3000
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
3001
|
+
z16.object({
|
|
3002
|
+
container: z16.union([
|
|
3003
|
+
z16.string(),
|
|
3004
|
+
z16.object({
|
|
3005
|
+
fileIds: z16.array(z16.string()).optional()
|
|
3006
|
+
})
|
|
3007
|
+
]).optional()
|
|
3008
|
+
})
|
|
3009
|
+
)
|
|
3010
|
+
);
|
|
3011
|
+
var codeInterpreterToolFactory = (0, import_provider_utils23.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2361
3012
|
id: "openai.code_interpreter",
|
|
2362
3013
|
name: "code_interpreter",
|
|
2363
3014
|
inputSchema: codeInterpreterInputSchema,
|
|
@@ -2368,169 +3019,200 @@ var codeInterpreter = (args = {}) => {
|
|
|
2368
3019
|
};
|
|
2369
3020
|
|
|
2370
3021
|
// src/tool/file-search.ts
|
|
2371
|
-
var
|
|
2372
|
-
var
|
|
2373
|
-
var comparisonFilterSchema =
|
|
2374
|
-
key:
|
|
2375
|
-
type:
|
|
2376
|
-
value:
|
|
3022
|
+
var import_provider_utils24 = require("@ai-sdk/provider-utils");
|
|
3023
|
+
var z17 = __toESM(require("zod/v4"));
|
|
3024
|
+
var comparisonFilterSchema = z17.object({
|
|
3025
|
+
key: z17.string(),
|
|
3026
|
+
type: z17.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
|
3027
|
+
value: z17.union([z17.string(), z17.number(), z17.boolean()])
|
|
2377
3028
|
});
|
|
2378
|
-
var compoundFilterSchema =
|
|
2379
|
-
type:
|
|
2380
|
-
filters:
|
|
2381
|
-
|
|
3029
|
+
var compoundFilterSchema = z17.object({
|
|
3030
|
+
type: z17.enum(["and", "or"]),
|
|
3031
|
+
filters: z17.array(
|
|
3032
|
+
z17.union([comparisonFilterSchema, z17.lazy(() => compoundFilterSchema)])
|
|
2382
3033
|
)
|
|
2383
3034
|
});
|
|
2384
|
-
var fileSearchArgsSchema =
|
|
2385
|
-
|
|
2386
|
-
|
|
2387
|
-
|
|
2388
|
-
|
|
2389
|
-
|
|
2390
|
-
|
|
2391
|
-
|
|
2392
|
-
})
|
|
2393
|
-
|
|
2394
|
-
queries: import_v415.z.array(import_v415.z.string()),
|
|
2395
|
-
results: import_v415.z.array(
|
|
2396
|
-
import_v415.z.object({
|
|
2397
|
-
attributes: import_v415.z.record(import_v415.z.string(), import_v415.z.unknown()),
|
|
2398
|
-
fileId: import_v415.z.string(),
|
|
2399
|
-
filename: import_v415.z.string(),
|
|
2400
|
-
score: import_v415.z.number(),
|
|
2401
|
-
text: import_v415.z.string()
|
|
3035
|
+
var fileSearchArgsSchema = (0, import_provider_utils24.lazySchema)(
|
|
3036
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3037
|
+
z17.object({
|
|
3038
|
+
vectorStoreIds: z17.array(z17.string()),
|
|
3039
|
+
maxNumResults: z17.number().optional(),
|
|
3040
|
+
ranking: z17.object({
|
|
3041
|
+
ranker: z17.string().optional(),
|
|
3042
|
+
scoreThreshold: z17.number().optional()
|
|
3043
|
+
}).optional(),
|
|
3044
|
+
filters: z17.union([comparisonFilterSchema, compoundFilterSchema]).optional()
|
|
2402
3045
|
})
|
|
2403
|
-
)
|
|
2404
|
-
|
|
2405
|
-
var
|
|
3046
|
+
)
|
|
3047
|
+
);
|
|
3048
|
+
var fileSearchOutputSchema = (0, import_provider_utils24.lazySchema)(
|
|
3049
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3050
|
+
z17.object({
|
|
3051
|
+
queries: z17.array(z17.string()),
|
|
3052
|
+
results: z17.array(
|
|
3053
|
+
z17.object({
|
|
3054
|
+
attributes: z17.record(z17.string(), z17.unknown()),
|
|
3055
|
+
fileId: z17.string(),
|
|
3056
|
+
filename: z17.string(),
|
|
3057
|
+
score: z17.number(),
|
|
3058
|
+
text: z17.string()
|
|
3059
|
+
})
|
|
3060
|
+
).nullable()
|
|
3061
|
+
})
|
|
3062
|
+
)
|
|
3063
|
+
);
|
|
3064
|
+
var fileSearch = (0, import_provider_utils24.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2406
3065
|
id: "openai.file_search",
|
|
2407
3066
|
name: "file_search",
|
|
2408
|
-
inputSchema:
|
|
3067
|
+
inputSchema: z17.object({}),
|
|
2409
3068
|
outputSchema: fileSearchOutputSchema
|
|
2410
3069
|
});
|
|
2411
3070
|
|
|
2412
3071
|
// src/tool/web-search.ts
|
|
2413
|
-
var
|
|
2414
|
-
var
|
|
2415
|
-
var webSearchArgsSchema =
|
|
2416
|
-
|
|
2417
|
-
|
|
2418
|
-
|
|
2419
|
-
|
|
2420
|
-
|
|
2421
|
-
|
|
2422
|
-
|
|
2423
|
-
|
|
2424
|
-
|
|
2425
|
-
|
|
2426
|
-
|
|
2427
|
-
|
|
2428
|
-
|
|
3072
|
+
var import_provider_utils25 = require("@ai-sdk/provider-utils");
|
|
3073
|
+
var z18 = __toESM(require("zod/v4"));
|
|
3074
|
+
var webSearchArgsSchema = (0, import_provider_utils25.lazySchema)(
|
|
3075
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3076
|
+
z18.object({
|
|
3077
|
+
filters: z18.object({
|
|
3078
|
+
allowedDomains: z18.array(z18.string()).optional()
|
|
3079
|
+
}).optional(),
|
|
3080
|
+
searchContextSize: z18.enum(["low", "medium", "high"]).optional(),
|
|
3081
|
+
userLocation: z18.object({
|
|
3082
|
+
type: z18.literal("approximate"),
|
|
3083
|
+
country: z18.string().optional(),
|
|
3084
|
+
city: z18.string().optional(),
|
|
3085
|
+
region: z18.string().optional(),
|
|
3086
|
+
timezone: z18.string().optional()
|
|
3087
|
+
}).optional()
|
|
3088
|
+
})
|
|
3089
|
+
)
|
|
3090
|
+
);
|
|
3091
|
+
var webSearchInputSchema = (0, import_provider_utils25.lazySchema)(
|
|
3092
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3093
|
+
z18.object({
|
|
3094
|
+
action: z18.discriminatedUnion("type", [
|
|
3095
|
+
z18.object({
|
|
3096
|
+
type: z18.literal("search"),
|
|
3097
|
+
query: z18.string().nullish()
|
|
3098
|
+
}),
|
|
3099
|
+
z18.object({
|
|
3100
|
+
type: z18.literal("open_page"),
|
|
3101
|
+
url: z18.string()
|
|
3102
|
+
}),
|
|
3103
|
+
z18.object({
|
|
3104
|
+
type: z18.literal("find"),
|
|
3105
|
+
url: z18.string(),
|
|
3106
|
+
pattern: z18.string()
|
|
3107
|
+
})
|
|
3108
|
+
]).nullish()
|
|
3109
|
+
})
|
|
3110
|
+
)
|
|
3111
|
+
);
|
|
3112
|
+
var webSearchToolFactory = (0, import_provider_utils25.createProviderDefinedToolFactory)({
|
|
2429
3113
|
id: "openai.web_search",
|
|
2430
3114
|
name: "web_search",
|
|
2431
|
-
inputSchema:
|
|
2432
|
-
action: import_v416.z.discriminatedUnion("type", [
|
|
2433
|
-
import_v416.z.object({
|
|
2434
|
-
type: import_v416.z.literal("search"),
|
|
2435
|
-
query: import_v416.z.string().nullish()
|
|
2436
|
-
}),
|
|
2437
|
-
import_v416.z.object({
|
|
2438
|
-
type: import_v416.z.literal("open_page"),
|
|
2439
|
-
url: import_v416.z.string()
|
|
2440
|
-
}),
|
|
2441
|
-
import_v416.z.object({
|
|
2442
|
-
type: import_v416.z.literal("find"),
|
|
2443
|
-
url: import_v416.z.string(),
|
|
2444
|
-
pattern: import_v416.z.string()
|
|
2445
|
-
})
|
|
2446
|
-
]).nullish()
|
|
2447
|
-
})
|
|
3115
|
+
inputSchema: webSearchInputSchema
|
|
2448
3116
|
});
|
|
2449
3117
|
|
|
2450
3118
|
// src/tool/web-search-preview.ts
|
|
2451
|
-
var
|
|
2452
|
-
var
|
|
2453
|
-
var webSearchPreviewArgsSchema =
|
|
2454
|
-
|
|
2455
|
-
|
|
2456
|
-
|
|
2457
|
-
|
|
2458
|
-
|
|
2459
|
-
|
|
2460
|
-
|
|
2461
|
-
|
|
2462
|
-
|
|
2463
|
-
|
|
2464
|
-
|
|
2465
|
-
|
|
2466
|
-
|
|
2467
|
-
|
|
2468
|
-
|
|
2469
|
-
|
|
2470
|
-
|
|
2471
|
-
|
|
2472
|
-
|
|
2473
|
-
|
|
2474
|
-
|
|
2475
|
-
|
|
2476
|
-
|
|
2477
|
-
|
|
2478
|
-
|
|
2479
|
-
|
|
2480
|
-
|
|
2481
|
-
|
|
2482
|
-
|
|
2483
|
-
|
|
2484
|
-
|
|
2485
|
-
|
|
2486
|
-
|
|
2487
|
-
|
|
3119
|
+
var import_provider_utils26 = require("@ai-sdk/provider-utils");
|
|
3120
|
+
var z19 = __toESM(require("zod/v4"));
|
|
3121
|
+
var webSearchPreviewArgsSchema = (0, import_provider_utils26.lazySchema)(
|
|
3122
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3123
|
+
z19.object({
|
|
3124
|
+
/**
|
|
3125
|
+
* Search context size to use for the web search.
|
|
3126
|
+
* - high: Most comprehensive context, highest cost, slower response
|
|
3127
|
+
* - medium: Balanced context, cost, and latency (default)
|
|
3128
|
+
* - low: Least context, lowest cost, fastest response
|
|
3129
|
+
*/
|
|
3130
|
+
searchContextSize: z19.enum(["low", "medium", "high"]).optional(),
|
|
3131
|
+
/**
|
|
3132
|
+
* User location information to provide geographically relevant search results.
|
|
3133
|
+
*/
|
|
3134
|
+
userLocation: z19.object({
|
|
3135
|
+
/**
|
|
3136
|
+
* Type of location (always 'approximate')
|
|
3137
|
+
*/
|
|
3138
|
+
type: z19.literal("approximate"),
|
|
3139
|
+
/**
|
|
3140
|
+
* Two-letter ISO country code (e.g., 'US', 'GB')
|
|
3141
|
+
*/
|
|
3142
|
+
country: z19.string().optional(),
|
|
3143
|
+
/**
|
|
3144
|
+
* City name (free text, e.g., 'Minneapolis')
|
|
3145
|
+
*/
|
|
3146
|
+
city: z19.string().optional(),
|
|
3147
|
+
/**
|
|
3148
|
+
* Region name (free text, e.g., 'Minnesota')
|
|
3149
|
+
*/
|
|
3150
|
+
region: z19.string().optional(),
|
|
3151
|
+
/**
|
|
3152
|
+
* IANA timezone (e.g., 'America/Chicago')
|
|
3153
|
+
*/
|
|
3154
|
+
timezone: z19.string().optional()
|
|
3155
|
+
}).optional()
|
|
3156
|
+
})
|
|
3157
|
+
)
|
|
3158
|
+
);
|
|
3159
|
+
var webSearchPreviewInputSchema = (0, import_provider_utils26.lazySchema)(
|
|
3160
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3161
|
+
z19.object({
|
|
3162
|
+
action: z19.discriminatedUnion("type", [
|
|
3163
|
+
z19.object({
|
|
3164
|
+
type: z19.literal("search"),
|
|
3165
|
+
query: z19.string().nullish()
|
|
3166
|
+
}),
|
|
3167
|
+
z19.object({
|
|
3168
|
+
type: z19.literal("open_page"),
|
|
3169
|
+
url: z19.string()
|
|
3170
|
+
}),
|
|
3171
|
+
z19.object({
|
|
3172
|
+
type: z19.literal("find"),
|
|
3173
|
+
url: z19.string(),
|
|
3174
|
+
pattern: z19.string()
|
|
3175
|
+
})
|
|
3176
|
+
]).nullish()
|
|
3177
|
+
})
|
|
3178
|
+
)
|
|
3179
|
+
);
|
|
3180
|
+
var webSearchPreview = (0, import_provider_utils26.createProviderDefinedToolFactory)({
|
|
2488
3181
|
id: "openai.web_search_preview",
|
|
2489
3182
|
name: "web_search_preview",
|
|
2490
|
-
inputSchema:
|
|
2491
|
-
action: import_v417.z.discriminatedUnion("type", [
|
|
2492
|
-
import_v417.z.object({
|
|
2493
|
-
type: import_v417.z.literal("search"),
|
|
2494
|
-
query: import_v417.z.string().nullish()
|
|
2495
|
-
}),
|
|
2496
|
-
import_v417.z.object({
|
|
2497
|
-
type: import_v417.z.literal("open_page"),
|
|
2498
|
-
url: import_v417.z.string()
|
|
2499
|
-
}),
|
|
2500
|
-
import_v417.z.object({
|
|
2501
|
-
type: import_v417.z.literal("find"),
|
|
2502
|
-
url: import_v417.z.string(),
|
|
2503
|
-
pattern: import_v417.z.string()
|
|
2504
|
-
})
|
|
2505
|
-
]).nullish()
|
|
2506
|
-
})
|
|
3183
|
+
inputSchema: webSearchPreviewInputSchema
|
|
2507
3184
|
});
|
|
2508
3185
|
|
|
2509
3186
|
// src/tool/image-generation.ts
|
|
2510
|
-
var
|
|
2511
|
-
var
|
|
2512
|
-
var imageGenerationArgsSchema =
|
|
2513
|
-
|
|
2514
|
-
|
|
2515
|
-
|
|
2516
|
-
|
|
2517
|
-
|
|
2518
|
-
|
|
2519
|
-
|
|
2520
|
-
|
|
2521
|
-
|
|
2522
|
-
|
|
2523
|
-
|
|
2524
|
-
|
|
2525
|
-
|
|
2526
|
-
|
|
2527
|
-
|
|
2528
|
-
|
|
2529
|
-
|
|
2530
|
-
|
|
3187
|
+
var import_provider_utils27 = require("@ai-sdk/provider-utils");
|
|
3188
|
+
var z20 = __toESM(require("zod/v4"));
|
|
3189
|
+
var imageGenerationArgsSchema = (0, import_provider_utils27.lazySchema)(
|
|
3190
|
+
() => (0, import_provider_utils27.zodSchema)(
|
|
3191
|
+
z20.object({
|
|
3192
|
+
background: z20.enum(["auto", "opaque", "transparent"]).optional(),
|
|
3193
|
+
inputFidelity: z20.enum(["low", "high"]).optional(),
|
|
3194
|
+
inputImageMask: z20.object({
|
|
3195
|
+
fileId: z20.string().optional(),
|
|
3196
|
+
imageUrl: z20.string().optional()
|
|
3197
|
+
}).optional(),
|
|
3198
|
+
model: z20.string().optional(),
|
|
3199
|
+
moderation: z20.enum(["auto"]).optional(),
|
|
3200
|
+
outputCompression: z20.number().int().min(0).max(100).optional(),
|
|
3201
|
+
outputFormat: z20.enum(["png", "jpeg", "webp"]).optional(),
|
|
3202
|
+
partialImages: z20.number().int().min(0).max(3).optional(),
|
|
3203
|
+
quality: z20.enum(["auto", "low", "medium", "high"]).optional(),
|
|
3204
|
+
size: z20.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
|
|
3205
|
+
}).strict()
|
|
3206
|
+
)
|
|
3207
|
+
);
|
|
3208
|
+
var imageGenerationInputSchema = (0, import_provider_utils27.lazySchema)(() => (0, import_provider_utils27.zodSchema)(z20.object({})));
|
|
3209
|
+
var imageGenerationOutputSchema = (0, import_provider_utils27.lazySchema)(
|
|
3210
|
+
() => (0, import_provider_utils27.zodSchema)(z20.object({ result: z20.string() }))
|
|
3211
|
+
);
|
|
3212
|
+
var imageGenerationToolFactory = (0, import_provider_utils27.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2531
3213
|
id: "openai.image_generation",
|
|
2532
3214
|
name: "image_generation",
|
|
2533
|
-
inputSchema:
|
|
3215
|
+
inputSchema: imageGenerationInputSchema,
|
|
2534
3216
|
outputSchema: imageGenerationOutputSchema
|
|
2535
3217
|
});
|
|
2536
3218
|
var imageGeneration = (args = {}) => {
|
|
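Editorial note: all of the built-in tools rewritten above (code_interpreter, file_search, web_search, web_search_preview, image_generation) now follow the same shape: args/input/output schemas wrapped in lazySchema(() => zodSchema(...)) and a factory built with createProviderDefinedToolFactory or createProviderDefinedToolFactoryWithOutputSchema. A minimal sketch of that pattern with a made-up tool id, assuming only the factory options visible in this file ({ id, name, inputSchema, outputSchema }):

    import {
      createProviderDefinedToolFactoryWithOutputSchema,
      lazySchema,
      zodSchema,
    } from '@ai-sdk/provider-utils';
    import * as z from 'zod/v4';

    // input and output schemas are built lazily, mirroring the tools in this diff
    const exampleInputSchema = lazySchema(() =>
      zodSchema(z.object({ query: z.string() })),
    );
    const exampleOutputSchema = lazySchema(() =>
      zodSchema(z.object({ result: z.string() })),
    );

    // "openai.example_tool" is a hypothetical id used only for illustration
    const exampleToolFactory = createProviderDefinedToolFactoryWithOutputSchema({
      id: 'openai.example_tool',
      name: 'example_tool',
      inputSchema: exampleInputSchema,
      outputSchema: exampleOutputSchema,
    });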
@@ -2538,7 +3220,8 @@ var imageGeneration = (args = {}) => {
|
|
|
2538
3220
|
};
|
|
2539
3221
|
|
|
2540
3222
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2541
|
-
|
|
3223
|
+
var import_provider_utils28 = require("@ai-sdk/provider-utils");
|
|
3224
|
+
async function prepareResponsesTools({
|
|
2542
3225
|
tools,
|
|
2543
3226
|
toolChoice,
|
|
2544
3227
|
strictJsonSchema
|
|
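Editorial note: prepareResponsesTools becomes async above because, in the hunks that follow, each provider-defined tool's args are awaited through validateTypes before being mapped onto the OpenAI tool definitions (the caller later switches to await prepareResponsesTools(...) accordingly). A stripped-down sketch of that per-tool step, using an illustrative args schema and the web_search_preview mapping shown below:

    import { lazySchema, validateTypes, zodSchema } from '@ai-sdk/provider-utils';
    import * as z from 'zod/v4';

    // illustrative stand-in for one of the tool args schemas in this file
    const argsSchemaSketch = lazySchema(() =>
      zodSchema(
        z.object({
          searchContextSize: z.enum(['low', 'medium', 'high']).optional(),
        }),
      ),
    );

    // async because the args are validated before they are turned into the
    // snake_case tool object sent to the Responses API
    async function prepareOneToolSketch(rawArgs: unknown) {
      const args = await validateTypes({ value: rawArgs, schema: argsSchemaSketch });
      return {
        type: 'web_search_preview',
        search_context_size: args.searchContextSize,
      };
    }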
@@ -2563,7 +3246,10 @@ function prepareResponsesTools({
|
|
|
2563
3246
|
case "provider-defined": {
|
|
2564
3247
|
switch (tool.id) {
|
|
2565
3248
|
case "openai.file_search": {
|
|
2566
|
-
const args =
|
|
3249
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3250
|
+
value: tool.args,
|
|
3251
|
+
schema: fileSearchArgsSchema
|
|
3252
|
+
});
|
|
2567
3253
|
openaiTools.push({
|
|
2568
3254
|
type: "file_search",
|
|
2569
3255
|
vector_store_ids: args.vectorStoreIds,
|
|
@@ -2583,7 +3269,10 @@ function prepareResponsesTools({
|
|
|
2583
3269
|
break;
|
|
2584
3270
|
}
|
|
2585
3271
|
case "openai.web_search_preview": {
|
|
2586
|
-
const args =
|
|
3272
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3273
|
+
value: tool.args,
|
|
3274
|
+
schema: webSearchPreviewArgsSchema
|
|
3275
|
+
});
|
|
2587
3276
|
openaiTools.push({
|
|
2588
3277
|
type: "web_search_preview",
|
|
2589
3278
|
search_context_size: args.searchContextSize,
|
|
@@ -2592,7 +3281,10 @@ function prepareResponsesTools({
|
|
|
2592
3281
|
break;
|
|
2593
3282
|
}
|
|
2594
3283
|
case "openai.web_search": {
|
|
2595
|
-
const args =
|
|
3284
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3285
|
+
value: tool.args,
|
|
3286
|
+
schema: webSearchArgsSchema
|
|
3287
|
+
});
|
|
2596
3288
|
openaiTools.push({
|
|
2597
3289
|
type: "web_search",
|
|
2598
3290
|
filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
|
|
@@ -2602,7 +3294,10 @@ function prepareResponsesTools({
|
|
|
2602
3294
|
break;
|
|
2603
3295
|
}
|
|
2604
3296
|
case "openai.code_interpreter": {
|
|
2605
|
-
const args =
|
|
3297
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3298
|
+
value: tool.args,
|
|
3299
|
+
schema: codeInterpreterArgsSchema
|
|
3300
|
+
});
|
|
2606
3301
|
openaiTools.push({
|
|
2607
3302
|
type: "code_interpreter",
|
|
2608
3303
|
container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
|
|
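Editorial note: the container argument accepted by the code_interpreter tool is either a container id string, an object with fileIds, or absent; the ternary above normalizes all three into the wire format. Spelled out as a small helper (a sketch; the union type mirrors codeInterpreterArgsSchema earlier in this file):

    type ContainerArg = string | { fileIds?: string[] } | undefined;

    // normalizes the user-facing `container` option into the Responses API shape
    function toOpenAIContainer(container: ContainerArg) {
      if (container == null) {
        return { type: 'auto' as const, file_ids: undefined };
      }
      if (typeof container === 'string') {
        return container; // an existing container id is passed through unchanged
      }
      return { type: 'auto' as const, file_ids: container.fileIds };
    }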
@@ -2610,7 +3305,10 @@ function prepareResponsesTools({
|
|
|
2610
3305
|
break;
|
|
2611
3306
|
}
|
|
2612
3307
|
case "openai.image_generation": {
|
|
2613
|
-
const args =
|
|
3308
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3309
|
+
value: tool.args,
|
|
3310
|
+
schema: imageGenerationArgsSchema
|
|
3311
|
+
});
|
|
2614
3312
|
openaiTools.push({
|
|
2615
3313
|
type: "image_generation",
|
|
2616
3314
|
background: args.background,
|
|
@@ -2662,83 +3360,6 @@ function prepareResponsesTools({
|
|
|
2662
3360
|
}
|
|
2663
3361
|
|
|
2664
3362
|
// src/responses/openai-responses-language-model.ts
|
|
2665
|
-
var webSearchCallItem = import_v419.z.object({
|
|
2666
|
-
type: import_v419.z.literal("web_search_call"),
|
|
2667
|
-
id: import_v419.z.string(),
|
|
2668
|
-
status: import_v419.z.string(),
|
|
2669
|
-
action: import_v419.z.discriminatedUnion("type", [
|
|
2670
|
-
import_v419.z.object({
|
|
2671
|
-
type: import_v419.z.literal("search"),
|
|
2672
|
-
query: import_v419.z.string().nullish()
|
|
2673
|
-
}),
|
|
2674
|
-
import_v419.z.object({
|
|
2675
|
-
type: import_v419.z.literal("open_page"),
|
|
2676
|
-
url: import_v419.z.string()
|
|
2677
|
-
}),
|
|
2678
|
-
import_v419.z.object({
|
|
2679
|
-
type: import_v419.z.literal("find"),
|
|
2680
|
-
url: import_v419.z.string(),
|
|
2681
|
-
pattern: import_v419.z.string()
|
|
2682
|
-
})
|
|
2683
|
-
]).nullish()
|
|
2684
|
-
});
|
|
2685
|
-
var fileSearchCallItem = import_v419.z.object({
|
|
2686
|
-
type: import_v419.z.literal("file_search_call"),
|
|
2687
|
-
id: import_v419.z.string(),
|
|
2688
|
-
queries: import_v419.z.array(import_v419.z.string()),
|
|
2689
|
-
results: import_v419.z.array(
|
|
2690
|
-
import_v419.z.object({
|
|
2691
|
-
attributes: import_v419.z.record(import_v419.z.string(), import_v419.z.unknown()),
|
|
2692
|
-
file_id: import_v419.z.string(),
|
|
2693
|
-
filename: import_v419.z.string(),
|
|
2694
|
-
score: import_v419.z.number(),
|
|
2695
|
-
text: import_v419.z.string()
|
|
2696
|
-
})
|
|
2697
|
-
).nullish()
|
|
2698
|
-
});
|
|
2699
|
-
var codeInterpreterCallItem = import_v419.z.object({
|
|
2700
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
2701
|
-
id: import_v419.z.string(),
|
|
2702
|
-
code: import_v419.z.string().nullable(),
|
|
2703
|
-
container_id: import_v419.z.string(),
|
|
2704
|
-
outputs: import_v419.z.array(
|
|
2705
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2706
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
2707
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
2708
|
-
])
|
|
2709
|
-
).nullable()
|
|
2710
|
-
});
|
|
2711
|
-
var localShellCallItem = import_v419.z.object({
|
|
2712
|
-
type: import_v419.z.literal("local_shell_call"),
|
|
2713
|
-
id: import_v419.z.string(),
|
|
2714
|
-
call_id: import_v419.z.string(),
|
|
2715
|
-
action: import_v419.z.object({
|
|
2716
|
-
type: import_v419.z.literal("exec"),
|
|
2717
|
-
command: import_v419.z.array(import_v419.z.string()),
|
|
2718
|
-
timeout_ms: import_v419.z.number().optional(),
|
|
2719
|
-
user: import_v419.z.string().optional(),
|
|
2720
|
-
working_directory: import_v419.z.string().optional(),
|
|
2721
|
-
env: import_v419.z.record(import_v419.z.string(), import_v419.z.string()).optional()
|
|
2722
|
-
})
|
|
2723
|
-
});
|
|
2724
|
-
var imageGenerationCallItem = import_v419.z.object({
|
|
2725
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
2726
|
-
id: import_v419.z.string(),
|
|
2727
|
-
result: import_v419.z.string()
|
|
2728
|
-
});
|
|
2729
|
-
var TOP_LOGPROBS_MAX = 20;
|
|
2730
|
-
var LOGPROBS_SCHEMA = import_v419.z.array(
|
|
2731
|
-
import_v419.z.object({
|
|
2732
|
-
token: import_v419.z.string(),
|
|
2733
|
-
logprob: import_v419.z.number(),
|
|
2734
|
-
top_logprobs: import_v419.z.array(
|
|
2735
|
-
import_v419.z.object({
|
|
2736
|
-
token: import_v419.z.string(),
|
|
2737
|
-
logprob: import_v419.z.number()
|
|
2738
|
-
})
|
|
2739
|
-
)
|
|
2740
|
-
})
|
|
2741
|
-
);
|
|
2742
3363
|
var OpenAIResponsesLanguageModel = class {
|
|
2743
3364
|
constructor(modelId, config) {
|
|
2744
3365
|
this.specificationVersion = "v3";
|
|
@@ -2791,7 +3412,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2791
3412
|
if (stopSequences != null) {
|
|
2792
3413
|
warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
|
|
2793
3414
|
}
|
|
2794
|
-
const openaiOptions = await (0,
|
|
3415
|
+
const openaiOptions = await (0, import_provider_utils29.parseProviderOptions)({
|
|
2795
3416
|
provider: "openai",
|
|
2796
3417
|
providerOptions,
|
|
2797
3418
|
schema: openaiResponsesProviderOptionsSchema
|
|
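Editorial note: the options parsed above are the values an application passes under the openai key of providerOptions; the schema they are checked against is the openaiResponsesProviderOptionsSchema defined earlier in this diff. A hedged usage sketch from the caller side, assuming the usual ai + @ai-sdk/openai entry points, which are not part of this diff:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    async function main() {
      const { text } = await generateText({
        model: openai.responses('gpt-5-mini'),
        prompt: 'Summarize the latest changes.',
        providerOptions: {
          // validated against openaiResponsesProviderOptionsSchema on the provider side
          openai: {
            reasoningEffort: 'low',
            serviceTier: 'flex',
            store: false,
          },
        },
      });
      console.log(text);
    }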
@@ -2930,7 +3551,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2930
3551
|
tools: openaiTools,
|
|
2931
3552
|
toolChoice: openaiToolChoice,
|
|
2932
3553
|
toolWarnings
|
|
2933
|
-
} = prepareResponsesTools({
|
|
3554
|
+
} = await prepareResponsesTools({
|
|
2934
3555
|
tools,
|
|
2935
3556
|
toolChoice,
|
|
2936
3557
|
strictJsonSchema
|
|
@@ -2960,91 +3581,13 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2960
3581
|
responseHeaders,
|
|
2961
3582
|
value: response,
|
|
2962
3583
|
rawValue: rawResponse
|
|
2963
|
-
} = await (0,
|
|
3584
|
+
} = await (0, import_provider_utils29.postJsonToApi)({
|
|
2964
3585
|
url,
|
|
2965
|
-
headers: (0,
|
|
3586
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
2966
3587
|
body,
|
|
2967
3588
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2968
|
-
successfulResponseHandler: (0,
|
|
2969
|
-
|
|
2970
|
-
id: import_v419.z.string(),
|
|
2971
|
-
created_at: import_v419.z.number(),
|
|
2972
|
-
error: import_v419.z.object({
|
|
2973
|
-
code: import_v419.z.string(),
|
|
2974
|
-
message: import_v419.z.string()
|
|
2975
|
-
}).nullish(),
|
|
2976
|
-
model: import_v419.z.string(),
|
|
2977
|
-
output: import_v419.z.array(
|
|
2978
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2979
|
-
import_v419.z.object({
|
|
2980
|
-
type: import_v419.z.literal("message"),
|
|
2981
|
-
role: import_v419.z.literal("assistant"),
|
|
2982
|
-
id: import_v419.z.string(),
|
|
2983
|
-
content: import_v419.z.array(
|
|
2984
|
-
import_v419.z.object({
|
|
2985
|
-
type: import_v419.z.literal("output_text"),
|
|
2986
|
-
text: import_v419.z.string(),
|
|
2987
|
-
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2988
|
-
annotations: import_v419.z.array(
|
|
2989
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2990
|
-
import_v419.z.object({
|
|
2991
|
-
type: import_v419.z.literal("url_citation"),
|
|
2992
|
-
start_index: import_v419.z.number(),
|
|
2993
|
-
end_index: import_v419.z.number(),
|
|
2994
|
-
url: import_v419.z.string(),
|
|
2995
|
-
title: import_v419.z.string()
|
|
2996
|
-
}),
|
|
2997
|
-
import_v419.z.object({
|
|
2998
|
-
type: import_v419.z.literal("file_citation"),
|
|
2999
|
-
file_id: import_v419.z.string(),
|
|
3000
|
-
filename: import_v419.z.string().nullish(),
|
|
3001
|
-
index: import_v419.z.number().nullish(),
|
|
3002
|
-
start_index: import_v419.z.number().nullish(),
|
|
3003
|
-
end_index: import_v419.z.number().nullish(),
|
|
3004
|
-
quote: import_v419.z.string().nullish()
|
|
3005
|
-
}),
|
|
3006
|
-
import_v419.z.object({
|
|
3007
|
-
type: import_v419.z.literal("container_file_citation")
|
|
3008
|
-
})
|
|
3009
|
-
])
|
|
3010
|
-
)
|
|
3011
|
-
})
|
|
3012
|
-
)
|
|
3013
|
-
}),
|
|
3014
|
-
webSearchCallItem,
|
|
3015
|
-
fileSearchCallItem,
|
|
3016
|
-
codeInterpreterCallItem,
|
|
3017
|
-
imageGenerationCallItem,
|
|
3018
|
-
localShellCallItem,
|
|
3019
|
-
import_v419.z.object({
|
|
3020
|
-
type: import_v419.z.literal("function_call"),
|
|
3021
|
-
call_id: import_v419.z.string(),
|
|
3022
|
-
name: import_v419.z.string(),
|
|
3023
|
-
arguments: import_v419.z.string(),
|
|
3024
|
-
id: import_v419.z.string()
|
|
3025
|
-
}),
|
|
3026
|
-
import_v419.z.object({
|
|
3027
|
-
type: import_v419.z.literal("computer_call"),
|
|
3028
|
-
id: import_v419.z.string(),
|
|
3029
|
-
status: import_v419.z.string().optional()
|
|
3030
|
-
}),
|
|
3031
|
-
import_v419.z.object({
|
|
3032
|
-
type: import_v419.z.literal("reasoning"),
|
|
3033
|
-
id: import_v419.z.string(),
|
|
3034
|
-
encrypted_content: import_v419.z.string().nullish(),
|
|
3035
|
-
summary: import_v419.z.array(
|
|
3036
|
-
import_v419.z.object({
|
|
3037
|
-
type: import_v419.z.literal("summary_text"),
|
|
3038
|
-
text: import_v419.z.string()
|
|
3039
|
-
})
|
|
3040
|
-
)
|
|
3041
|
-
})
|
|
3042
|
-
])
|
|
3043
|
-
),
|
|
3044
|
-
service_tier: import_v419.z.string().nullish(),
|
|
3045
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3046
|
-
usage: usageSchema2
|
|
3047
|
-
})
|
|
3589
|
+
successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
|
|
3590
|
+
openaiResponsesResponseSchema
|
|
3048
3591
|
),
|
|
3049
3592
|
abortSignal: options.abortSignal,
|
|
3050
3593
|
fetch: this.config.fetch
|
|
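Editorial note: with the response shape moved into openaiResponsesResponseSchema (the lazy validator defined earlier), the non-streaming request above shrinks to plumbing. A condensed sketch of the call shape used here; the url and headers are placeholders for the values computed from the provider config, and the two handler variables are declared only so the sketch stands alone:

    import {
      combineHeaders,
      createJsonResponseHandler,
      postJsonToApi,
    } from '@ai-sdk/provider-utils';

    // defined in this file; declared here only to keep the sketch self-contained
    declare const openaiResponsesResponseSchema: any;
    declare const openaiFailedResponseHandler: any;

    async function callResponsesApiSketch(body: unknown, abortSignal?: AbortSignal) {
      return postJsonToApi({
        url: 'https://api.openai.com/v1/responses', // placeholder; built via config.url() in the real code
        headers: combineHeaders({ Authorization: 'Bearer <key>' }, undefined),
        body,
        failedResponseHandler: openaiFailedResponseHandler,
        successfulResponseHandler: createJsonResponseHandler(openaiResponsesResponseSchema),
        abortSignal,
      });
    }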
@@ -3107,7 +3650,9 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3107
3650
|
type: "tool-call",
|
|
3108
3651
|
toolCallId: part.call_id,
|
|
3109
3652
|
toolName: "local_shell",
|
|
3110
|
-
input: JSON.stringify({
|
|
3653
|
+
input: JSON.stringify({
|
|
3654
|
+
action: part.action
|
|
3655
|
+
}),
|
|
3111
3656
|
providerMetadata: {
|
|
3112
3657
|
openai: {
|
|
3113
3658
|
itemId: part.id
|
|
@@ -3135,7 +3680,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3135
3680
|
content.push({
|
|
3136
3681
|
type: "source",
|
|
3137
3682
|
sourceType: "url",
|
|
3138
|
-
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
|
|
3683
|
+
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils29.generateId)(),
|
|
3139
3684
|
url: annotation.url,
|
|
3140
3685
|
title: annotation.title
|
|
3141
3686
|
});
|
|
@@ -3143,7 +3688,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3143
3688
|
content.push({
|
|
3144
3689
|
type: "source",
|
|
3145
3690
|
sourceType: "document",
|
|
3146
|
-
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
|
|
3691
|
+
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils29.generateId)(),
|
|
3147
3692
|
mediaType: "text/plain",
|
|
3148
3693
|
title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
|
|
3149
3694
|
filename: (_l = annotation.filename) != null ? _l : annotation.file_id
|
|
@@ -3295,18 +3840,18 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3295
3840
|
warnings,
|
|
3296
3841
|
webSearchToolName
|
|
3297
3842
|
} = await this.getArgs(options);
|
|
3298
|
-
const { responseHeaders, value: response } = await (0,
|
|
3843
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils29.postJsonToApi)({
|
|
3299
3844
|
url: this.config.url({
|
|
3300
3845
|
path: "/responses",
|
|
3301
3846
|
modelId: this.modelId
|
|
3302
3847
|
}),
|
|
3303
|
-
headers: (0,
|
|
3848
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
3304
3849
|
body: {
|
|
3305
3850
|
...body,
|
|
3306
3851
|
stream: true
|
|
3307
3852
|
},
|
|
3308
3853
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
3309
|
-
successfulResponseHandler: (0,
|
|
3854
|
+
successfulResponseHandler: (0, import_provider_utils29.createEventSourceResponseHandler)(
|
|
3310
3855
|
openaiResponsesChunkSchema
|
|
3311
3856
|
),
|
|
3312
3857
|
abortSignal: options.abortSignal,
|
|
@@ -3694,7 +4239,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3694
4239
|
controller.enqueue({
|
|
3695
4240
|
type: "source",
|
|
3696
4241
|
sourceType: "url",
|
|
3697
|
-
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
|
|
4242
|
+
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils29.generateId)(),
|
|
3698
4243
|
url: value.annotation.url,
|
|
3699
4244
|
title: value.annotation.title
|
|
3700
4245
|
});
|
|
@@ -3702,7 +4247,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3702
4247
|
controller.enqueue({
|
|
3703
4248
|
type: "source",
|
|
3704
4249
|
sourceType: "document",
|
|
3705
|
-
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
|
|
4250
|
+
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils29.generateId)(),
|
|
3706
4251
|
mediaType: "text/plain",
|
|
3707
4252
|
title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
|
|
3708
4253
|
filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
|
|
@@ -3738,203 +4283,6 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3738
4283
|
};
|
|
3739
4284
|
}
|
|
3740
4285
|
};
|
|
3741
|
-
var usageSchema2 = import_v419.z.object({
|
|
3742
|
-
input_tokens: import_v419.z.number(),
|
|
3743
|
-
input_tokens_details: import_v419.z.object({ cached_tokens: import_v419.z.number().nullish() }).nullish(),
|
|
3744
|
-
output_tokens: import_v419.z.number(),
|
|
3745
|
-
output_tokens_details: import_v419.z.object({ reasoning_tokens: import_v419.z.number().nullish() }).nullish()
|
|
3746
|
-
});
|
|
3747
|
-
var textDeltaChunkSchema = import_v419.z.object({
|
|
3748
|
-
type: import_v419.z.literal("response.output_text.delta"),
|
|
3749
|
-
item_id: import_v419.z.string(),
|
|
3750
|
-
delta: import_v419.z.string(),
|
|
3751
|
-
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
3752
|
-
});
|
|
3753
|
-
var errorChunkSchema = import_v419.z.object({
|
|
3754
|
-
type: import_v419.z.literal("error"),
|
|
3755
|
-
code: import_v419.z.string(),
|
|
3756
|
-
message: import_v419.z.string(),
|
|
3757
|
-
param: import_v419.z.string().nullish(),
|
|
3758
|
-
sequence_number: import_v419.z.number()
|
|
3759
|
-
});
|
|
3760
|
-
var responseFinishedChunkSchema = import_v419.z.object({
|
|
3761
|
-
type: import_v419.z.enum(["response.completed", "response.incomplete"]),
|
|
3762
|
-
response: import_v419.z.object({
|
|
3763
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3764
|
-
usage: usageSchema2,
|
|
3765
|
-
service_tier: import_v419.z.string().nullish()
|
|
3766
|
-
})
|
|
3767
|
-
});
|
|
3768
|
-
var responseCreatedChunkSchema = import_v419.z.object({
|
|
3769
|
-
type: import_v419.z.literal("response.created"),
|
|
3770
|
-
response: import_v419.z.object({
|
|
3771
|
-
id: import_v419.z.string(),
|
|
3772
|
-
created_at: import_v419.z.number(),
|
|
3773
|
-
model: import_v419.z.string(),
|
|
3774
|
-
service_tier: import_v419.z.string().nullish()
|
|
3775
|
-
})
|
|
3776
|
-
});
|
|
3777
|
-
var responseOutputItemAddedSchema = import_v419.z.object({
|
|
3778
|
-
type: import_v419.z.literal("response.output_item.added"),
|
|
3779
|
-
output_index: import_v419.z.number(),
|
|
3780
|
-
item: import_v419.z.discriminatedUnion("type", [
|
|
3781
|
-
import_v419.z.object({
|
|
3782
|
-
type: import_v419.z.literal("message"),
|
|
3783
|
-
id: import_v419.z.string()
|
|
3784
|
-
}),
|
|
3785
|
-
import_v419.z.object({
|
|
3786
|
-
type: import_v419.z.literal("reasoning"),
|
|
3787
|
-
id: import_v419.z.string(),
|
|
3788
|
-
encrypted_content: import_v419.z.string().nullish()
|
|
3789
|
-
}),
|
|
3790
|
-
import_v419.z.object({
|
|
3791
|
-
type: import_v419.z.literal("function_call"),
|
|
3792
|
-
id: import_v419.z.string(),
|
|
3793
|
-
call_id: import_v419.z.string(),
|
|
3794
|
-
name: import_v419.z.string(),
|
|
3795
|
-
arguments: import_v419.z.string()
|
|
3796
|
-
}),
|
|
3797
|
-
import_v419.z.object({
|
|
3798
|
-
type: import_v419.z.literal("web_search_call"),
|
|
3799
|
-
id: import_v419.z.string(),
|
|
3800
|
-
status: import_v419.z.string(),
|
|
3801
|
-
action: import_v419.z.object({
|
|
3802
|
-
type: import_v419.z.literal("search"),
|
|
3803
|
-
query: import_v419.z.string().optional()
|
|
3804
|
-
}).nullish()
|
|
3805
|
-
}),
|
|
3806
|
-
import_v419.z.object({
|
|
3807
|
-
type: import_v419.z.literal("computer_call"),
|
|
3808
|
-
id: import_v419.z.string(),
|
|
3809
|
-
status: import_v419.z.string()
|
|
3810
|
-
}),
|
|
3811
|
-
import_v419.z.object({
|
|
3812
|
-
type: import_v419.z.literal("file_search_call"),
|
|
3813
|
-
id: import_v419.z.string()
|
|
3814
|
-
}),
|
|
3815
|
-
import_v419.z.object({
|
|
3816
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
3817
|
-
id: import_v419.z.string()
|
|
3818
|
-
}),
|
|
3819
|
-
import_v419.z.object({
|
|
3820
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
3821
|
-
id: import_v419.z.string(),
|
|
3822
|
-
container_id: import_v419.z.string(),
|
|
3823
|
-
code: import_v419.z.string().nullable(),
|
|
3824
|
-
outputs: import_v419.z.array(
|
|
3825
|
-
import_v419.z.discriminatedUnion("type", [
|
|
3826
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
3827
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
3828
|
-
])
|
|
3829
|
-
).nullable(),
|
|
3830
|
-
status: import_v419.z.string()
|
|
3831
|
-
})
|
|
3832
|
-
])
|
|
3833
|
-
});
|
|
3834
|
-
var responseOutputItemDoneSchema = import_v419.z.object({
|
|
3835
|
-
type: import_v419.z.literal("response.output_item.done"),
|
|
3836
|
-
output_index: import_v419.z.number(),
|
|
3837
|
-
item: import_v419.z.discriminatedUnion("type", [
|
|
3838
|
-
import_v419.z.object({
|
|
3839
|
-
type: import_v419.z.literal("message"),
|
|
3840
|
-
id: import_v419.z.string()
|
|
3841
|
-
}),
|
|
3842
|
-
import_v419.z.object({
|
|
3843
|
-
type: import_v419.z.literal("reasoning"),
|
|
3844
|
-
id: import_v419.z.string(),
|
|
3845
|
-
-      encrypted_content: import_v419.z.string().nullish()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("function_call"),
-      id: import_v419.z.string(),
-      call_id: import_v419.z.string(),
-      name: import_v419.z.string(),
-      arguments: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    }),
-    codeInterpreterCallItem,
-    imageGenerationCallItem,
-    webSearchCallItem,
-    fileSearchCallItem,
-    localShellCallItem,
-    import_v419.z.object({
-      type: import_v419.z.literal("computer_call"),
-      id: import_v419.z.string(),
-      status: import_v419.z.literal("completed")
-    })
-  ])
-});
-var responseFunctionCallArgumentsDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.function_call_arguments.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseImageGenerationCallPartialImageSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.image_generation_call.partial_image"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  partial_image_b64: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDoneSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.done"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  code: import_v419.z.string()
-});
-var responseAnnotationAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_text.annotation.added"),
-  annotation: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("url_citation"),
-      url: import_v419.z.string(),
-      title: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("file_citation"),
-      file_id: import_v419.z.string(),
-      filename: import_v419.z.string().nullish(),
-      index: import_v419.z.number().nullish(),
-      start_index: import_v419.z.number().nullish(),
-      end_index: import_v419.z.number().nullish(),
-      quote: import_v419.z.string().nullish()
-    })
-  ])
-});
-var responseReasoningSummaryPartAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_part.added"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number()
-});
-var responseReasoningSummaryTextDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var openaiResponsesChunkSchema = import_v419.z.union([
-  textDeltaChunkSchema,
-  responseFinishedChunkSchema,
-  responseCreatedChunkSchema,
-  responseOutputItemAddedSchema,
-  responseOutputItemDoneSchema,
-  responseFunctionCallArgumentsDeltaSchema,
-  responseImageGenerationCallPartialImageSchema,
-  responseCodeInterpreterCallCodeDeltaSchema,
-  responseCodeInterpreterCallCodeDoneSchema,
-  responseAnnotationAddedSchema,
-  responseReasoningSummaryPartAddedSchema,
-  responseReasoningSummaryTextDeltaSchema,
-  errorChunkSchema,
-  import_v419.z.object({ type: import_v419.z.string() }).loose()
-  // fallback for unknown chunks
-]);
 function isTextDeltaChunk(chunk) {
   return chunk.type === "response.output_text.delta";
 }
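For orientation, the removed chunk schemas above follow a common pattern for validating streaming events: a union of known event shapes plus a loose catch-all so unrecognized event types do not fail parsing, paired with small type guards like isTextDeltaChunk. The sketch below is a simplified illustration of that pattern with reduced event names and fields; it is not the package's new consolidated implementation.

import { z } from "zod/v4";

// Two known streaming event shapes (reduced for illustration).
const textDeltaChunk = z.object({
  type: z.literal("response.output_text.delta"),
  delta: z.string(),
});
const errorChunk = z.object({
  type: z.literal("error"),
  message: z.string(),
});

// Union of the known chunks plus a loose fallback object, so events
// with unknown "type" values still parse instead of throwing.
const chunkSchema = z.union([
  textDeltaChunk,
  errorChunk,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
]);

type Chunk = z.infer<typeof chunkSchema>;

// Narrowing guard in the style of isTextDeltaChunk above.
function isTextDelta(chunk: Chunk): chunk is z.infer<typeof textDeltaChunk> {
  return chunk.type === "response.output_text.delta";
}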
@@ -4014,47 +4362,6 @@ function getResponsesModelConfig(modelId) {
     isReasoningModel: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_v419.z.object({
-  include: import_v419.z.array(
-    import_v419.z.enum([
-      "reasoning.encrypted_content",
-      "file_search_call.results",
-      "message.output_text.logprobs"
-    ])
-  ).nullish(),
-  instructions: import_v419.z.string().nullish(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   *
-   * @see https://platform.openai.com/docs/api-reference/responses/create
-   * @see https://cookbook.openai.com/examples/using_logprobs
-   */
-  logprobs: import_v419.z.union([import_v419.z.boolean(), import_v419.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
-  /**
-   * The maximum number of total calls to built-in tools that can be processed in a response.
-   * This maximum number applies across all built-in tool calls, not per individual tool.
-   * Any further attempts to call a tool by the model will be ignored.
-   */
-  maxToolCalls: import_v419.z.number().nullish(),
-  metadata: import_v419.z.any().nullish(),
-  parallelToolCalls: import_v419.z.boolean().nullish(),
-  previousResponseId: import_v419.z.string().nullish(),
-  promptCacheKey: import_v419.z.string().nullish(),
-  reasoningEffort: import_v419.z.string().nullish(),
-  reasoningSummary: import_v419.z.string().nullish(),
-  safetyIdentifier: import_v419.z.string().nullish(),
-  serviceTier: import_v419.z.enum(["auto", "flex", "priority"]).nullish(),
-  store: import_v419.z.boolean().nullish(),
-  strictJsonSchema: import_v419.z.boolean().nullish(),
-  textVerbosity: import_v419.z.enum(["low", "medium", "high"]).nullish(),
-  user: import_v419.z.string().nullish()
-});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
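The openaiResponsesProviderOptionsSchema removed above validates the provider-specific options callers pass for the Responses API. A minimal usage sketch, assuming the standard AI SDK generateText call shape; the model id and option values are illustrative, while the option names are taken from the schema fields shown above.

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o-mini"), // model id is illustrative
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      // Option names mirror openaiResponsesProviderOptionsSchema above.
      parallelToolCalls: false,
      serviceTier: "auto",
      textVerbosity: "low",
      store: false,
    },
  },
});

console.log(result.text);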
@@ -4080,6 +4387,7 @@ var openaiResponsesProviderOptionsSchema = import_v419.z.object({
   openAITranscriptionProviderOptions,
   openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions,
+  openaiSpeechProviderOptionsSchema
 });
 //# sourceMappingURL=index.js.map