@ai-sdk/openai 2.0.43 → 2.0.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1345 -1028
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1299 -937
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -182
- package/dist/internal/index.d.ts +101 -182
- package/dist/internal/index.js +1342 -1023
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1311 -948
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
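Most of what changed in `dist/internal/index.js` (and the other bundles) is a schema refactor: the Zod validators that 2.0.43 built eagerly at module load via `import_v4*.z.object(...)` now live in dedicated `*-api` / `*-options` modules and are wrapped in the `lazyValidator` / `lazySchema` + `zodSchema` helpers from `@ai-sdk/provider-utils`, with `zod/v4` pulled in through a new `__toESM` interop shim. The sketch below illustrates the before/after shape only; it assumes simplified helper usage and is not code from the package.

```js
// Illustrative sketch (assumed simplified usage), not code from @ai-sdk/openai.
const { lazyValidator, zodSchema } = require("@ai-sdk/provider-utils");
const z = require("zod/v4");

// 2.0.43 pattern: the Zod schema object is constructed as soon as the bundle loads.
const eagerSchema = z.object({ id: z.string().nullish() });

// 2.0.45 pattern: construction is deferred behind a factory and only happens when
// the validator is first used (e.g. by parseProviderOptions or a response handler).
const deferredSchema = lazyValidator(() => zodSchema(z.object({ id: z.string().nullish() })));
```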
package/dist/internal/index.js
CHANGED
@@ -1,7 +1,9 @@
 "use strict";
+var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
 for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
 }
 return to;
 };
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+// If the importer is in node compatibility mode or this is not an ESM
+// file that has been converted to a CommonJS file using a Babel-
+// compatible transform (i.e. "__esModule" has not been set), then set
+// "default" to the CommonJS "module.exports" for node compatibility.
+isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+mod
+));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

 // src/internal/index.ts
@@ -43,27 +53,27 @@ __export(internal_exports, {
 openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
 openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
 openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
-openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
+openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+openaiSpeechProviderOptionsSchema: () => openaiSpeechProviderOptionsSchema
 });
 module.exports = __toCommonJS(internal_exports);

 // src/chat/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
-var
-var import_v43 = require("zod/v4");
+var import_provider_utils5 = require("@ai-sdk/provider-utils");

 // src/openai-error.ts
-var
+var z = __toESM(require("zod/v4"));
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema =
-error:
-message:
+var openaiErrorDataSchema = z.object({
+error: z.object({
+message: z.string(),
 // The additional information below is handled loosely to support
 // OpenAI-compatible providers that have slightly different error
 // responses:
-type:
-param:
-code:
+type: z.string().nullish(),
+param: z.any().nullish(),
+code: z.union([z.string(), z.number()]).nullish()
 })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -279,95 +289,238 @@ function mapOpenAIFinishReason(finishReason) {
 }
 }

+// src/chat/openai-chat-api.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var z2 = __toESM(require("zod/v4"));
+var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+() => (0, import_provider_utils3.zodSchema)(
+z2.object({
+id: z2.string().nullish(),
+created: z2.number().nullish(),
+model: z2.string().nullish(),
+choices: z2.array(
+z2.object({
+message: z2.object({
+role: z2.literal("assistant").nullish(),
+content: z2.string().nullish(),
+tool_calls: z2.array(
+z2.object({
+id: z2.string().nullish(),
+type: z2.literal("function"),
+function: z2.object({
+name: z2.string(),
+arguments: z2.string()
+})
+})
+).nullish(),
+annotations: z2.array(
+z2.object({
+type: z2.literal("url_citation"),
+start_index: z2.number(),
+end_index: z2.number(),
+url: z2.string(),
+title: z2.string()
+})
+).nullish()
+}),
+index: z2.number(),
+logprobs: z2.object({
+content: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number(),
+top_logprobs: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number()
+})
+)
+})
+).nullish()
+}).nullish(),
+finish_reason: z2.string().nullish()
+})
+),
+usage: z2.object({
+prompt_tokens: z2.number().nullish(),
+completion_tokens: z2.number().nullish(),
+total_tokens: z2.number().nullish(),
+prompt_tokens_details: z2.object({
+cached_tokens: z2.number().nullish()
+}).nullish(),
+completion_tokens_details: z2.object({
+reasoning_tokens: z2.number().nullish(),
+accepted_prediction_tokens: z2.number().nullish(),
+rejected_prediction_tokens: z2.number().nullish()
+}).nullish()
+}).nullish()
+})
+)
+);
+var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+() => (0, import_provider_utils3.zodSchema)(
+z2.union([
+z2.object({
+id: z2.string().nullish(),
+created: z2.number().nullish(),
+model: z2.string().nullish(),
+choices: z2.array(
+z2.object({
+delta: z2.object({
+role: z2.enum(["assistant"]).nullish(),
+content: z2.string().nullish(),
+tool_calls: z2.array(
+z2.object({
+index: z2.number(),
+id: z2.string().nullish(),
+type: z2.literal("function").nullish(),
+function: z2.object({
+name: z2.string().nullish(),
+arguments: z2.string().nullish()
+})
+})
+).nullish(),
+annotations: z2.array(
+z2.object({
+type: z2.literal("url_citation"),
+start_index: z2.number(),
+end_index: z2.number(),
+url: z2.string(),
+title: z2.string()
+})
+).nullish()
+}).nullish(),
+logprobs: z2.object({
+content: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number(),
+top_logprobs: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number()
+})
+)
+})
+).nullish()
+}).nullish(),
+finish_reason: z2.string().nullish(),
+index: z2.number()
+})
+),
+usage: z2.object({
+prompt_tokens: z2.number().nullish(),
+completion_tokens: z2.number().nullish(),
+total_tokens: z2.number().nullish(),
+prompt_tokens_details: z2.object({
+cached_tokens: z2.number().nullish()
+}).nullish(),
+completion_tokens_details: z2.object({
+reasoning_tokens: z2.number().nullish(),
+accepted_prediction_tokens: z2.number().nullish(),
+rejected_prediction_tokens: z2.number().nullish()
+}).nullish()
+}).nullish()
+}),
+openaiErrorDataSchema
+])
+)
+);
+
 // src/chat/openai-chat-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var z3 = __toESM(require("zod/v4"));
+var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
+() => (0, import_provider_utils4.zodSchema)(
+z3.object({
+/**
+* Modify the likelihood of specified tokens appearing in the completion.
+*
+* Accepts a JSON object that maps tokens (specified by their token ID in
+* the GPT tokenizer) to an associated bias value from -100 to 100.
+*/
+logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
+/**
+* Return the log probabilities of the tokens.
+*
+* Setting to true will return the log probabilities of the tokens that
+* were generated.
+*
+* Setting to a number will return the log probabilities of the top n
+* tokens that were generated.
+*/
+logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
+/**
+* Whether to enable parallel function calling during tool use. Default to true.
+*/
+parallelToolCalls: z3.boolean().optional(),
+/**
+* A unique identifier representing your end-user, which can help OpenAI to
+* monitor and detect abuse.
+*/
+user: z3.string().optional(),
+/**
+* Reasoning effort for reasoning models. Defaults to `medium`.
+*/
+reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+/**
+* Maximum number of completion tokens to generate. Useful for reasoning models.
+*/
+maxCompletionTokens: z3.number().optional(),
+/**
+* Whether to enable persistence in responses API.
+*/
+store: z3.boolean().optional(),
+/**
+* Metadata to associate with the request.
+*/
+metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
+/**
+* Parameters for prediction mode.
+*/
+prediction: z3.record(z3.string(), z3.any()).optional(),
+/**
+* Whether to use structured outputs.
+*
+* @default true
+*/
+structuredOutputs: z3.boolean().optional(),
+/**
+* Service tier for the request.
+* - 'auto': Default service tier
+* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+*
+* @default 'auto'
+*/
+serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
+/**
+* Whether to use strict JSON schema validation.
+*
+* @default false
+*/
+strictJsonSchema: z3.boolean().optional(),
+/**
+* Controls the verbosity of the model's responses.
+* Lower values will result in more concise responses, while higher values will result in more verbose responses.
+*/
+textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
+/**
+* A cache key for prompt caching. Allows manual control over prompt caching behavior.
+* Useful for improving cache hit rates and working around automatic caching issues.
+*/
+promptCacheKey: z3.string().optional(),
+/**
+* A stable identifier used to help detect users of your application
+* that may be violating OpenAI's usage policies. The IDs should be a
+* string that uniquely identifies each user. We recommend hashing their
+* username or email address, in order to avoid sending us any identifying
+* information.
+*/
+safetyIdentifier: z3.string().optional()
+})
+)
+);

 // src/chat/openai-chat-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
@@ -460,7 +613,7 @@ var OpenAIChatLanguageModel = class {
 }) {
 var _a, _b, _c, _d;
 const warnings = [];
-const openaiOptions = (_a = await (0,
+const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiChatLanguageModelOptions
@@ -639,15 +792,15 @@ var OpenAIChatLanguageModel = class {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
 openaiChatResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -662,7 +815,7 @@ var OpenAIChatLanguageModel = class {
 for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
 content.push({
 type: "tool-call",
-toolCallId: (_b = toolCall.id) != null ? _b : (0,
+toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
 input: toolCall.function.arguments
 });
@@ -671,7 +824,7 @@ var OpenAIChatLanguageModel = class {
 content.push({
 type: "source",
 sourceType: "url",
-id: (0,
+id: (0, import_provider_utils5.generateId)(),
 url: annotation.url,
 title: annotation.title
 });
@@ -717,15 +870,15 @@ var OpenAIChatLanguageModel = class {
 include_usage: true
 }
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
 openaiChatChunkSchema
 ),
 abortSignal: options.abortSignal,
@@ -850,14 +1003,14 @@ var OpenAIChatLanguageModel = class {
 delta: toolCall2.function.arguments
 });
 }
-if ((0,
+if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
 controller.enqueue({
 type: "tool-input-end",
 id: toolCall2.id
 });
 controller.enqueue({
 type: "tool-call",
-toolCallId: (_q = toolCall2.id) != null ? _q : (0,
+toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
 toolName: toolCall2.function.name,
 input: toolCall2.function.arguments
 });
@@ -878,14 +1031,14 @@ var OpenAIChatLanguageModel = class {
 id: toolCall.id,
 delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
 });
-if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
+if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
 controller.enqueue({
 type: "tool-input-end",
 id: toolCall.id
 });
 controller.enqueue({
 type: "tool-call",
-toolCallId: (_x = toolCall.id) != null ? _x : (0,
+toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
 input: toolCall.function.arguments
 });
@@ -898,7 +1051,7 @@ var OpenAIChatLanguageModel = class {
 controller.enqueue({
 type: "source",
 sourceType: "url",
-id: (0,
+id: (0, import_provider_utils5.generateId)(),
 url: annotation.url,
 title: annotation.title
 });
@@ -923,121 +1076,6 @@ var OpenAIChatLanguageModel = class {
 };
 }
 };
-var openaiTokenUsageSchema = import_v43.z.object({
-prompt_tokens: import_v43.z.number().nullish(),
-completion_tokens: import_v43.z.number().nullish(),
-total_tokens: import_v43.z.number().nullish(),
-prompt_tokens_details: import_v43.z.object({
-cached_tokens: import_v43.z.number().nullish()
-}).nullish(),
-completion_tokens_details: import_v43.z.object({
-reasoning_tokens: import_v43.z.number().nullish(),
-accepted_prediction_tokens: import_v43.z.number().nullish(),
-rejected_prediction_tokens: import_v43.z.number().nullish()
-}).nullish()
-}).nullish();
-var openaiChatResponseSchema = import_v43.z.object({
-id: import_v43.z.string().nullish(),
-created: import_v43.z.number().nullish(),
-model: import_v43.z.string().nullish(),
-choices: import_v43.z.array(
-import_v43.z.object({
-message: import_v43.z.object({
-role: import_v43.z.literal("assistant").nullish(),
-content: import_v43.z.string().nullish(),
-tool_calls: import_v43.z.array(
-import_v43.z.object({
-id: import_v43.z.string().nullish(),
-type: import_v43.z.literal("function"),
-function: import_v43.z.object({
-name: import_v43.z.string(),
-arguments: import_v43.z.string()
-})
-})
-).nullish(),
-annotations: import_v43.z.array(
-import_v43.z.object({
-type: import_v43.z.literal("url_citation"),
-start_index: import_v43.z.number(),
-end_index: import_v43.z.number(),
-url: import_v43.z.string(),
-title: import_v43.z.string()
-})
-).nullish()
-}),
-index: import_v43.z.number(),
-logprobs: import_v43.z.object({
-content: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number(),
-top_logprobs: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number()
-})
-)
-})
-).nullish()
-}).nullish(),
-finish_reason: import_v43.z.string().nullish()
-})
-),
-usage: openaiTokenUsageSchema
-});
-var openaiChatChunkSchema = import_v43.z.union([
-import_v43.z.object({
-id: import_v43.z.string().nullish(),
-created: import_v43.z.number().nullish(),
-model: import_v43.z.string().nullish(),
-choices: import_v43.z.array(
-import_v43.z.object({
-delta: import_v43.z.object({
-role: import_v43.z.enum(["assistant"]).nullish(),
-content: import_v43.z.string().nullish(),
-tool_calls: import_v43.z.array(
-import_v43.z.object({
-index: import_v43.z.number(),
-id: import_v43.z.string().nullish(),
-type: import_v43.z.literal("function").nullish(),
-function: import_v43.z.object({
-name: import_v43.z.string().nullish(),
-arguments: import_v43.z.string().nullish()
-})
-})
-).nullish(),
-annotations: import_v43.z.array(
-import_v43.z.object({
-type: import_v43.z.literal("url_citation"),
-start_index: import_v43.z.number(),
-end_index: import_v43.z.number(),
-url: import_v43.z.string(),
-title: import_v43.z.string()
-})
-).nullish()
-}).nullish(),
-logprobs: import_v43.z.object({
-content: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number(),
-top_logprobs: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number()
-})
-)
-})
-).nullish()
-}).nullish(),
-finish_reason: import_v43.z.string().nullish(),
-index: import_v43.z.number()
-})
-),
-usage: openaiTokenUsageSchema
-}),
-openaiErrorDataSchema
-]);
 function isReasoningModel(modelId) {
 return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
@@ -1088,8 +1126,7 @@ var reasoningModels = {
 };

 // src/completion/openai-completion-language-model.ts
-var
-var import_v45 = require("zod/v4");
+var import_provider_utils8 = require("@ai-sdk/provider-utils");

 // src/completion/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -1196,48 +1233,111 @@ function mapOpenAIFinishReason2(finishReason) {
 }
 }

+// src/completion/openai-completion-api.ts
+var z4 = __toESM(require("zod/v4"));
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
+() => (0, import_provider_utils6.zodSchema)(
+z4.object({
+id: z4.string().nullish(),
+created: z4.number().nullish(),
+model: z4.string().nullish(),
+choices: z4.array(
+z4.object({
+text: z4.string(),
+finish_reason: z4.string(),
+logprobs: z4.object({
+tokens: z4.array(z4.string()),
+token_logprobs: z4.array(z4.number()),
+top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+}).nullish()
+})
+),
+usage: z4.object({
+prompt_tokens: z4.number(),
+completion_tokens: z4.number(),
+total_tokens: z4.number()
+}).nullish()
+})
+)
+);
+var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
+() => (0, import_provider_utils6.zodSchema)(
+z4.union([
+z4.object({
+id: z4.string().nullish(),
+created: z4.number().nullish(),
+model: z4.string().nullish(),
+choices: z4.array(
+z4.object({
+text: z4.string(),
+finish_reason: z4.string().nullish(),
+index: z4.number(),
+logprobs: z4.object({
+tokens: z4.array(z4.string()),
+token_logprobs: z4.array(z4.number()),
+top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+}).nullish()
+})
+),
+usage: z4.object({
+prompt_tokens: z4.number(),
+completion_tokens: z4.number(),
+total_tokens: z4.number()
+}).nullish()
+}),
+openaiErrorDataSchema
+])
+)
+);
+
 // src/completion/openai-completion-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var z5 = __toESM(require("zod/v4"));
+var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
+() => (0, import_provider_utils7.zodSchema)(
+z5.object({
+/**
+Echo back the prompt in addition to the completion.
+*/
+echo: z5.boolean().optional(),
+/**
+Modify the likelihood of specified tokens appearing in the completion.
+
+Accepts a JSON object that maps tokens (specified by their token ID in
+the GPT tokenizer) to an associated bias value from -100 to 100. You
+can use this tokenizer tool to convert text to token IDs. Mathematically,
+the bias is added to the logits generated by the model prior to sampling.
+The exact effect will vary per model, but values between -1 and 1 should
+decrease or increase likelihood of selection; values like -100 or 100
+should result in a ban or exclusive selection of the relevant token.
+
+As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+token from being generated.
+*/
+logitBias: z5.record(z5.string(), z5.number()).optional(),
+/**
+The suffix that comes after a completion of inserted text.
+*/
+suffix: z5.string().optional(),
+/**
+A unique identifier representing your end-user, which can help OpenAI to
+monitor and detect abuse. Learn more.
+*/
+user: z5.string().optional(),
+/**
+Return the log probabilities of the tokens. Including logprobs will increase
+the response size and can slow down response times. However, it can
+be useful to better understand how the model is behaving.
+Setting to true will return the log probabilities of the tokens that
+were generated.
+Setting to a number will return the log probabilities of the top n
+tokens that were generated.
+*/
+logprobs: z5.union([z5.boolean(), z5.number()]).optional()
+})
+)
+);

 // src/completion/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
@@ -1272,12 +1372,12 @@ var OpenAICompletionLanguageModel = class {
 }) {
 const warnings = [];
 const openaiOptions = {
-...await (0,
+...await (0, import_provider_utils8.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiCompletionProviderOptions
 }),
-...await (0,
+...await (0, import_provider_utils8.parseProviderOptions)({
 provider: this.providerOptionsName,
 providerOptions,
 schema: openaiCompletionProviderOptions
@@ -1333,15 +1433,15 @@ var OpenAICompletionLanguageModel = class {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils8.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
 body: args,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
 openaiCompletionResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -1379,15 +1479,15 @@ var OpenAICompletionLanguageModel = class {
 include_usage: true
 }
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
 openaiCompletionChunkSchema
 ),
 abortSignal: options.abortSignal,
@@ -1468,69 +1568,42 @@ var OpenAICompletionLanguageModel = class {
 };
 }
 };
-var usageSchema = import_v45.z.object({
-prompt_tokens: import_v45.z.number(),
-completion_tokens: import_v45.z.number(),
-total_tokens: import_v45.z.number()
-});
-var openaiCompletionResponseSchema = import_v45.z.object({
-id: import_v45.z.string().nullish(),
-created: import_v45.z.number().nullish(),
-model: import_v45.z.string().nullish(),
-choices: import_v45.z.array(
-import_v45.z.object({
-text: import_v45.z.string(),
-finish_reason: import_v45.z.string(),
-logprobs: import_v45.z.object({
-tokens: import_v45.z.array(import_v45.z.string()),
-token_logprobs: import_v45.z.array(import_v45.z.number()),
-top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-}).nullish()
-})
-),
-usage: usageSchema.nullish()
-});
-var openaiCompletionChunkSchema = import_v45.z.union([
-import_v45.z.object({
-id: import_v45.z.string().nullish(),
-created: import_v45.z.number().nullish(),
-model: import_v45.z.string().nullish(),
-choices: import_v45.z.array(
-import_v45.z.object({
-text: import_v45.z.string(),
-finish_reason: import_v45.z.string().nullish(),
-index: import_v45.z.number(),
-logprobs: import_v45.z.object({
-tokens: import_v45.z.array(import_v45.z.string()),
-token_logprobs: import_v45.z.array(import_v45.z.number()),
-top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-}).nullish()
-})
-),
-usage: usageSchema.nullish()
-}),
-openaiErrorDataSchema
-]);

 // src/embedding/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
-var
-var import_v47 = require("zod/v4");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");

 // src/embedding/openai-embedding-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var z6 = __toESM(require("zod/v4"));
+var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
+() => (0, import_provider_utils9.zodSchema)(
+z6.object({
+/**
+The number of dimensions the resulting output embeddings should have.
+Only supported in text-embedding-3 and later models.
+*/
+dimensions: z6.number().optional(),
+/**
+A unique identifier representing your end-user, which can help OpenAI to
+monitor and detect abuse. Learn more.
+*/
+user: z6.string().optional()
+})
+)
+);
+
+// src/embedding/openai-embedding-api.ts
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var z7 = __toESM(require("zod/v4"));
+var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
+() => (0, import_provider_utils10.zodSchema)(
+z7.object({
+data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+})
+)
+);

 // src/embedding/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
@@ -1559,7 +1632,7 @@ var OpenAIEmbeddingModel = class {
 values
 });
 }
-const openaiOptions = (_a = await (0,
+const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiEmbeddingProviderOptions
@@ -1568,12 +1641,12 @@ var OpenAIEmbeddingModel = class {
 responseHeaders,
 value: response,
 rawValue
-} = await (0,
+} = await (0, import_provider_utils11.postJsonToApi)({
 url: this.config.url({
 path: "/embeddings",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
 body: {
 model: this.modelId,
 input: values,
@@ -1582,7 +1655,7 @@ var OpenAIEmbeddingModel = class {
 user: openaiOptions.user
 },
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
 openaiTextEmbeddingResponseSchema
 ),
 abortSignal,
@@ -1595,14 +1668,25 @@ var OpenAIEmbeddingModel = class {
 };
 }
 };
-var openaiTextEmbeddingResponseSchema = import_v47.z.object({
-data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
-usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
-});

 // src/image/openai-image-model.ts
-var
-
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
+
+// src/image/openai-image-api.ts
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var z8 = __toESM(require("zod/v4"));
+var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
+() => (0, import_provider_utils12.zodSchema)(
+z8.object({
+data: z8.array(
+z8.object({
+b64_json: z8.string(),
+revised_prompt: z8.string().optional()
+})
+)
+})
+)
+);

 // src/image/openai-image-options.ts
 var modelMaxImagesPerCall = {
@@ -1653,12 +1737,12 @@ var OpenAIImageModel = class {
 warnings.push({ type: "unsupported-setting", setting: "seed" });
 }
 const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-const { value: response, responseHeaders } = await (0,
+const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
 url: this.config.url({
 path: "/images/generations",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
 body: {
 model: this.modelId,
 prompt,
@@ -1668,7 +1752,7 @@ var OpenAIImageModel = class {
 ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
 },
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
 openaiImageResponseSchema
 ),
 abortSignal,
@@ -1694,42 +1778,75 @@ var OpenAIImageModel = class {
 };
 }
 };
-var openaiImageResponseSchema = import_v48.z.object({
-data: import_v48.z.array(
-import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
-)
-});

 // src/transcription/openai-transcription-model.ts
-var
-
+var import_provider_utils16 = require("@ai-sdk/provider-utils");
+
+// src/transcription/openai-transcription-api.ts
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var z9 = __toESM(require("zod/v4"));
+var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidator)(
+() => (0, import_provider_utils14.zodSchema)(
+z9.object({
+text: z9.string(),
+language: z9.string().nullish(),
+duration: z9.number().nullish(),
+words: z9.array(
+z9.object({
+word: z9.string(),
+start: z9.number(),
+end: z9.number()
+})
+).nullish(),
+segments: z9.array(
+z9.object({
+id: z9.number(),
+seek: z9.number(),
+start: z9.number(),
+end: z9.number(),
+text: z9.string(),
+tokens: z9.array(z9.number()),
+temperature: z9.number(),
+avg_logprob: z9.number(),
+compression_ratio: z9.number(),
+no_speech_prob: z9.number()
+})
+).nullish()
+})
+)
+);

 // src/transcription/openai-transcription-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var z10 = __toESM(require("zod/v4"));
+var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazyValidator)(
+() => (0, import_provider_utils15.zodSchema)(
+z10.object({
+/**
+* Additional information to include in the transcription response.
+*/
+include: z10.array(z10.string()).optional(),
+/**
+* The language of the input audio in ISO-639-1 format.
+*/
+language: z10.string().optional(),
+/**
+* An optional text to guide the model's style or continue a previous audio segment.
+*/
+prompt: z10.string().optional(),
+/**
+* The sampling temperature, between 0 and 1.
+* @default 0
+*/
+temperature: z10.number().min(0).max(1).default(0).optional(),
+/**
+* The timestamp granularities to populate for this transcription.
+* @default ['segment']
+*/
+timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
+})
+)
+);

 // src/transcription/openai-transcription-model.ts
 var languageMap = {
@@ -1806,15 +1923,15 @@ var OpenAITranscriptionModel = class {
 providerOptions
 }) {
 const warnings = [];
-const openAIOptions = await (0,
+const openAIOptions = await (0, import_provider_utils16.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openAITranscriptionProviderOptions
 });
 const formData = new FormData();
-const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
+const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils16.convertBase64ToUint8Array)(audio)]);
 formData.append("model", this.modelId);
-const fileExtension = (0,
+const fileExtension = (0, import_provider_utils16.mediaTypeToExtension)(mediaType);
 formData.append(
 "file",
 new File([blob], "audio", { type: mediaType }),
@@ -1859,15 +1976,15 @@ var OpenAITranscriptionModel = class {
 value: response,
 responseHeaders,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils16.postFormDataToApi)({
 url: this.config.url({
 path: "/audio/transcriptions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils16.combineHeaders)(this.config.headers(), options.headers),
 formData,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils16.createJsonResponseHandler)(
 openaiTranscriptionResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -1897,40 +2014,23 @@ var OpenAITranscriptionModel = class {
 };
 }
 };
-
-
-
-
-
-
-
-
-
-
-
-
-import_v410.z.object({
-id: import_v410.z.number(),
-seek: import_v410.z.number(),
-start: import_v410.z.number(),
-end: import_v410.z.number(),
-text: import_v410.z.string(),
-tokens: import_v410.z.array(import_v410.z.number()),
-temperature: import_v410.z.number(),
-avg_logprob: import_v410.z.number(),
-compression_ratio: import_v410.z.number(),
-no_speech_prob: import_v410.z.number()
+
+// src/speech/openai-speech-model.ts
+var import_provider_utils18 = require("@ai-sdk/provider-utils");
+
+// src/speech/openai-speech-options.ts
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
+var z11 = __toESM(require("zod/v4"));
+var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazyValidator)(
+() => (0, import_provider_utils17.zodSchema)(
+z11.object({
+instructions: z11.string().nullish(),
+speed: z11.number().min(0.25).max(4).default(1).nullish()
 })
-)
-
+)
+);

 // src/speech/openai-speech-model.ts
-var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_v411 = require("zod/v4");
-var OpenAIProviderOptionsSchema = import_v411.z.object({
-instructions: import_v411.z.string().nullish(),
-speed: import_v411.z.number().min(0.25).max(4).default(1).nullish()
-});
 var OpenAISpeechModel = class {
 constructor(modelId, config) {
 this.modelId = modelId;
@@ -1950,10 +2050,10 @@ var OpenAISpeechModel = class {
 providerOptions
 }) {
 const warnings = [];
-const openAIOptions = await (0,
+const openAIOptions = await (0, import_provider_utils18.parseProviderOptions)({
 provider: "openai",
 providerOptions,
-schema:
+schema: openaiSpeechProviderOptionsSchema
 });
 const requestBody = {
 model: this.modelId,
@@ -2003,15 +2103,15 @@ var OpenAISpeechModel = class {
 value: audio,
 responseHeaders,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils18.postJsonToApi)({
 url: this.config.url({
 path: "/audio/speech",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils18.combineHeaders)(this.config.headers(), options.headers),
 body: requestBody,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils18.createBinaryResponseHandler)(),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
@@ -2033,31 +2133,34 @@ var OpenAISpeechModel = class {

 // src/responses/openai-responses-language-model.ts
 var import_provider8 = require("@ai-sdk/provider");
-var
-var import_v419 = require("zod/v4");
+var import_provider_utils29 = require("@ai-sdk/provider-utils");

 // src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
-var
-var
+var import_provider_utils20 = require("@ai-sdk/provider-utils");
+var z13 = __toESM(require("zod/v4"));

 // src/tool/local-shell.ts
-var
-var
-var localShellInputSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils19 = require("@ai-sdk/provider-utils");
+var z12 = __toESM(require("zod/v4"));
+var localShellInputSchema = (0, import_provider_utils19.lazySchema)(
+() => (0, import_provider_utils19.zodSchema)(
+z12.object({
+action: z12.object({
+type: z12.literal("exec"),
+command: z12.array(z12.string()),
+timeoutMs: z12.number().optional(),
+user: z12.string().optional(),
+workingDirectory: z12.string().optional(),
+env: z12.record(z12.string(), z12.string()).optional()
+})
+})
+)
+);
+var localShellOutputSchema = (0, import_provider_utils19.lazySchema)(
+() => (0, import_provider_utils19.zodSchema)(z12.object({ output: z12.string() }))
+);
+var localShell = (0, import_provider_utils19.createProviderDefinedToolFactoryWithOutputSchema)({
 id: "openai.local_shell",
 name: "local_shell",
 inputSchema: localShellInputSchema,
@@ -2122,7 +2225,7 @@ async function convertToOpenAIResponsesInput({
 return {
 type: "input_image",
 ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
-image_url: `data:${mediaType};base64,${(0,
+image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
 },
 detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
 };
@@ -2137,7 +2240,7 @@ async function convertToOpenAIResponsesInput({
 type: "input_file",
 ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
 filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
-file_data: `data:application/pdf;base64,${(0,
+file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
 }
 };
 } else {
@@ -2170,7 +2273,10 @@ async function convertToOpenAIResponsesInput({
 break;
 }
 if (hasLocalShellTool && part.toolName === "local_shell") {
-const parsedInput =
+const parsedInput = await (0, import_provider_utils20.validateTypes)({
+value: part.input,
+schema: localShellInputSchema
+});
 input.push({
 type: "local_shell_call",
 call_id: part.toolCallId,
@@ -2208,7 +2314,7 @@ async function convertToOpenAIResponsesInput({
 break;
 }
 case "reasoning": {
-const providerOptions = await (0,
+const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
 provider: "openai",
 providerOptions: part.providerOptions,
 schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2266,10 +2372,14 @@ async function convertToOpenAIResponsesInput({
 for (const part of content) {
 const output = part.output;
 if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
+const parsedOutput = await (0, import_provider_utils20.validateTypes)({
+value: output.value,
+schema: localShellOutputSchema
+});
 input.push({
 type: "local_shell_call_output",
 call_id: part.toolCallId,
-output:
+output: parsedOutput.output
 });
 break;
 }
@@ -2301,9 +2411,9 @@ async function convertToOpenAIResponsesInput({
 }
 return { input, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema =
-itemId:
-reasoningEncryptedContent:
+var openaiResponsesReasoningProviderOptionsSchema = z13.object({
+itemId: z13.string().nullish(),
+reasoningEncryptedContent: z13.string().nullish()
 });

 // src/responses/map-openai-responses-finish-reason.ts
@@ -2324,33 +2434,574 @@ function mapOpenAIResponseFinishReason({
 }
 }

+// src/responses/openai-responses-api.ts
+var import_provider_utils21 = require("@ai-sdk/provider-utils");
+var z14 = __toESM(require("zod/v4"));
+var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
+() => (0, import_provider_utils21.zodSchema)(
+z14.union([
+z14.object({
+type: z14.literal("response.output_text.delta"),
+item_id: z14.string(),
+delta: z14.string(),
+logprobs: z14.array(
+z14.object({
+token: z14.string(),
+logprob: z14.number(),
+top_logprobs: z14.array(
+z14.object({
+token: z14.string(),
+logprob: z14.number()
+})
+)
+})
+).nullish()
+}),
+z14.object({
+type: z14.enum(["response.completed", "response.incomplete"]),
+response: z14.object({
+incomplete_details: z14.object({ reason: z14.string() }).nullish(),
+usage: z14.object({
+input_tokens: z14.number(),
+input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
+output_tokens: z14.number(),
+output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
+}),
+service_tier: z14.string().nullish()
+})
+}),
+z14.object({
+type: z14.literal("response.created"),
+response: z14.object({
+id: z14.string(),
+created_at: z14.number(),
+model: z14.string(),
+service_tier: z14.string().nullish()
+})
+}),
+z14.object({
+type: z14.literal("response.output_item.added"),
+output_index: z14.number(),
+item: z14.discriminatedUnion("type", [
+z14.object({
+type: z14.literal("message"),
+id: z14.string()
+}),
+z14.object({
+type: z14.literal("reasoning"),
+id: z14.string(),
+encrypted_content: z14.string().nullish()
+}),
+z14.object({
+type: z14.literal("function_call"),
+id: z14.string(),
+call_id: z14.string(),
+name: z14.string(),
+arguments: z14.string()
+}),
+z14.object({
+type: z14.literal("web_search_call"),
+id: z14.string(),
+status: z14.string(),
+action: z14.object({
+type: z14.literal("search"),
+query: z14.string().optional()
+}).nullish()
+}),
+z14.object({
+type: z14.literal("computer_call"),
+id: z14.string(),
+status: z14.string()
+}),
+z14.object({
+type: z14.literal("file_search_call"),
+id: z14.string()
+}),
+z14.object({
+type: z14.literal("image_generation_call"),
+id: z14.string()
+}),
+z14.object({
+type: z14.literal("code_interpreter_call"),
+id: z14.string(),
+container_id: z14.string(),
+code: z14.string().nullable(),
+outputs: z14.array(
+z14.discriminatedUnion("type", [
+z14.object({ type: z14.literal("logs"), logs: z14.string() }),
+z14.object({ type: z14.literal("image"), url: z14.string() })
+])
+).nullable(),
+status: z14.string()
+})
+])
+}),
+z14.object({
+type: z14.literal("response.output_item.done"),
+output_index: z14.number(),
+item: z14.discriminatedUnion("type", [
+z14.object({
+type: z14.literal("message"),
+id: z14.string()
+}),
+z14.object({
+type: z14.literal("reasoning"),
+id: z14.string(),
+encrypted_content: z14.string().nullish()
+}),
+z14.object({
+type: z14.literal("function_call"),
+id: z14.string(),
+call_id: z14.string(),
+name: z14.string(),
+arguments: z14.string(),
+status: z14.literal("completed")
+}),
+z14.object({
+type: z14.literal("code_interpreter_call"),
+id: z14.string(),
+code: z14.string().nullable(),
+container_id: z14.string(),
+outputs: z14.array(
+z14.discriminatedUnion("type", [
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2568
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2569
|
+
])
|
|
2570
|
+
).nullable()
|
|
2571
|
+
}),
|
|
2572
|
+
z14.object({
|
|
2573
|
+
type: z14.literal("image_generation_call"),
|
|
2574
|
+
id: z14.string(),
|
|
2575
|
+
result: z14.string()
|
|
2576
|
+
}),
|
|
2577
|
+
z14.object({
|
|
2578
|
+
type: z14.literal("web_search_call"),
|
|
2579
|
+
id: z14.string(),
|
|
2580
|
+
status: z14.string(),
|
|
2581
|
+
action: z14.discriminatedUnion("type", [
|
|
2582
|
+
z14.object({
|
|
2583
|
+
type: z14.literal("search"),
|
|
2584
|
+
query: z14.string().nullish()
|
|
2585
|
+
}),
|
|
2586
|
+
z14.object({
|
|
2587
|
+
type: z14.literal("open_page"),
|
|
2588
|
+
url: z14.string()
|
|
2589
|
+
}),
|
|
2590
|
+
z14.object({
|
|
2591
|
+
type: z14.literal("find"),
|
|
2592
|
+
url: z14.string(),
|
|
2593
|
+
pattern: z14.string()
|
|
2594
|
+
})
|
|
2595
|
+
]).nullish()
|
|
2596
|
+
}),
|
|
2597
|
+
z14.object({
|
|
2598
|
+
type: z14.literal("file_search_call"),
|
|
2599
|
+
id: z14.string(),
|
|
2600
|
+
queries: z14.array(z14.string()),
|
|
2601
|
+
results: z14.array(
|
|
2602
|
+
z14.object({
|
|
2603
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2604
|
+
file_id: z14.string(),
|
|
2605
|
+
filename: z14.string(),
|
|
2606
|
+
score: z14.number(),
|
|
2607
|
+
text: z14.string()
|
|
2608
|
+
})
|
|
2609
|
+
).nullish()
|
|
2610
|
+
}),
|
|
2611
|
+
z14.object({
|
|
2612
|
+
type: z14.literal("local_shell_call"),
|
|
2613
|
+
id: z14.string(),
|
|
2614
|
+
call_id: z14.string(),
|
|
2615
|
+
action: z14.object({
|
|
2616
|
+
type: z14.literal("exec"),
|
|
2617
|
+
command: z14.array(z14.string()),
|
|
2618
|
+
timeout_ms: z14.number().optional(),
|
|
2619
|
+
user: z14.string().optional(),
|
|
2620
|
+
working_directory: z14.string().optional(),
|
|
2621
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2622
|
+
})
|
|
2623
|
+
}),
|
|
2624
|
+
z14.object({
|
|
2625
|
+
type: z14.literal("computer_call"),
|
|
2626
|
+
id: z14.string(),
|
|
2627
|
+
status: z14.literal("completed")
|
|
2628
|
+
})
|
|
2629
|
+
])
|
|
2630
|
+
}),
|
|
2631
|
+
z14.object({
|
|
2632
|
+
type: z14.literal("response.function_call_arguments.delta"),
|
|
2633
|
+
item_id: z14.string(),
|
|
2634
|
+
output_index: z14.number(),
|
|
2635
|
+
delta: z14.string()
|
|
2636
|
+
}),
|
|
2637
|
+
z14.object({
|
|
2638
|
+
type: z14.literal("response.image_generation_call.partial_image"),
|
|
2639
|
+
item_id: z14.string(),
|
|
2640
|
+
output_index: z14.number(),
|
|
2641
|
+
partial_image_b64: z14.string()
|
|
2642
|
+
}),
|
|
2643
|
+
z14.object({
|
|
2644
|
+
type: z14.literal("response.code_interpreter_call_code.delta"),
|
|
2645
|
+
item_id: z14.string(),
|
|
2646
|
+
output_index: z14.number(),
|
|
2647
|
+
delta: z14.string()
|
|
2648
|
+
}),
|
|
2649
|
+
z14.object({
|
|
2650
|
+
type: z14.literal("response.code_interpreter_call_code.done"),
|
|
2651
|
+
item_id: z14.string(),
|
|
2652
|
+
output_index: z14.number(),
|
|
2653
|
+
code: z14.string()
|
|
2654
|
+
}),
|
|
2655
|
+
z14.object({
|
|
2656
|
+
type: z14.literal("response.output_text.annotation.added"),
|
|
2657
|
+
annotation: z14.discriminatedUnion("type", [
|
|
2658
|
+
z14.object({
|
|
2659
|
+
type: z14.literal("url_citation"),
|
|
2660
|
+
url: z14.string(),
|
|
2661
|
+
title: z14.string()
|
|
2662
|
+
}),
|
|
2663
|
+
z14.object({
|
|
2664
|
+
type: z14.literal("file_citation"),
|
|
2665
|
+
file_id: z14.string(),
|
|
2666
|
+
filename: z14.string().nullish(),
|
|
2667
|
+
index: z14.number().nullish(),
|
|
2668
|
+
start_index: z14.number().nullish(),
|
|
2669
|
+
end_index: z14.number().nullish(),
|
|
2670
|
+
quote: z14.string().nullish()
|
|
2671
|
+
})
|
|
2672
|
+
])
|
|
2673
|
+
}),
|
|
2674
|
+
z14.object({
|
|
2675
|
+
type: z14.literal("response.reasoning_summary_part.added"),
|
|
2676
|
+
item_id: z14.string(),
|
|
2677
|
+
summary_index: z14.number()
|
|
2678
|
+
}),
|
|
2679
|
+
z14.object({
|
|
2680
|
+
type: z14.literal("response.reasoning_summary_text.delta"),
|
|
2681
|
+
item_id: z14.string(),
|
|
2682
|
+
summary_index: z14.number(),
|
|
2683
|
+
delta: z14.string()
|
|
2684
|
+
}),
|
|
2685
|
+
z14.object({
|
|
2686
|
+
type: z14.literal("error"),
|
|
2687
|
+
code: z14.string(),
|
|
2688
|
+
message: z14.string(),
|
|
2689
|
+
param: z14.string().nullish(),
|
|
2690
|
+
sequence_number: z14.number()
|
|
2691
|
+
}),
|
|
2692
|
+
z14.object({ type: z14.string() }).loose().transform((value) => ({
|
|
2693
|
+
type: "unknown_chunk",
|
|
2694
|
+
message: value.type
|
|
2695
|
+
}))
|
|
2696
|
+
// fallback for unknown chunks
|
|
2697
|
+
])
|
|
2698
|
+
)
|
|
2699
|
+
);
|
|
2700
|
+
var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2701
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2702
|
+
z14.object({
|
|
2703
|
+
id: z14.string(),
|
|
2704
|
+
created_at: z14.number(),
|
|
2705
|
+
error: z14.object({
|
|
2706
|
+
code: z14.string(),
|
|
2707
|
+
message: z14.string()
|
|
2708
|
+
}).nullish(),
|
|
2709
|
+
model: z14.string(),
|
|
2710
|
+
output: z14.array(
|
|
2711
|
+
z14.discriminatedUnion("type", [
|
|
2712
|
+
z14.object({
|
|
2713
|
+
type: z14.literal("message"),
|
|
2714
|
+
role: z14.literal("assistant"),
|
|
2715
|
+
id: z14.string(),
|
|
2716
|
+
content: z14.array(
|
|
2717
|
+
z14.object({
|
|
2718
|
+
type: z14.literal("output_text"),
|
|
2719
|
+
text: z14.string(),
|
|
2720
|
+
logprobs: z14.array(
|
|
2721
|
+
z14.object({
|
|
2722
|
+
token: z14.string(),
|
|
2723
|
+
logprob: z14.number(),
|
|
2724
|
+
top_logprobs: z14.array(
|
|
2725
|
+
z14.object({
|
|
2726
|
+
token: z14.string(),
|
|
2727
|
+
logprob: z14.number()
|
|
2728
|
+
})
|
|
2729
|
+
)
|
|
2730
|
+
})
|
|
2731
|
+
).nullish(),
|
|
2732
|
+
annotations: z14.array(
|
|
2733
|
+
z14.discriminatedUnion("type", [
|
|
2734
|
+
z14.object({
|
|
2735
|
+
type: z14.literal("url_citation"),
|
|
2736
|
+
start_index: z14.number(),
|
|
2737
|
+
end_index: z14.number(),
|
|
2738
|
+
url: z14.string(),
|
|
2739
|
+
title: z14.string()
|
|
2740
|
+
}),
|
|
2741
|
+
z14.object({
|
|
2742
|
+
type: z14.literal("file_citation"),
|
|
2743
|
+
file_id: z14.string(),
|
|
2744
|
+
filename: z14.string().nullish(),
|
|
2745
|
+
index: z14.number().nullish(),
|
|
2746
|
+
start_index: z14.number().nullish(),
|
|
2747
|
+
end_index: z14.number().nullish(),
|
|
2748
|
+
quote: z14.string().nullish()
|
|
2749
|
+
}),
|
|
2750
|
+
z14.object({
|
|
2751
|
+
type: z14.literal("container_file_citation")
|
|
2752
|
+
})
|
|
2753
|
+
])
|
|
2754
|
+
)
|
|
2755
|
+
})
|
|
2756
|
+
)
|
|
2757
|
+
}),
|
|
2758
|
+
z14.object({
|
|
2759
|
+
type: z14.literal("web_search_call"),
|
|
2760
|
+
id: z14.string(),
|
|
2761
|
+
status: z14.string(),
|
|
2762
|
+
action: z14.discriminatedUnion("type", [
|
|
2763
|
+
z14.object({
|
|
2764
|
+
type: z14.literal("search"),
|
|
2765
|
+
query: z14.string().nullish()
|
|
2766
|
+
}),
|
|
2767
|
+
z14.object({
|
|
2768
|
+
type: z14.literal("open_page"),
|
|
2769
|
+
url: z14.string()
|
|
2770
|
+
}),
|
|
2771
|
+
z14.object({
|
|
2772
|
+
type: z14.literal("find"),
|
|
2773
|
+
url: z14.string(),
|
|
2774
|
+
pattern: z14.string()
|
|
2775
|
+
})
|
|
2776
|
+
]).nullish()
|
|
2777
|
+
}),
|
|
2778
|
+
z14.object({
|
|
2779
|
+
type: z14.literal("file_search_call"),
|
|
2780
|
+
id: z14.string(),
|
|
2781
|
+
queries: z14.array(z14.string()),
|
|
2782
|
+
results: z14.array(
|
|
2783
|
+
z14.object({
|
|
2784
|
+
attributes: z14.record(z14.string(), z14.unknown()),
|
|
2785
|
+
file_id: z14.string(),
|
|
2786
|
+
filename: z14.string(),
|
|
2787
|
+
score: z14.number(),
|
|
2788
|
+
text: z14.string()
|
|
2789
|
+
})
|
|
2790
|
+
).nullish()
|
|
2791
|
+
}),
|
|
2792
|
+
z14.object({
|
|
2793
|
+
type: z14.literal("code_interpreter_call"),
|
|
2794
|
+
id: z14.string(),
|
|
2795
|
+
code: z14.string().nullable(),
|
|
2796
|
+
container_id: z14.string(),
|
|
2797
|
+
outputs: z14.array(
|
|
2798
|
+
z14.discriminatedUnion("type", [
|
|
2799
|
+
z14.object({ type: z14.literal("logs"), logs: z14.string() }),
|
|
2800
|
+
z14.object({ type: z14.literal("image"), url: z14.string() })
|
|
2801
|
+
])
|
|
2802
|
+
).nullable()
|
|
2803
|
+
}),
|
|
2804
|
+
z14.object({
|
|
2805
|
+
type: z14.literal("image_generation_call"),
|
|
2806
|
+
id: z14.string(),
|
|
2807
|
+
result: z14.string()
|
|
2808
|
+
}),
|
|
2809
|
+
z14.object({
|
|
2810
|
+
type: z14.literal("local_shell_call"),
|
|
2811
|
+
id: z14.string(),
|
|
2812
|
+
call_id: z14.string(),
|
|
2813
|
+
action: z14.object({
|
|
2814
|
+
type: z14.literal("exec"),
|
|
2815
|
+
command: z14.array(z14.string()),
|
|
2816
|
+
timeout_ms: z14.number().optional(),
|
|
2817
|
+
user: z14.string().optional(),
|
|
2818
|
+
working_directory: z14.string().optional(),
|
|
2819
|
+
env: z14.record(z14.string(), z14.string()).optional()
|
|
2820
|
+
})
|
|
2821
|
+
}),
|
|
2822
|
+
z14.object({
|
|
2823
|
+
type: z14.literal("function_call"),
|
|
2824
|
+
call_id: z14.string(),
|
|
2825
|
+
name: z14.string(),
|
|
2826
|
+
arguments: z14.string(),
|
|
2827
|
+
id: z14.string()
|
|
2828
|
+
}),
|
|
2829
|
+
z14.object({
|
|
2830
|
+
type: z14.literal("computer_call"),
|
|
2831
|
+
id: z14.string(),
|
|
2832
|
+
status: z14.string().optional()
|
|
2833
|
+
}),
|
|
2834
|
+
z14.object({
|
|
2835
|
+
type: z14.literal("reasoning"),
|
|
2836
|
+
id: z14.string(),
|
|
2837
|
+
encrypted_content: z14.string().nullish(),
|
|
2838
|
+
summary: z14.array(
|
|
2839
|
+
z14.object({
|
|
2840
|
+
type: z14.literal("summary_text"),
|
|
2841
|
+
text: z14.string()
|
|
2842
|
+
})
|
|
2843
|
+
)
|
|
2844
|
+
})
|
|
2845
|
+
])
|
|
2846
|
+
),
|
|
2847
|
+
service_tier: z14.string().nullish(),
|
|
2848
|
+
incomplete_details: z14.object({ reason: z14.string() }).nullish(),
|
|
2849
|
+
usage: z14.object({
|
|
2850
|
+
input_tokens: z14.number(),
|
|
2851
|
+
input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
|
|
2852
|
+
output_tokens: z14.number(),
|
|
2853
|
+
output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
|
|
2854
|
+
})
|
|
2855
|
+
})
|
|
2856
|
+
)
|
|
2857
|
+
);
|
|
2858
|
+
|
|
2859
|
+
// src/responses/openai-responses-options.ts
|
|
2860
|
+
var import_provider_utils22 = require("@ai-sdk/provider-utils");
|
|
2861
|
+
var z15 = __toESM(require("zod/v4"));
|
|
2862
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2863
|
+
var openaiResponsesReasoningModelIds = [
|
|
2864
|
+
"o1",
|
|
2865
|
+
"o1-2024-12-17",
|
|
2866
|
+
"o3-mini",
|
|
2867
|
+
"o3-mini-2025-01-31",
|
|
2868
|
+
"o3",
|
|
2869
|
+
"o3-2025-04-16",
|
|
2870
|
+
"o4-mini",
|
|
2871
|
+
"o4-mini-2025-04-16",
|
|
2872
|
+
"codex-mini-latest",
|
|
2873
|
+
"computer-use-preview",
|
|
2874
|
+
"gpt-5",
|
|
2875
|
+
"gpt-5-2025-08-07",
|
|
2876
|
+
"gpt-5-codex",
|
|
2877
|
+
"gpt-5-mini",
|
|
2878
|
+
"gpt-5-mini-2025-08-07",
|
|
2879
|
+
"gpt-5-nano",
|
|
2880
|
+
"gpt-5-nano-2025-08-07",
|
|
2881
|
+
"gpt-5-pro",
|
|
2882
|
+
"gpt-5-pro-2025-10-06"
|
|
2883
|
+
];
|
|
2884
|
+
var openaiResponsesModelIds = [
|
|
2885
|
+
"gpt-4.1",
|
|
2886
|
+
"gpt-4.1-2025-04-14",
|
|
2887
|
+
"gpt-4.1-mini",
|
|
2888
|
+
"gpt-4.1-mini-2025-04-14",
|
|
2889
|
+
"gpt-4.1-nano",
|
|
2890
|
+
"gpt-4.1-nano-2025-04-14",
|
|
2891
|
+
"gpt-4o",
|
|
2892
|
+
"gpt-4o-2024-05-13",
|
|
2893
|
+
"gpt-4o-2024-08-06",
|
|
2894
|
+
"gpt-4o-2024-11-20",
|
|
2895
|
+
"gpt-4o-audio-preview",
|
|
2896
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
2897
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
2898
|
+
"gpt-4o-search-preview",
|
|
2899
|
+
"gpt-4o-search-preview-2025-03-11",
|
|
2900
|
+
"gpt-4o-mini-search-preview",
|
|
2901
|
+
"gpt-4o-mini-search-preview-2025-03-11",
|
|
2902
|
+
"gpt-4o-mini",
|
|
2903
|
+
"gpt-4o-mini-2024-07-18",
|
|
2904
|
+
"gpt-4-turbo",
|
|
2905
|
+
"gpt-4-turbo-2024-04-09",
|
|
2906
|
+
"gpt-4-turbo-preview",
|
|
2907
|
+
"gpt-4-0125-preview",
|
|
2908
|
+
"gpt-4-1106-preview",
|
|
2909
|
+
"gpt-4",
|
|
2910
|
+
"gpt-4-0613",
|
|
2911
|
+
"gpt-4.5-preview",
|
|
2912
|
+
"gpt-4.5-preview-2025-02-27",
|
|
2913
|
+
"gpt-3.5-turbo-0125",
|
|
2914
|
+
"gpt-3.5-turbo",
|
|
2915
|
+
"gpt-3.5-turbo-1106",
|
|
2916
|
+
"chatgpt-4o-latest",
|
|
2917
|
+
"gpt-5-chat-latest",
|
|
2918
|
+
...openaiResponsesReasoningModelIds
|
|
2919
|
+
];
|
|
2920
|
+
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
|
|
2921
|
+
() => (0, import_provider_utils22.zodSchema)(
|
|
2922
|
+
z15.object({
|
|
2923
|
+
include: z15.array(
|
|
2924
|
+
z15.enum([
|
|
2925
|
+
"reasoning.encrypted_content",
|
|
2926
|
+
"file_search_call.results",
|
|
2927
|
+
"message.output_text.logprobs"
|
|
2928
|
+
])
|
|
2929
|
+
).nullish(),
|
|
2930
|
+
instructions: z15.string().nullish(),
|
|
2931
|
+
/**
|
|
2932
|
+
* Return the log probabilities of the tokens.
|
|
2933
|
+
*
|
|
2934
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
2935
|
+
* were generated.
|
|
2936
|
+
*
|
|
2937
|
+
* Setting to a number will return the log probabilities of the top n
|
|
2938
|
+
* tokens that were generated.
|
|
2939
|
+
*
|
|
2940
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
2941
|
+
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
2942
|
+
*/
|
|
2943
|
+
logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
2944
|
+
/**
|
|
2945
|
+
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
2946
|
+
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
2947
|
+
* Any further attempts to call a tool by the model will be ignored.
|
|
2948
|
+
*/
|
|
2949
|
+
maxToolCalls: z15.number().nullish(),
|
|
2950
|
+
metadata: z15.any().nullish(),
|
|
2951
|
+
parallelToolCalls: z15.boolean().nullish(),
|
|
2952
|
+
previousResponseId: z15.string().nullish(),
|
|
2953
|
+
promptCacheKey: z15.string().nullish(),
|
|
2954
|
+
reasoningEffort: z15.string().nullish(),
|
|
2955
|
+
reasoningSummary: z15.string().nullish(),
|
|
2956
|
+
safetyIdentifier: z15.string().nullish(),
|
|
2957
|
+
serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
|
|
2958
|
+
store: z15.boolean().nullish(),
|
|
2959
|
+
strictJsonSchema: z15.boolean().nullish(),
|
|
2960
|
+
textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
|
|
2961
|
+
user: z15.string().nullish()
|
|
2962
|
+
})
|
|
2963
|
+
)
|
|
2964
|
+
);
|
|
2965
|
+
|
|
2327
2966
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2328
2967
|
var import_provider7 = require("@ai-sdk/provider");
|
|
2329
2968
|
|
|
2330
2969
|
// src/tool/code-interpreter.ts
|
|
2331
|
-
var
|
|
2332
|
-
var
|
|
2333
|
-
var codeInterpreterInputSchema =
|
|
2334
|
-
|
|
2335
|
-
|
|
2336
|
-
|
|
2337
|
-
|
|
2338
|
-
outputs: import_v414.z.array(
|
|
2339
|
-
import_v414.z.discriminatedUnion("type", [
|
|
2340
|
-
import_v414.z.object({ type: import_v414.z.literal("logs"), logs: import_v414.z.string() }),
|
|
2341
|
-
import_v414.z.object({ type: import_v414.z.literal("image"), url: import_v414.z.string() })
|
|
2342
|
-
])
|
|
2343
|
-
).nullish()
|
|
2344
|
-
});
|
|
2345
|
-
var codeInterpreterArgsSchema = import_v414.z.object({
|
|
2346
|
-
container: import_v414.z.union([
|
|
2347
|
-
import_v414.z.string(),
|
|
2348
|
-
import_v414.z.object({
|
|
2349
|
-
fileIds: import_v414.z.array(import_v414.z.string()).optional()
|
|
2970
|
+
var import_provider_utils23 = require("@ai-sdk/provider-utils");
|
|
2971
|
+
var z16 = __toESM(require("zod/v4"));
|
|
2972
|
+
var codeInterpreterInputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2973
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2974
|
+
z16.object({
|
|
2975
|
+
code: z16.string().nullish(),
|
|
2976
|
+
containerId: z16.string()
|
|
2350
2977
|
})
|
|
2351
|
-
|
|
2352
|
-
|
|
2353
|
-
var
|
|
2978
|
+
)
|
|
2979
|
+
);
|
|
2980
|
+
var codeInterpreterOutputSchema = (0, import_provider_utils23.lazySchema)(
|
|
2981
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2982
|
+
z16.object({
|
|
2983
|
+
outputs: z16.array(
|
|
2984
|
+
z16.discriminatedUnion("type", [
|
|
2985
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2986
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2987
|
+
])
|
|
2988
|
+
).nullish()
|
|
2989
|
+
})
|
|
2990
|
+
)
|
|
2991
|
+
);
|
|
2992
|
+
var codeInterpreterArgsSchema = (0, import_provider_utils23.lazySchema)(
|
|
2993
|
+
() => (0, import_provider_utils23.zodSchema)(
|
|
2994
|
+
z16.object({
|
|
2995
|
+
container: z16.union([
|
|
2996
|
+
z16.string(),
|
|
2997
|
+
z16.object({
|
|
2998
|
+
fileIds: z16.array(z16.string()).optional()
|
|
2999
|
+
})
|
|
3000
|
+
]).optional()
|
|
3001
|
+
})
|
|
3002
|
+
)
|
|
3003
|
+
);
|
|
3004
|
+
var codeInterpreterToolFactory = (0, import_provider_utils23.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2354
3005
|
id: "openai.code_interpreter",
|
|
2355
3006
|
name: "code_interpreter",
|
|
2356
3007
|
inputSchema: codeInterpreterInputSchema,
|
|
@@ -2361,168 +3012,200 @@ var codeInterpreter = (args = {}) => {
|
|
|
2361
3012
|
};
|
|
2362
3013
|
|
|
2363
3014
|
// src/tool/file-search.ts
|
|
2364
|
-
var
|
|
2365
|
-
var
|
|
2366
|
-
var comparisonFilterSchema =
|
|
2367
|
-
key:
|
|
2368
|
-
type:
|
|
2369
|
-
value:
|
|
3015
|
+
var import_provider_utils24 = require("@ai-sdk/provider-utils");
|
|
3016
|
+
var z17 = __toESM(require("zod/v4"));
|
|
3017
|
+
var comparisonFilterSchema = z17.object({
|
|
3018
|
+
key: z17.string(),
|
|
3019
|
+
type: z17.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
|
3020
|
+
value: z17.union([z17.string(), z17.number(), z17.boolean()])
|
|
2370
3021
|
});
|
|
2371
|
-
var compoundFilterSchema =
|
|
2372
|
-
type:
|
|
2373
|
-
filters:
|
|
2374
|
-
|
|
3022
|
+
var compoundFilterSchema = z17.object({
|
|
3023
|
+
type: z17.enum(["and", "or"]),
|
|
3024
|
+
filters: z17.array(
|
|
3025
|
+
z17.union([comparisonFilterSchema, z17.lazy(() => compoundFilterSchema)])
|
|
2375
3026
|
)
|
|
2376
3027
|
});
|
|
2377
|
-
var fileSearchArgsSchema =
|
|
2378
|
-
|
|
2379
|
-
|
|
2380
|
-
|
|
2381
|
-
|
|
2382
|
-
|
|
2383
|
-
|
|
2384
|
-
|
|
2385
|
-
})
|
|
2386
|
-
|
|
2387
|
-
queries: import_v415.z.array(import_v415.z.string()),
|
|
2388
|
-
results: import_v415.z.array(
|
|
2389
|
-
import_v415.z.object({
|
|
2390
|
-
attributes: import_v415.z.record(import_v415.z.string(), import_v415.z.unknown()),
|
|
2391
|
-
fileId: import_v415.z.string(),
|
|
2392
|
-
filename: import_v415.z.string(),
|
|
2393
|
-
score: import_v415.z.number(),
|
|
2394
|
-
text: import_v415.z.string()
|
|
3028
|
+
var fileSearchArgsSchema = (0, import_provider_utils24.lazySchema)(
|
|
3029
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3030
|
+
z17.object({
|
|
3031
|
+
vectorStoreIds: z17.array(z17.string()),
|
|
3032
|
+
maxNumResults: z17.number().optional(),
|
|
3033
|
+
ranking: z17.object({
|
|
3034
|
+
ranker: z17.string().optional(),
|
|
3035
|
+
scoreThreshold: z17.number().optional()
|
|
3036
|
+
}).optional(),
|
|
3037
|
+
filters: z17.union([comparisonFilterSchema, compoundFilterSchema]).optional()
|
|
2395
3038
|
})
|
|
2396
|
-
)
|
|
2397
|
-
|
|
2398
|
-
var
|
|
3039
|
+
)
|
|
3040
|
+
);
|
|
3041
|
+
var fileSearchOutputSchema = (0, import_provider_utils24.lazySchema)(
|
|
3042
|
+
() => (0, import_provider_utils24.zodSchema)(
|
|
3043
|
+
z17.object({
|
|
3044
|
+
queries: z17.array(z17.string()),
|
|
3045
|
+
results: z17.array(
|
|
3046
|
+
z17.object({
|
|
3047
|
+
attributes: z17.record(z17.string(), z17.unknown()),
|
|
3048
|
+
fileId: z17.string(),
|
|
3049
|
+
filename: z17.string(),
|
|
3050
|
+
score: z17.number(),
|
|
3051
|
+
text: z17.string()
|
|
3052
|
+
})
|
|
3053
|
+
).nullable()
|
|
3054
|
+
})
|
|
3055
|
+
)
|
|
3056
|
+
);
|
|
3057
|
+
var fileSearch = (0, import_provider_utils24.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2399
3058
|
id: "openai.file_search",
|
|
2400
3059
|
name: "file_search",
|
|
2401
|
-
inputSchema:
|
|
3060
|
+
inputSchema: z17.object({}),
|
|
2402
3061
|
outputSchema: fileSearchOutputSchema
|
|
2403
3062
|
});
|
|
2404
3063
|
|
|
2405
3064
|
// src/tool/web-search.ts
|
|
2406
|
-
var
|
|
2407
|
-
var
|
|
2408
|
-
var webSearchArgsSchema =
|
|
2409
|
-
|
|
2410
|
-
|
|
2411
|
-
|
|
2412
|
-
|
|
2413
|
-
|
|
2414
|
-
|
|
2415
|
-
|
|
2416
|
-
|
|
2417
|
-
|
|
2418
|
-
|
|
2419
|
-
|
|
2420
|
-
|
|
2421
|
-
|
|
3065
|
+
var import_provider_utils25 = require("@ai-sdk/provider-utils");
|
|
3066
|
+
var z18 = __toESM(require("zod/v4"));
|
|
3067
|
+
var webSearchArgsSchema = (0, import_provider_utils25.lazySchema)(
|
|
3068
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3069
|
+
z18.object({
|
|
3070
|
+
filters: z18.object({
|
|
3071
|
+
allowedDomains: z18.array(z18.string()).optional()
|
|
3072
|
+
}).optional(),
|
|
3073
|
+
searchContextSize: z18.enum(["low", "medium", "high"]).optional(),
|
|
3074
|
+
userLocation: z18.object({
|
|
3075
|
+
type: z18.literal("approximate"),
|
|
3076
|
+
country: z18.string().optional(),
|
|
3077
|
+
city: z18.string().optional(),
|
|
3078
|
+
region: z18.string().optional(),
|
|
3079
|
+
timezone: z18.string().optional()
|
|
3080
|
+
}).optional()
|
|
3081
|
+
})
|
|
3082
|
+
)
|
|
3083
|
+
);
|
|
3084
|
+
var webSearchInputSchema = (0, import_provider_utils25.lazySchema)(
|
|
3085
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
3086
|
+
z18.object({
|
|
3087
|
+
action: z18.discriminatedUnion("type", [
|
|
3088
|
+
z18.object({
|
|
3089
|
+
type: z18.literal("search"),
|
|
3090
|
+
query: z18.string().nullish()
|
|
3091
|
+
}),
|
|
3092
|
+
z18.object({
|
|
3093
|
+
type: z18.literal("open_page"),
|
|
3094
|
+
url: z18.string()
|
|
3095
|
+
}),
|
|
3096
|
+
z18.object({
|
|
3097
|
+
type: z18.literal("find"),
|
|
3098
|
+
url: z18.string(),
|
|
3099
|
+
pattern: z18.string()
|
|
3100
|
+
})
|
|
3101
|
+
]).nullish()
|
|
3102
|
+
})
|
|
3103
|
+
)
|
|
3104
|
+
);
|
|
3105
|
+
var webSearchToolFactory = (0, import_provider_utils25.createProviderDefinedToolFactory)({
|
|
2422
3106
|
id: "openai.web_search",
|
|
2423
3107
|
name: "web_search",
|
|
2424
|
-
inputSchema:
|
|
2425
|
-
action: import_v416.z.discriminatedUnion("type", [
|
|
2426
|
-
import_v416.z.object({
|
|
2427
|
-
type: import_v416.z.literal("search"),
|
|
2428
|
-
query: import_v416.z.string().nullish()
|
|
2429
|
-
}),
|
|
2430
|
-
import_v416.z.object({
|
|
2431
|
-
type: import_v416.z.literal("open_page"),
|
|
2432
|
-
url: import_v416.z.string()
|
|
2433
|
-
}),
|
|
2434
|
-
import_v416.z.object({
|
|
2435
|
-
type: import_v416.z.literal("find"),
|
|
2436
|
-
url: import_v416.z.string(),
|
|
2437
|
-
pattern: import_v416.z.string()
|
|
2438
|
-
})
|
|
2439
|
-
]).nullish()
|
|
2440
|
-
})
|
|
3108
|
+
inputSchema: webSearchInputSchema
|
|
2441
3109
|
});
|
|
2442
3110
|
|
|
2443
3111
|
// src/tool/web-search-preview.ts
|
|
2444
|
-
var
|
|
2445
|
-
var
|
|
2446
|
-
var webSearchPreviewArgsSchema =
|
|
2447
|
-
|
|
2448
|
-
|
|
2449
|
-
|
|
2450
|
-
|
|
2451
|
-
|
|
2452
|
-
|
|
2453
|
-
|
|
2454
|
-
|
|
2455
|
-
|
|
2456
|
-
|
|
2457
|
-
|
|
2458
|
-
|
|
2459
|
-
|
|
2460
|
-
|
|
2461
|
-
|
|
2462
|
-
|
|
2463
|
-
|
|
2464
|
-
|
|
2465
|
-
|
|
2466
|
-
|
|
2467
|
-
|
|
2468
|
-
|
|
2469
|
-
|
|
2470
|
-
|
|
2471
|
-
|
|
2472
|
-
|
|
2473
|
-
|
|
2474
|
-
|
|
2475
|
-
|
|
2476
|
-
|
|
2477
|
-
|
|
2478
|
-
|
|
2479
|
-
|
|
2480
|
-
|
|
3112
|
+
var import_provider_utils26 = require("@ai-sdk/provider-utils");
|
|
3113
|
+
var z19 = __toESM(require("zod/v4"));
|
|
3114
|
+
var webSearchPreviewArgsSchema = (0, import_provider_utils26.lazySchema)(
|
|
3115
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3116
|
+
z19.object({
|
|
3117
|
+
/**
|
|
3118
|
+
* Search context size to use for the web search.
|
|
3119
|
+
* - high: Most comprehensive context, highest cost, slower response
|
|
3120
|
+
* - medium: Balanced context, cost, and latency (default)
|
|
3121
|
+
* - low: Least context, lowest cost, fastest response
|
|
3122
|
+
*/
|
|
3123
|
+
searchContextSize: z19.enum(["low", "medium", "high"]).optional(),
|
|
3124
|
+
/**
|
|
3125
|
+
* User location information to provide geographically relevant search results.
|
|
3126
|
+
*/
|
|
3127
|
+
userLocation: z19.object({
|
|
3128
|
+
/**
|
|
3129
|
+
* Type of location (always 'approximate')
|
|
3130
|
+
*/
|
|
3131
|
+
type: z19.literal("approximate"),
|
|
3132
|
+
/**
|
|
3133
|
+
* Two-letter ISO country code (e.g., 'US', 'GB')
|
|
3134
|
+
*/
|
|
3135
|
+
country: z19.string().optional(),
|
|
3136
|
+
/**
|
|
3137
|
+
* City name (free text, e.g., 'Minneapolis')
|
|
3138
|
+
*/
|
|
3139
|
+
city: z19.string().optional(),
|
|
3140
|
+
/**
|
|
3141
|
+
* Region name (free text, e.g., 'Minnesota')
|
|
3142
|
+
*/
|
|
3143
|
+
region: z19.string().optional(),
|
|
3144
|
+
/**
|
|
3145
|
+
* IANA timezone (e.g., 'America/Chicago')
|
|
3146
|
+
*/
|
|
3147
|
+
timezone: z19.string().optional()
|
|
3148
|
+
}).optional()
|
|
3149
|
+
})
|
|
3150
|
+
)
|
|
3151
|
+
);
|
|
3152
|
+
var webSearchPreviewInputSchema = (0, import_provider_utils26.lazySchema)(
|
|
3153
|
+
() => (0, import_provider_utils26.zodSchema)(
|
|
3154
|
+
z19.object({
|
|
3155
|
+
action: z19.discriminatedUnion("type", [
|
|
3156
|
+
z19.object({
|
|
3157
|
+
type: z19.literal("search"),
|
|
3158
|
+
query: z19.string().nullish()
|
|
3159
|
+
}),
|
|
3160
|
+
z19.object({
|
|
3161
|
+
type: z19.literal("open_page"),
|
|
3162
|
+
url: z19.string()
|
|
3163
|
+
}),
|
|
3164
|
+
z19.object({
|
|
3165
|
+
type: z19.literal("find"),
|
|
3166
|
+
url: z19.string(),
|
|
3167
|
+
pattern: z19.string()
|
|
3168
|
+
})
|
|
3169
|
+
]).nullish()
|
|
3170
|
+
})
|
|
3171
|
+
)
|
|
3172
|
+
);
|
|
3173
|
+
var webSearchPreview = (0, import_provider_utils26.createProviderDefinedToolFactory)({
|
|
2481
3174
|
id: "openai.web_search_preview",
|
|
2482
3175
|
name: "web_search_preview",
|
|
2483
|
-
inputSchema:
|
|
2484
|
-
action: import_v417.z.discriminatedUnion("type", [
|
|
2485
|
-
import_v417.z.object({
|
|
2486
|
-
type: import_v417.z.literal("search"),
|
|
2487
|
-
query: import_v417.z.string().nullish()
|
|
2488
|
-
}),
|
|
2489
|
-
import_v417.z.object({
|
|
2490
|
-
type: import_v417.z.literal("open_page"),
|
|
2491
|
-
url: import_v417.z.string()
|
|
2492
|
-
}),
|
|
2493
|
-
import_v417.z.object({
|
|
2494
|
-
type: import_v417.z.literal("find"),
|
|
2495
|
-
url: import_v417.z.string(),
|
|
2496
|
-
pattern: import_v417.z.string()
|
|
2497
|
-
})
|
|
2498
|
-
]).nullish()
|
|
2499
|
-
})
|
|
3176
|
+
inputSchema: webSearchPreviewInputSchema
|
|
2500
3177
|
});
|
|
2501
3178
|
|
|
2502
3179
|
// src/tool/image-generation.ts
|
|
2503
|
-
var
|
|
2504
|
-
var
|
|
2505
|
-
var imageGenerationArgsSchema =
|
|
2506
|
-
|
|
2507
|
-
|
|
2508
|
-
|
|
2509
|
-
|
|
2510
|
-
|
|
2511
|
-
|
|
2512
|
-
|
|
2513
|
-
|
|
2514
|
-
|
|
2515
|
-
|
|
2516
|
-
|
|
2517
|
-
|
|
2518
|
-
|
|
2519
|
-
|
|
2520
|
-
|
|
2521
|
-
})
|
|
2522
|
-
|
|
3180
|
+
var import_provider_utils27 = require("@ai-sdk/provider-utils");
|
|
3181
|
+
var z20 = __toESM(require("zod/v4"));
|
|
3182
|
+
var imageGenerationArgsSchema = (0, import_provider_utils27.lazySchema)(
|
|
3183
|
+
() => (0, import_provider_utils27.zodSchema)(
|
|
3184
|
+
z20.object({
|
|
3185
|
+
background: z20.enum(["auto", "opaque", "transparent"]).optional(),
|
|
3186
|
+
inputFidelity: z20.enum(["low", "high"]).optional(),
|
|
3187
|
+
inputImageMask: z20.object({
|
|
3188
|
+
fileId: z20.string().optional(),
|
|
3189
|
+
imageUrl: z20.string().optional()
|
|
3190
|
+
}).optional(),
|
|
3191
|
+
model: z20.string().optional(),
|
|
3192
|
+
moderation: z20.enum(["auto"]).optional(),
|
|
3193
|
+
outputCompression: z20.number().int().min(0).max(100).optional(),
|
|
3194
|
+
outputFormat: z20.enum(["png", "jpeg", "webp"]).optional(),
|
|
3195
|
+
partialImages: z20.number().int().min(0).max(3).optional(),
|
|
3196
|
+
quality: z20.enum(["auto", "low", "medium", "high"]).optional(),
|
|
3197
|
+
size: z20.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
|
|
3198
|
+
}).strict()
|
|
3199
|
+
)
|
|
3200
|
+
);
|
|
3201
|
+
var imageGenerationInputSchema = (0, import_provider_utils27.lazySchema)(() => (0, import_provider_utils27.zodSchema)(z20.object({})));
|
|
3202
|
+
var imageGenerationOutputSchema = (0, import_provider_utils27.lazySchema)(
|
|
3203
|
+
() => (0, import_provider_utils27.zodSchema)(z20.object({ result: z20.string() }))
|
|
3204
|
+
);
|
|
3205
|
+
var imageGenerationToolFactory = (0, import_provider_utils27.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
2523
3206
|
id: "openai.image_generation",
|
|
2524
3207
|
name: "image_generation",
|
|
2525
|
-
inputSchema:
|
|
3208
|
+
inputSchema: imageGenerationInputSchema,
|
|
2526
3209
|
outputSchema: imageGenerationOutputSchema
|
|
2527
3210
|
});
|
|
2528
3211
|
var imageGeneration = (args = {}) => {
|
|
@@ -2530,7 +3213,8 @@ var imageGeneration = (args = {}) => {
|
|
|
2530
3213
|
};
|
|
2531
3214
|
|
|
2532
3215
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2533
|
-
|
|
3216
|
+
var import_provider_utils28 = require("@ai-sdk/provider-utils");
|
|
3217
|
+
async function prepareResponsesTools({
|
|
2534
3218
|
tools,
|
|
2535
3219
|
toolChoice,
|
|
2536
3220
|
strictJsonSchema
|
|
@@ -2555,7 +3239,10 @@ function prepareResponsesTools({
|
|
|
2555
3239
|
case "provider-defined": {
|
|
2556
3240
|
switch (tool.id) {
|
|
2557
3241
|
case "openai.file_search": {
|
|
2558
|
-
const args =
|
|
3242
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3243
|
+
value: tool.args,
|
|
3244
|
+
schema: fileSearchArgsSchema
|
|
3245
|
+
});
|
|
2559
3246
|
openaiTools.push({
|
|
2560
3247
|
type: "file_search",
|
|
2561
3248
|
vector_store_ids: args.vectorStoreIds,
|
|
@@ -2575,7 +3262,10 @@ function prepareResponsesTools({
|
|
|
2575
3262
|
break;
|
|
2576
3263
|
}
|
|
2577
3264
|
case "openai.web_search_preview": {
|
|
2578
|
-
const args =
|
|
3265
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3266
|
+
value: tool.args,
|
|
3267
|
+
schema: webSearchPreviewArgsSchema
|
|
3268
|
+
});
|
|
2579
3269
|
openaiTools.push({
|
|
2580
3270
|
type: "web_search_preview",
|
|
2581
3271
|
search_context_size: args.searchContextSize,
|
|
@@ -2584,7 +3274,10 @@ function prepareResponsesTools({
|
|
|
2584
3274
|
break;
|
|
2585
3275
|
}
|
|
2586
3276
|
case "openai.web_search": {
|
|
2587
|
-
const args =
|
|
3277
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3278
|
+
value: tool.args,
|
|
3279
|
+
schema: webSearchArgsSchema
|
|
3280
|
+
});
|
|
2588
3281
|
openaiTools.push({
|
|
2589
3282
|
type: "web_search",
|
|
2590
3283
|
filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
|
|
@@ -2594,7 +3287,10 @@ function prepareResponsesTools({
|
|
|
2594
3287
|
break;
|
|
2595
3288
|
}
|
|
2596
3289
|
case "openai.code_interpreter": {
|
|
2597
|
-
const args =
|
|
3290
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3291
|
+
value: tool.args,
|
|
3292
|
+
schema: codeInterpreterArgsSchema
|
|
3293
|
+
});
|
|
2598
3294
|
openaiTools.push({
|
|
2599
3295
|
type: "code_interpreter",
|
|
2600
3296
|
container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
|
|
@@ -2602,7 +3298,10 @@ function prepareResponsesTools({
|
|
|
2602
3298
|
break;
|
|
2603
3299
|
}
|
|
2604
3300
|
case "openai.image_generation": {
|
|
2605
|
-
const args =
|
|
3301
|
+
const args = await (0, import_provider_utils28.validateTypes)({
|
|
3302
|
+
value: tool.args,
|
|
3303
|
+
schema: imageGenerationArgsSchema
|
|
3304
|
+
});
|
|
2606
3305
|
openaiTools.push({
|
|
2607
3306
|
type: "image_generation",
|
|
2608
3307
|
background: args.background,
|
|
@@ -2653,83 +3352,6 @@ function prepareResponsesTools({
|
|
|
2653
3352
|
}
|
|
2654
3353
|
|
|
2655
3354
|
// src/responses/openai-responses-language-model.ts
|
|
2656
|
-
var webSearchCallItem = import_v419.z.object({
|
|
2657
|
-
type: import_v419.z.literal("web_search_call"),
|
|
2658
|
-
id: import_v419.z.string(),
|
|
2659
|
-
status: import_v419.z.string(),
|
|
2660
|
-
action: import_v419.z.discriminatedUnion("type", [
|
|
2661
|
-
import_v419.z.object({
|
|
2662
|
-
type: import_v419.z.literal("search"),
|
|
2663
|
-
query: import_v419.z.string().nullish()
|
|
2664
|
-
}),
|
|
2665
|
-
import_v419.z.object({
|
|
2666
|
-
type: import_v419.z.literal("open_page"),
|
|
2667
|
-
url: import_v419.z.string()
|
|
2668
|
-
}),
|
|
2669
|
-
import_v419.z.object({
|
|
2670
|
-
type: import_v419.z.literal("find"),
|
|
2671
|
-
url: import_v419.z.string(),
|
|
2672
|
-
pattern: import_v419.z.string()
|
|
2673
|
-
})
|
|
2674
|
-
]).nullish()
|
|
2675
|
-
});
|
|
2676
|
-
var fileSearchCallItem = import_v419.z.object({
|
|
2677
|
-
type: import_v419.z.literal("file_search_call"),
|
|
2678
|
-
id: import_v419.z.string(),
|
|
2679
|
-
queries: import_v419.z.array(import_v419.z.string()),
|
|
2680
|
-
results: import_v419.z.array(
|
|
2681
|
-
import_v419.z.object({
|
|
2682
|
-
attributes: import_v419.z.record(import_v419.z.string(), import_v419.z.unknown()),
|
|
2683
|
-
file_id: import_v419.z.string(),
|
|
2684
|
-
filename: import_v419.z.string(),
|
|
2685
|
-
score: import_v419.z.number(),
|
|
2686
|
-
text: import_v419.z.string()
|
|
2687
|
-
})
|
|
2688
|
-
).nullish()
|
|
2689
|
-
});
|
|
2690
|
-
var codeInterpreterCallItem = import_v419.z.object({
|
|
2691
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
2692
|
-
id: import_v419.z.string(),
|
|
2693
|
-
code: import_v419.z.string().nullable(),
|
|
2694
|
-
container_id: import_v419.z.string(),
|
|
2695
|
-
outputs: import_v419.z.array(
|
|
2696
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2697
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
2698
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
2699
|
-
])
|
|
2700
|
-
).nullable()
|
|
2701
|
-
});
|
|
2702
|
-
var localShellCallItem = import_v419.z.object({
|
|
2703
|
-
type: import_v419.z.literal("local_shell_call"),
|
|
2704
|
-
id: import_v419.z.string(),
|
|
2705
|
-
call_id: import_v419.z.string(),
|
|
2706
|
-
action: import_v419.z.object({
|
|
2707
|
-
type: import_v419.z.literal("exec"),
|
|
2708
|
-
command: import_v419.z.array(import_v419.z.string()),
|
|
2709
|
-
timeout_ms: import_v419.z.number().optional(),
|
|
2710
|
-
user: import_v419.z.string().optional(),
|
|
2711
|
-
working_directory: import_v419.z.string().optional(),
|
|
2712
|
-
env: import_v419.z.record(import_v419.z.string(), import_v419.z.string()).optional()
|
|
2713
|
-
})
|
|
2714
|
-
});
|
|
2715
|
-
var imageGenerationCallItem = import_v419.z.object({
|
|
2716
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
2717
|
-
id: import_v419.z.string(),
|
|
2718
|
-
result: import_v419.z.string()
|
|
2719
|
-
});
|
|
2720
|
-
var TOP_LOGPROBS_MAX = 20;
|
|
2721
|
-
var LOGPROBS_SCHEMA = import_v419.z.array(
|
|
2722
|
-
import_v419.z.object({
|
|
2723
|
-
token: import_v419.z.string(),
|
|
2724
|
-
logprob: import_v419.z.number(),
|
|
2725
|
-
top_logprobs: import_v419.z.array(
|
|
2726
|
-
import_v419.z.object({
|
|
2727
|
-
token: import_v419.z.string(),
|
|
2728
|
-
logprob: import_v419.z.number()
|
|
2729
|
-
})
|
|
2730
|
-
)
|
|
2731
|
-
})
|
|
2732
|
-
);
|
|
2733
3355
|
var OpenAIResponsesLanguageModel = class {
|
|
2734
3356
|
constructor(modelId, config) {
|
|
2735
3357
|
this.specificationVersion = "v2";
|
|
@@ -2782,7 +3404,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2782
3404
|
if (stopSequences != null) {
|
|
2783
3405
|
warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
|
|
2784
3406
|
}
|
|
2785
|
-
const openaiOptions = await (0,
|
|
3407
|
+
const openaiOptions = await (0, import_provider_utils29.parseProviderOptions)({
|
|
2786
3408
|
provider: "openai",
|
|
2787
3409
|
providerOptions,
|
|
2788
3410
|
schema: openaiResponsesProviderOptionsSchema
|
|
@@ -2921,7 +3543,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2921
3543
|
tools: openaiTools,
|
|
2922
3544
|
toolChoice: openaiToolChoice,
|
|
2923
3545
|
toolWarnings
|
|
2924
|
-
} = prepareResponsesTools({
|
|
3546
|
+
} = await prepareResponsesTools({
|
|
2925
3547
|
tools,
|
|
2926
3548
|
toolChoice,
|
|
2927
3549
|
strictJsonSchema
|
|
@@ -2951,91 +3573,13 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2951
3573
|
responseHeaders,
|
|
2952
3574
|
value: response,
|
|
2953
3575
|
rawValue: rawResponse
|
|
2954
|
-
} = await (0,
|
|
3576
|
+
} = await (0, import_provider_utils29.postJsonToApi)({
|
|
2955
3577
|
url,
|
|
2956
|
-
headers: (0,
|
|
3578
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
2957
3579
|
body,
|
|
2958
3580
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2959
|
-
successfulResponseHandler: (0,
|
|
2960
|
-
|
|
2961
|
-
id: import_v419.z.string(),
|
|
2962
|
-
created_at: import_v419.z.number(),
|
|
2963
|
-
error: import_v419.z.object({
|
|
2964
|
-
code: import_v419.z.string(),
|
|
2965
|
-
message: import_v419.z.string()
|
|
2966
|
-
}).nullish(),
|
|
2967
|
-
model: import_v419.z.string(),
|
|
2968
|
-
output: import_v419.z.array(
|
|
2969
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2970
|
-
import_v419.z.object({
|
|
2971
|
-
type: import_v419.z.literal("message"),
|
|
2972
|
-
role: import_v419.z.literal("assistant"),
|
|
2973
|
-
id: import_v419.z.string(),
|
|
2974
|
-
content: import_v419.z.array(
|
|
2975
|
-
import_v419.z.object({
|
|
2976
|
-
type: import_v419.z.literal("output_text"),
|
|
2977
|
-
text: import_v419.z.string(),
|
|
2978
|
-
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2979
|
-
annotations: import_v419.z.array(
|
|
2980
|
-
import_v419.z.discriminatedUnion("type", [
|
|
2981
|
-
import_v419.z.object({
|
|
2982
|
-
type: import_v419.z.literal("url_citation"),
|
|
2983
|
-
start_index: import_v419.z.number(),
|
|
2984
|
-
end_index: import_v419.z.number(),
|
|
2985
|
-
url: import_v419.z.string(),
|
|
2986
|
-
title: import_v419.z.string()
|
|
2987
|
-
}),
|
|
2988
|
-
import_v419.z.object({
|
|
2989
|
-
type: import_v419.z.literal("file_citation"),
|
|
2990
|
-
file_id: import_v419.z.string(),
|
|
2991
|
-
filename: import_v419.z.string().nullish(),
|
|
2992
|
-
index: import_v419.z.number().nullish(),
|
|
2993
|
-
start_index: import_v419.z.number().nullish(),
|
|
2994
|
-
end_index: import_v419.z.number().nullish(),
|
|
2995
|
-
quote: import_v419.z.string().nullish()
|
|
2996
|
-
}),
|
|
2997
|
-
import_v419.z.object({
|
|
2998
|
-
type: import_v419.z.literal("container_file_citation")
|
|
2999
|
-
})
|
|
3000
|
-
])
|
|
3001
|
-
)
|
|
3002
|
-
})
|
|
3003
|
-
)
|
|
3004
|
-
}),
|
|
3005
|
-
webSearchCallItem,
|
|
3006
|
-
fileSearchCallItem,
|
|
3007
|
-
codeInterpreterCallItem,
|
|
3008
|
-
imageGenerationCallItem,
|
|
3009
|
-
localShellCallItem,
|
|
3010
|
-
import_v419.z.object({
|
|
3011
|
-
type: import_v419.z.literal("function_call"),
|
|
3012
|
-
call_id: import_v419.z.string(),
|
|
3013
|
-
name: import_v419.z.string(),
|
|
3014
|
-
arguments: import_v419.z.string(),
|
|
3015
|
-
id: import_v419.z.string()
|
|
3016
|
-
}),
|
|
3017
|
-
import_v419.z.object({
|
|
3018
|
-
type: import_v419.z.literal("computer_call"),
|
|
3019
|
-
id: import_v419.z.string(),
|
|
3020
|
-
status: import_v419.z.string().optional()
|
|
3021
|
-
}),
|
|
3022
|
-
import_v419.z.object({
|
|
3023
|
-
type: import_v419.z.literal("reasoning"),
|
|
3024
|
-
id: import_v419.z.string(),
|
|
3025
|
-
encrypted_content: import_v419.z.string().nullish(),
|
|
3026
|
-
summary: import_v419.z.array(
|
|
3027
|
-
import_v419.z.object({
|
|
3028
|
-
type: import_v419.z.literal("summary_text"),
|
|
3029
|
-
text: import_v419.z.string()
|
|
3030
|
-
})
|
|
3031
|
-
)
|
|
3032
|
-
})
|
|
3033
|
-
])
|
|
3034
|
-
),
|
|
3035
|
-
service_tier: import_v419.z.string().nullish(),
|
|
3036
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3037
|
-
usage: usageSchema2
|
|
3038
|
-
})
|
|
3581
|
+
successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
|
|
3582
|
+
openaiResponsesResponseSchema
|
|
3039
3583
|
),
|
|
3040
3584
|
abortSignal: options.abortSignal,
|
|
3041
3585
|
fetch: this.config.fetch
|
|
@@ -3098,7 +3642,9 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3098
3642
|
type: "tool-call",
|
|
3099
3643
|
toolCallId: part.call_id,
|
|
3100
3644
|
toolName: "local_shell",
|
|
3101
|
-
input: JSON.stringify({
|
|
3645
|
+
input: JSON.stringify({
|
|
3646
|
+
action: part.action
|
|
3647
|
+
}),
|
|
3102
3648
|
providerMetadata: {
|
|
3103
3649
|
openai: {
|
|
3104
3650
|
itemId: part.id
|
|
@@ -3126,7 +3672,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3126
3672
|
content.push({
|
|
3127
3673
|
type: "source",
|
|
3128
3674
|
sourceType: "url",
|
|
3129
|
-
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
|
|
3675
|
+
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils29.generateId)(),
|
|
3130
3676
|
url: annotation.url,
|
|
3131
3677
|
title: annotation.title
|
|
3132
3678
|
});
|
|
@@ -3134,7 +3680,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3134
3680
|
content.push({
|
|
3135
3681
|
type: "source",
|
|
3136
3682
|
sourceType: "document",
|
|
3137
|
-
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
|
|
3683
|
+
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils29.generateId)(),
|
|
3138
3684
|
mediaType: "text/plain",
|
|
3139
3685
|
title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
|
|
3140
3686
|
filename: (_l = annotation.filename) != null ? _l : annotation.file_id
|
|
@@ -3286,18 +3832,18 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3286
3832
|
warnings,
|
|
3287
3833
|
webSearchToolName
|
|
3288
3834
|
} = await this.getArgs(options);
|
|
3289
|
-
const { responseHeaders, value: response } = await (0,
|
|
3835
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils29.postJsonToApi)({
|
|
3290
3836
|
url: this.config.url({
|
|
3291
3837
|
path: "/responses",
|
|
3292
3838
|
modelId: this.modelId
|
|
3293
3839
|
}),
|
|
3294
|
-
headers: (0,
|
|
3840
|
+
headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
|
|
3295
3841
|
body: {
|
|
3296
3842
|
...body,
|
|
3297
3843
|
stream: true
|
|
3298
3844
|
},
|
|
3299
3845
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
3300
|
-
successfulResponseHandler: (0,
|
|
3846
|
+
successfulResponseHandler: (0, import_provider_utils29.createEventSourceResponseHandler)(
|
|
3301
3847
|
openaiResponsesChunkSchema
|
|
3302
3848
|
),
|
|
3303
3849
|
abortSignal: options.abortSignal,
|
|
@@ -3352,7 +3898,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3352
3898
|
controller.enqueue({
|
|
3353
3899
|
type: "tool-input-start",
|
|
3354
3900
|
id: value.item.id,
|
|
3355
|
-
toolName: webSearchToolName != null ? webSearchToolName : "web_search"
|
|
3901
|
+
toolName: webSearchToolName != null ? webSearchToolName : "web_search",
|
|
3902
|
+
providerExecuted: true
|
|
3356
3903
|
});
|
|
3357
3904
|
} else if (value.item.type === "computer_call") {
|
|
3358
3905
|
ongoingToolCalls[value.output_index] = {
|
|
@@ -3362,7 +3909,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3362
3909
|
controller.enqueue({
|
|
3363
3910
|
type: "tool-input-start",
|
|
3364
3911
|
id: value.item.id,
|
|
3365
|
-
toolName: "computer_use"
|
|
3912
|
+
toolName: "computer_use",
|
|
3913
|
+
providerExecuted: true
|
|
3366
3914
|
});
|
|
3367
3915
|
} else if (value.item.type === "code_interpreter_call") {
|
|
3368
3916
|
ongoingToolCalls[value.output_index] = {
|
|
@@ -3375,7 +3923,8 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3375
3923
|
controller.enqueue({
|
|
3376
3924
|
type: "tool-input-start",
|
|
3377
3925
|
id: value.item.id,
|
|
3378
|
-
toolName: "code_interpreter"
|
|
3926
|
+
toolName: "code_interpreter",
|
|
3927
|
+
providerExecuted: true
|
|
3379
3928
|
});
|
|
3380
3929
|
controller.enqueue({
|
|
3381
3930
|
type: "tool-input-delta",
|
|
@@ -3671,7 +4220,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3671
4220
|
controller.enqueue({
|
|
3672
4221
|
type: "source",
|
|
3673
4222
|
sourceType: "url",
|
|
3674
|
-
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
|
|
4223
|
+
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils29.generateId)(),
|
|
3675
4224
|
url: value.annotation.url,
|
|
3676
4225
|
title: value.annotation.title
|
|
3677
4226
|
});
|
|
@@ -3679,7 +4228,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3679
4228
|
controller.enqueue({
|
|
3680
4229
|
type: "source",
|
|
3681
4230
|
sourceType: "document",
|
|
3682
|
-
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
|
|
4231
|
+
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils29.generateId)(),
|
|
3683
4232
|
mediaType: "text/plain",
|
|
3684
4233
|
title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
|
|
3685
4234
|
filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
|
|
@@ -3715,196 +4264,6 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3715
4264
|
};
|
|
3716
4265
|
}
|
|
3717
4266
|
};
|
|
3718
|
-
var usageSchema2 = import_v419.z.object({
|
|
3719
|
-
input_tokens: import_v419.z.number(),
|
|
3720
|
-
input_tokens_details: import_v419.z.object({ cached_tokens: import_v419.z.number().nullish() }).nullish(),
|
|
3721
|
-
output_tokens: import_v419.z.number(),
|
|
3722
|
-
output_tokens_details: import_v419.z.object({ reasoning_tokens: import_v419.z.number().nullish() }).nullish()
|
|
3723
|
-
});
|
|
3724
|
-
var textDeltaChunkSchema = import_v419.z.object({
|
|
3725
|
-
type: import_v419.z.literal("response.output_text.delta"),
|
|
3726
|
-
item_id: import_v419.z.string(),
|
|
3727
|
-
delta: import_v419.z.string(),
|
|
3728
|
-
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
3729
|
-
});
|
|
3730
|
-
var errorChunkSchema = import_v419.z.object({
|
|
3731
|
-
type: import_v419.z.literal("error"),
|
|
3732
|
-
code: import_v419.z.string(),
|
|
3733
|
-
message: import_v419.z.string(),
|
|
3734
|
-
param: import_v419.z.string().nullish(),
|
|
3735
|
-
sequence_number: import_v419.z.number()
|
|
3736
|
-
});
|
|
3737
|
-
var responseFinishedChunkSchema = import_v419.z.object({
|
|
3738
|
-
type: import_v419.z.enum(["response.completed", "response.incomplete"]),
|
|
3739
|
-
response: import_v419.z.object({
|
|
3740
|
-
incomplete_details: import_v419.z.object({ reason: import_v419.z.string() }).nullish(),
|
|
3741
|
-
usage: usageSchema2,
|
|
3742
|
-
service_tier: import_v419.z.string().nullish()
|
|
3743
|
-
})
|
|
3744
|
-
});
|
|
3745
|
-
var responseCreatedChunkSchema = import_v419.z.object({
|
|
3746
|
-
type: import_v419.z.literal("response.created"),
|
|
3747
|
-
response: import_v419.z.object({
|
|
3748
|
-
id: import_v419.z.string(),
|
|
3749
|
-
created_at: import_v419.z.number(),
|
|
3750
|
-
model: import_v419.z.string(),
|
|
3751
|
-
service_tier: import_v419.z.string().nullish()
|
|
3752
|
-
})
|
|
3753
|
-
});
|
|
3754
|
-
var responseOutputItemAddedSchema = import_v419.z.object({
|
|
3755
|
-
type: import_v419.z.literal("response.output_item.added"),
|
|
3756
|
-
output_index: import_v419.z.number(),
|
|
3757
|
-
item: import_v419.z.discriminatedUnion("type", [
|
|
3758
|
-
import_v419.z.object({
|
|
3759
|
-
type: import_v419.z.literal("message"),
|
|
3760
|
-
id: import_v419.z.string()
|
|
3761
|
-
}),
|
|
3762
|
-
import_v419.z.object({
|
|
3763
|
-
type: import_v419.z.literal("reasoning"),
|
|
3764
|
-
id: import_v419.z.string(),
|
|
3765
|
-
encrypted_content: import_v419.z.string().nullish()
|
|
3766
|
-
}),
|
|
3767
|
-
import_v419.z.object({
|
|
3768
|
-
type: import_v419.z.literal("function_call"),
|
|
3769
|
-
id: import_v419.z.string(),
|
|
3770
|
-
call_id: import_v419.z.string(),
|
|
3771
|
-
name: import_v419.z.string(),
|
|
3772
|
-
arguments: import_v419.z.string()
|
|
3773
|
-
}),
|
|
3774
|
-
import_v419.z.object({
|
|
3775
|
-
type: import_v419.z.literal("web_search_call"),
|
|
3776
|
-
id: import_v419.z.string(),
|
|
3777
|
-
status: import_v419.z.string(),
|
|
3778
|
-
action: import_v419.z.object({
|
|
3779
|
-
type: import_v419.z.literal("search"),
|
|
3780
|
-
query: import_v419.z.string().optional()
|
|
3781
|
-
}).nullish()
|
|
3782
|
-
}),
|
|
3783
|
-
import_v419.z.object({
|
|
3784
|
-
type: import_v419.z.literal("computer_call"),
|
|
3785
|
-
id: import_v419.z.string(),
|
|
3786
|
-
status: import_v419.z.string()
|
|
3787
|
-
}),
|
|
3788
|
-
import_v419.z.object({
|
|
3789
|
-
type: import_v419.z.literal("file_search_call"),
|
|
3790
|
-
id: import_v419.z.string()
|
|
3791
|
-
}),
|
|
3792
|
-
import_v419.z.object({
|
|
3793
|
-
type: import_v419.z.literal("image_generation_call"),
|
|
3794
|
-
id: import_v419.z.string()
|
|
3795
|
-
}),
|
|
3796
|
-
import_v419.z.object({
|
|
3797
|
-
type: import_v419.z.literal("code_interpreter_call"),
|
|
3798
|
-
id: import_v419.z.string(),
|
|
3799
|
-
container_id: import_v419.z.string(),
|
|
3800
|
-
code: import_v419.z.string().nullable(),
|
|
3801
|
-
outputs: import_v419.z.array(
|
|
3802
|
-
import_v419.z.discriminatedUnion("type", [
|
|
3803
|
-
import_v419.z.object({ type: import_v419.z.literal("logs"), logs: import_v419.z.string() }),
|
|
3804
|
-
import_v419.z.object({ type: import_v419.z.literal("image"), url: import_v419.z.string() })
|
|
3805
|
-
])
|
|
3806
|
-
).nullable(),
|
|
3807
|
-
status: import_v419.z.string()
|
|
3808
|
-
})
|
|
3809
|
-
])
|
|
3810
|
-
});
|
|
3811
|
-
var responseOutputItemDoneSchema = import_v419.z.object({
|
|
3812
|
-
type: import_v419.z.literal("response.output_item.done"),
|
|
3813
|
-
output_index: import_v419.z.number(),
|
|
3814
|
-
item: import_v419.z.discriminatedUnion("type", [
|
|
3815
|
-
import_v419.z.object({
|
|
3816
|
-
type: import_v419.z.literal("message"),
|
|
3817
|
-
id: import_v419.z.string()
|
|
3818
|
-
}),
|
|
3819
|
-
import_v419.z.object({
|
|
3820
|
-
type: import_v419.z.literal("reasoning"),
|
|
3821
|
-
id: import_v419.z.string(),
|
|
3822
|
-
encrypted_content: import_v419.z.string().nullish()
|
|
3823
|
-
}),
|
|
3824
|
-
import_v419.z.object({
|
|
3825
|
-
type: import_v419.z.literal("function_call"),
|
|
3826
|
-
id: import_v419.z.string(),
|
|
3827
|
-
call_id: import_v419.z.string(),
|
|
3828
|
-
name: import_v419.z.string(),
|
|
3829
|
-
arguments: import_v419.z.string(),
|
|
3830
|
-
status: import_v419.z.literal("completed")
|
|
3831
|
-
}),
|
|
3832
|
-
codeInterpreterCallItem,
|
|
3833
|
-
imageGenerationCallItem,
|
|
3834
|
-
webSearchCallItem,
|
|
3835
|
-
fileSearchCallItem,
|
|
3836
|
-
localShellCallItem,
|
|
3837
|
-
import_v419.z.object({
|
|
3838
|
-
type: import_v419.z.literal("computer_call"),
|
|
3839
|
-
id: import_v419.z.string(),
|
|
3840
|
-
status: import_v419.z.literal("completed")
|
|
3841
|
-
})
|
|
3842
|
-
])
|
|
3843
|
-
});
-var responseFunctionCallArgumentsDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.function_call_arguments.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.delta"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var responseCodeInterpreterCallCodeDoneSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.code_interpreter_call_code.done"),
-  item_id: import_v419.z.string(),
-  output_index: import_v419.z.number(),
-  code: import_v419.z.string()
-});
-var responseAnnotationAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.output_text.annotation.added"),
-  annotation: import_v419.z.discriminatedUnion("type", [
-    import_v419.z.object({
-      type: import_v419.z.literal("url_citation"),
-      url: import_v419.z.string(),
-      title: import_v419.z.string()
-    }),
-    import_v419.z.object({
-      type: import_v419.z.literal("file_citation"),
-      file_id: import_v419.z.string(),
-      filename: import_v419.z.string().nullish(),
-      index: import_v419.z.number().nullish(),
-      start_index: import_v419.z.number().nullish(),
-      end_index: import_v419.z.number().nullish(),
-      quote: import_v419.z.string().nullish()
-    })
-  ])
-});
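Similarly, the removed annotation union above distinguishes url_citation and file_citation annotations on output text. A trimmed standalone re-creation (field names match the removed schema; the file_citation branch is shortened and the sample values are made up):

// Trimmed re-creation of the removed annotation union (illustrative).
const { z } = require("zod/v4");

const annotationSchema = z.discriminatedUnion("type", [
  z.object({
    type: z.literal("url_citation"),
    url: z.string(),
    title: z.string()
  }),
  z.object({
    type: z.literal("file_citation"),
    file_id: z.string(),
    filename: z.string().nullish(),
    quote: z.string().nullish()
  })
]);

// Hypothetical annotation payload for illustration.
const sample = { type: "url_citation", url: "https://example.com", title: "Example" };
console.log(annotationSchema.safeParse(sample).success); // true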
-var responseReasoningSummaryPartAddedSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_part.added"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number()
-});
-var responseReasoningSummaryTextDeltaSchema = import_v419.z.object({
-  type: import_v419.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_v419.z.string(),
-  summary_index: import_v419.z.number(),
-  delta: import_v419.z.string()
-});
-var openaiResponsesChunkSchema = import_v419.z.union([
-  textDeltaChunkSchema,
-  responseFinishedChunkSchema,
-  responseCreatedChunkSchema,
-  responseOutputItemAddedSchema,
-  responseOutputItemDoneSchema,
-  responseFunctionCallArgumentsDeltaSchema,
-  responseCodeInterpreterCallCodeDeltaSchema,
-  responseCodeInterpreterCallCodeDoneSchema,
-  responseAnnotationAddedSchema,
-  responseReasoningSummaryPartAddedSchema,
-  responseReasoningSummaryTextDeltaSchema,
-  errorChunkSchema,
-  import_v419.z.object({ type: import_v419.z.string() }).loose()
-  // fallback for unknown chunks
-]);
 function isTextDeltaChunk(chunk) {
   return chunk.type === "response.output_text.delta";
 }
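The removed openaiResponsesChunkSchema pairs a union of known event schemas with a .loose() fallback so unknown chunk types still parse, and isTextDeltaChunk (kept above as context) narrows text deltas by their type tag. A minimal sketch of the same pattern, assuming a reduced textDeltaChunkSchema with only item_id and delta (the package's own definition is not shown in this hunk):

// Minimal sketch of the union-with-fallback pattern (illustrative only;
// the bundle defines a much larger set of chunk schemas).
const { z } = require("zod/v4");

const textDeltaChunkSchema = z.object({
  type: z.literal("response.output_text.delta"),
  item_id: z.string(),
  delta: z.string()
});

const chunkSchema = z.union([
  textDeltaChunkSchema,
  z.object({ type: z.string() }).loose() // fallback for unknown chunks
]);

function isTextDeltaChunk(chunk) {
  return chunk.type === "response.output_text.delta";
}

// Hypothetical streaming events for illustration.
const events = [
  { type: "response.output_text.delta", item_id: "msg_1", delta: "Hel" },
  { type: "response.some_future_event", payload: { anything: true } }
];

for (const event of events) {
  const parsed = chunkSchema.safeParse(event);
  if (parsed.success && isTextDeltaChunk(parsed.data)) {
    process.stdout.write(parsed.data.delta); // only text deltas reach here
  }
}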
@@ -3981,47 +4340,6 @@ function getResponsesModelConfig(modelId) {
     isReasoningModel: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_v419.z.object({
-  include: import_v419.z.array(
-    import_v419.z.enum([
-      "reasoning.encrypted_content",
-      "file_search_call.results",
-      "message.output_text.logprobs"
-    ])
-  ).nullish(),
-  instructions: import_v419.z.string().nullish(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   *
-   * @see https://platform.openai.com/docs/api-reference/responses/create
-   * @see https://cookbook.openai.com/examples/using_logprobs
-   */
-  logprobs: import_v419.z.union([import_v419.z.boolean(), import_v419.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
-  /**
-   * The maximum number of total calls to built-in tools that can be processed in a response.
-   * This maximum number applies across all built-in tool calls, not per individual tool.
-   * Any further attempts to call a tool by the model will be ignored.
-   */
-  maxToolCalls: import_v419.z.number().nullish(),
-  metadata: import_v419.z.any().nullish(),
-  parallelToolCalls: import_v419.z.boolean().nullish(),
-  previousResponseId: import_v419.z.string().nullish(),
-  promptCacheKey: import_v419.z.string().nullish(),
-  reasoningEffort: import_v419.z.string().nullish(),
-  reasoningSummary: import_v419.z.string().nullish(),
-  safetyIdentifier: import_v419.z.string().nullish(),
-  serviceTier: import_v419.z.enum(["auto", "flex", "priority"]).nullish(),
-  store: import_v419.z.boolean().nullish(),
-  strictJsonSchema: import_v419.z.boolean().nullish(),
-  textVerbosity: import_v419.z.enum(["low", "medium", "high"]).nullish(),
-  user: import_v419.z.string().nullish()
-});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
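The removed openaiResponsesProviderOptionsSchema describes the provider options accepted for Responses models (include, logprobs, maxToolCalls, serviceTier, and so on). The sketch below re-creates a trimmed subset of it so a candidate options object can be checked outside the bundle; TOP_LOGPROBS_MAX is defined elsewhere in the bundle and is assumed to be 20 here, and the candidate values are illustrative:

// Trimmed re-creation of the removed provider-options schema (illustrative).
const { z } = require("zod/v4");

const TOP_LOGPROBS_MAX = 20; // assumption for this sketch

const responsesProviderOptions = z.object({
  include: z.array(
    z.enum([
      "reasoning.encrypted_content",
      "file_search_call.results",
      "message.output_text.logprobs"
    ])
  ).nullish(),
  logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
  maxToolCalls: z.number().nullish(),
  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
  store: z.boolean().nullish()
});

// Hypothetical options object for illustration.
const candidate = {
  logprobs: 5,
  serviceTier: "flex",
  textVerbosity: "low",
  include: ["message.output_text.logprobs"]
};

console.log(responsesProviderOptions.safeParse(candidate).success); // true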
@@ -4047,6 +4365,7 @@ var openaiResponsesProviderOptionsSchema = import_v419.z.object({
   openAITranscriptionProviderOptions,
   openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions,
+  openaiSpeechProviderOptionsSchema
 });
 //# sourceMappingURL=index.js.map
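The only change in this last hunk is that the annotated export list now includes openaiSpeechProviderOptionsSchema alongside openaiEmbeddingProviderOptions. Its field shape does not appear in this diff, so the probe below only assumes that the package maps ./internal to this file and that the export is a zod schema; safeParse never throws, so probing it with an empty object is safe:

// Probing the newly exported speech provider-options schema.
// Assumes "@ai-sdk/openai/internal" resolves to dist/internal/index.js;
// nothing is assumed about the schema's fields beyond it being a zod schema.
const { openaiSpeechProviderOptionsSchema } = require("@ai-sdk/openai/internal");

const result = openaiSpeechProviderOptionsSchema.safeParse({});
console.log(result.success, result.success ? result.data : result.error.issues);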