@ai-sdk/openai 3.0.0-beta.17 → 3.0.0-beta.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1341 -1033
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1295 -942
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -183
- package/dist/internal/index.d.ts +101 -183
- package/dist/internal/index.js +1338 -1028
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1307 -953
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
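
The bulk of the diff below replaces eagerly-built zod schemas in the bundle with lazily-constructed ones using helpers from @ai-sdk/provider-utils (lazyValidator, lazySchema, zodSchema). A minimal sketch of that pattern, assuming the helper signatures match the way they are called in the bundled output (this is an illustration, not the package's source):

  import { lazyValidator, zodSchema } from '@ai-sdk/provider-utils';
  import * as z from 'zod/v4';

  // Schema construction is deferred until the validator is first used,
  // which keeps module initialization cheap.
  const openaiErrorDataSchema = lazyValidator(() =>
    zodSchema(
      z.object({
        error: z.object({ message: z.string() }),
      }),
    ),
  );
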
package/dist/index.js
CHANGED
@@ -1,7 +1,9 @@
 "use strict";
+var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
 for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
 }
 return to;
 };
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+// If the importer is in node compatibility mode or this is not an ESM
+// file that has been converted to a CommonJS file using a Babel-
+// compatible transform (i.e. "__esModule" has not been set), then set
+// "default" to the CommonJS "module.exports" for node compatibility.
+isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+mod
+));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

 // src/index.ts
@@ -27,25 +37,24 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/openai-provider.ts
-var
+var import_provider_utils30 = require("@ai-sdk/provider-utils");

 // src/chat/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
-var
-var import_v43 = require("zod/v4");
+var import_provider_utils5 = require("@ai-sdk/provider-utils");

 // src/openai-error.ts
-var
+var z = __toESM(require("zod/v4"));
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema =
-error:
-message:
+var openaiErrorDataSchema = z.object({
+error: z.object({
+message: z.string(),
 // The additional information below is handled loosely to support
 // OpenAI-compatible providers that have slightly different error
 // responses:
-type:
-param:
-code:
+type: z.string().nullish(),
+param: z.any().nullish(),
+code: z.union([z.string(), z.number()]).nullish()
 })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -265,95 +274,240 @@ function mapOpenAIFinishReason(finishReason) {
 }
 }

+// src/chat/openai-chat-api.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var z2 = __toESM(require("zod/v4"));
+var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+() => (0, import_provider_utils3.zodSchema)(
+z2.object({
+id: z2.string().nullish(),
+created: z2.number().nullish(),
+model: z2.string().nullish(),
+choices: z2.array(
+z2.object({
+message: z2.object({
+role: z2.literal("assistant").nullish(),
+content: z2.string().nullish(),
+tool_calls: z2.array(
+z2.object({
+id: z2.string().nullish(),
+type: z2.literal("function"),
+function: z2.object({
+name: z2.string(),
+arguments: z2.string()
+})
+})
+).nullish(),
+annotations: z2.array(
+z2.object({
+type: z2.literal("url_citation"),
+start_index: z2.number(),
+end_index: z2.number(),
+url: z2.string(),
+title: z2.string()
+})
+).nullish()
+}),
+index: z2.number(),
+logprobs: z2.object({
+content: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number(),
+top_logprobs: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number()
+})
+)
+})
+).nullish()
+}).nullish(),
+finish_reason: z2.string().nullish()
+})
+),
+usage: z2.object({
+prompt_tokens: z2.number().nullish(),
+completion_tokens: z2.number().nullish(),
+total_tokens: z2.number().nullish(),
+prompt_tokens_details: z2.object({
+cached_tokens: z2.number().nullish()
+}).nullish(),
+completion_tokens_details: z2.object({
+reasoning_tokens: z2.number().nullish(),
+accepted_prediction_tokens: z2.number().nullish(),
+rejected_prediction_tokens: z2.number().nullish()
+}).nullish()
+}).nullish()
+})
+)
+);
+var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+() => (0, import_provider_utils3.zodSchema)(
+z2.union([
+z2.object({
+id: z2.string().nullish(),
+created: z2.number().nullish(),
+model: z2.string().nullish(),
+choices: z2.array(
+z2.object({
+delta: z2.object({
+role: z2.enum(["assistant"]).nullish(),
+content: z2.string().nullish(),
+tool_calls: z2.array(
+z2.object({
+index: z2.number(),
+id: z2.string().nullish(),
+type: z2.literal("function").nullish(),
+function: z2.object({
+name: z2.string().nullish(),
+arguments: z2.string().nullish()
+})
+})
+).nullish(),
+annotations: z2.array(
+z2.object({
+type: z2.literal("url_citation"),
+start_index: z2.number(),
+end_index: z2.number(),
+url: z2.string(),
+title: z2.string()
+})
+).nullish()
+}).nullish(),
+logprobs: z2.object({
+content: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number(),
+top_logprobs: z2.array(
+z2.object({
+token: z2.string(),
+logprob: z2.number()
+})
+)
+})
+).nullish()
+}).nullish(),
+finish_reason: z2.string().nullish(),
+index: z2.number()
+})
+),
+usage: z2.object({
+prompt_tokens: z2.number().nullish(),
+completion_tokens: z2.number().nullish(),
+total_tokens: z2.number().nullish(),
+prompt_tokens_details: z2.object({
+cached_tokens: z2.number().nullish()
+}).nullish(),
+completion_tokens_details: z2.object({
+reasoning_tokens: z2.number().nullish(),
+accepted_prediction_tokens: z2.number().nullish(),
+rejected_prediction_tokens: z2.number().nullish()
+}).nullish()
+}).nullish()
+}),
+openaiErrorDataSchema
+])
+)
+);
+
 // src/chat/openai-chat-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var z3 = __toESM(require("zod/v4"));
+var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
+() => (0, import_provider_utils4.zodSchema)(
+z3.object({
+/**
+* Modify the likelihood of specified tokens appearing in the completion.
+*
+* Accepts a JSON object that maps tokens (specified by their token ID in
+* the GPT tokenizer) to an associated bias value from -100 to 100.
+*/
+logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
+/**
+* Return the log probabilities of the tokens.
+*
+* Setting to true will return the log probabilities of the tokens that
+* were generated.
+*
+* Setting to a number will return the log probabilities of the top n
+* tokens that were generated.
+*/
+logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
+/**
+* Whether to enable parallel function calling during tool use. Default to true.
+*/
+parallelToolCalls: z3.boolean().optional(),
+/**
+* A unique identifier representing your end-user, which can help OpenAI to
+* monitor and detect abuse.
+*/
+user: z3.string().optional(),
+/**
+* Reasoning effort for reasoning models. Defaults to `medium`.
+*/
+reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+/**
+* Maximum number of completion tokens to generate. Useful for reasoning models.
+*/
+maxCompletionTokens: z3.number().optional(),
+/**
+* Whether to enable persistence in responses API.
+*/
+store: z3.boolean().optional(),
+/**
+* Metadata to associate with the request.
+*/
+metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
+/**
+* Parameters for prediction mode.
+*/
+prediction: z3.record(z3.string(), z3.any()).optional(),
+/**
+* Whether to use structured outputs.
+*
+* @default true
+*/
+structuredOutputs: z3.boolean().optional(),
+/**
+* Service tier for the request.
+* - 'auto': Default service tier. The request will be processed with the service tier configured in the
+* Project settings. Unless otherwise configured, the Project will use 'default'.
+* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+* - 'default': The request will be processed with the standard pricing and performance for the selected model.
+*
+* @default 'auto'
+*/
+serviceTier: z3.enum(["auto", "flex", "priority", "default"]).optional(),
+/**
+* Whether to use strict JSON schema validation.
+*
+* @default false
+*/
+strictJsonSchema: z3.boolean().optional(),
+/**
+* Controls the verbosity of the model's responses.
+* Lower values will result in more concise responses, while higher values will result in more verbose responses.
+*/
+textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
+/**
+* A cache key for prompt caching. Allows manual control over prompt caching behavior.
+* Useful for improving cache hit rates and working around automatic caching issues.
+*/
+promptCacheKey: z3.string().optional(),
+/**
+* A stable identifier used to help detect users of your application
+* that may be violating OpenAI's usage policies. The IDs should be a
+* string that uniquely identifies each user. We recommend hashing their
+* username or email address, in order to avoid sending us any identifying
+* information.
+*/
+safetyIdentifier: z3.string().optional()
+})
+)
+);

 // src/chat/openai-chat-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
@@ -446,7 +600,7 @@ var OpenAIChatLanguageModel = class {
 }) {
 var _a, _b, _c, _d;
 const warnings = [];
-const openaiOptions = (_a = await (0,
+const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiChatLanguageModelOptions
@@ -625,15 +779,15 @@ var OpenAIChatLanguageModel = class {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
 openaiChatResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -648,7 +802,7 @@ var OpenAIChatLanguageModel = class {
 for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
 content.push({
 type: "tool-call",
-toolCallId: (_b = toolCall.id) != null ? _b : (0,
+toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
 input: toolCall.function.arguments
 });
@@ -657,7 +811,7 @@ var OpenAIChatLanguageModel = class {
 content.push({
 type: "source",
 sourceType: "url",
-id: (0,
+id: (0, import_provider_utils5.generateId)(),
 url: annotation.url,
 title: annotation.title
 });
@@ -703,15 +857,15 @@ var OpenAIChatLanguageModel = class {
 include_usage: true
 }
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
 openaiChatChunkSchema
 ),
 abortSignal: options.abortSignal,
@@ -836,14 +990,14 @@ var OpenAIChatLanguageModel = class {
 delta: toolCall2.function.arguments
 });
 }
-if ((0,
+if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
 controller.enqueue({
 type: "tool-input-end",
 id: toolCall2.id
 });
 controller.enqueue({
 type: "tool-call",
-toolCallId: (_q = toolCall2.id) != null ? _q : (0,
+toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
 toolName: toolCall2.function.name,
 input: toolCall2.function.arguments
 });
@@ -864,14 +1018,14 @@ var OpenAIChatLanguageModel = class {
 id: toolCall.id,
 delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
 });
-if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
+if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
 controller.enqueue({
 type: "tool-input-end",
 id: toolCall.id
 });
 controller.enqueue({
 type: "tool-call",
-toolCallId: (_x = toolCall.id) != null ? _x : (0,
+toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
 input: toolCall.function.arguments
 });
@@ -884,7 +1038,7 @@ var OpenAIChatLanguageModel = class {
 controller.enqueue({
 type: "source",
 sourceType: "url",
-id: (0,
+id: (0, import_provider_utils5.generateId)(),
 url: annotation.url,
 title: annotation.title
 });
@@ -909,121 +1063,6 @@ var OpenAIChatLanguageModel = class {
 };
 }
 };
-var openaiTokenUsageSchema = import_v43.z.object({
-prompt_tokens: import_v43.z.number().nullish(),
-completion_tokens: import_v43.z.number().nullish(),
-total_tokens: import_v43.z.number().nullish(),
-prompt_tokens_details: import_v43.z.object({
-cached_tokens: import_v43.z.number().nullish()
-}).nullish(),
-completion_tokens_details: import_v43.z.object({
-reasoning_tokens: import_v43.z.number().nullish(),
-accepted_prediction_tokens: import_v43.z.number().nullish(),
-rejected_prediction_tokens: import_v43.z.number().nullish()
-}).nullish()
-}).nullish();
-var openaiChatResponseSchema = import_v43.z.object({
-id: import_v43.z.string().nullish(),
-created: import_v43.z.number().nullish(),
-model: import_v43.z.string().nullish(),
-choices: import_v43.z.array(
-import_v43.z.object({
-message: import_v43.z.object({
-role: import_v43.z.literal("assistant").nullish(),
-content: import_v43.z.string().nullish(),
-tool_calls: import_v43.z.array(
-import_v43.z.object({
-id: import_v43.z.string().nullish(),
-type: import_v43.z.literal("function"),
-function: import_v43.z.object({
-name: import_v43.z.string(),
-arguments: import_v43.z.string()
-})
-})
-).nullish(),
-annotations: import_v43.z.array(
-import_v43.z.object({
-type: import_v43.z.literal("url_citation"),
-start_index: import_v43.z.number(),
-end_index: import_v43.z.number(),
-url: import_v43.z.string(),
-title: import_v43.z.string()
-})
-).nullish()
-}),
-index: import_v43.z.number(),
-logprobs: import_v43.z.object({
-content: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number(),
-top_logprobs: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number()
-})
-)
-})
-).nullish()
-}).nullish(),
-finish_reason: import_v43.z.string().nullish()
-})
-),
-usage: openaiTokenUsageSchema
-});
-var openaiChatChunkSchema = import_v43.z.union([
-import_v43.z.object({
-id: import_v43.z.string().nullish(),
-created: import_v43.z.number().nullish(),
-model: import_v43.z.string().nullish(),
-choices: import_v43.z.array(
-import_v43.z.object({
-delta: import_v43.z.object({
-role: import_v43.z.enum(["assistant"]).nullish(),
-content: import_v43.z.string().nullish(),
-tool_calls: import_v43.z.array(
-import_v43.z.object({
-index: import_v43.z.number(),
-id: import_v43.z.string().nullish(),
-type: import_v43.z.literal("function").nullish(),
-function: import_v43.z.object({
-name: import_v43.z.string().nullish(),
-arguments: import_v43.z.string().nullish()
-})
-})
-).nullish(),
-annotations: import_v43.z.array(
-import_v43.z.object({
-type: import_v43.z.literal("url_citation"),
-start_index: import_v43.z.number(),
-end_index: import_v43.z.number(),
-url: import_v43.z.string(),
-title: import_v43.z.string()
-})
-).nullish()
-}).nullish(),
-logprobs: import_v43.z.object({
-content: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number(),
-top_logprobs: import_v43.z.array(
-import_v43.z.object({
-token: import_v43.z.string(),
-logprob: import_v43.z.number()
-})
-)
-})
-).nullish()
-}).nullish(),
-finish_reason: import_v43.z.string().nullish(),
-index: import_v43.z.number()
-})
-),
-usage: openaiTokenUsageSchema
-}),
-openaiErrorDataSchema
-]);
 function isReasoningModel(modelId) {
 return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
 }
@@ -1074,8 +1113,7 @@ var reasoningModels = {
 };

 // src/completion/openai-completion-language-model.ts
-var
-var import_v45 = require("zod/v4");
+var import_provider_utils8 = require("@ai-sdk/provider-utils");

 // src/completion/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -1182,48 +1220,111 @@ function mapOpenAIFinishReason2(finishReason) {
 }
 }

+// src/completion/openai-completion-api.ts
+var z4 = __toESM(require("zod/v4"));
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
+() => (0, import_provider_utils6.zodSchema)(
+z4.object({
+id: z4.string().nullish(),
+created: z4.number().nullish(),
+model: z4.string().nullish(),
+choices: z4.array(
+z4.object({
+text: z4.string(),
+finish_reason: z4.string(),
+logprobs: z4.object({
+tokens: z4.array(z4.string()),
+token_logprobs: z4.array(z4.number()),
+top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+}).nullish()
+})
+),
+usage: z4.object({
+prompt_tokens: z4.number(),
+completion_tokens: z4.number(),
+total_tokens: z4.number()
+}).nullish()
+})
+)
+);
+var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
+() => (0, import_provider_utils6.zodSchema)(
+z4.union([
+z4.object({
+id: z4.string().nullish(),
+created: z4.number().nullish(),
+model: z4.string().nullish(),
+choices: z4.array(
+z4.object({
+text: z4.string(),
+finish_reason: z4.string().nullish(),
+index: z4.number(),
+logprobs: z4.object({
+tokens: z4.array(z4.string()),
+token_logprobs: z4.array(z4.number()),
+top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
+}).nullish()
+})
+),
+usage: z4.object({
+prompt_tokens: z4.number(),
+completion_tokens: z4.number(),
+total_tokens: z4.number()
+}).nullish()
+}),
+openaiErrorDataSchema
+])
+)
+);
+
 // src/completion/openai-completion-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var z5 = __toESM(require("zod/v4"));
+var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
+() => (0, import_provider_utils7.zodSchema)(
+z5.object({
+/**
+Echo back the prompt in addition to the completion.
+*/
+echo: z5.boolean().optional(),
+/**
+Modify the likelihood of specified tokens appearing in the completion.
+
+Accepts a JSON object that maps tokens (specified by their token ID in
+the GPT tokenizer) to an associated bias value from -100 to 100. You
+can use this tokenizer tool to convert text to token IDs. Mathematically,
+the bias is added to the logits generated by the model prior to sampling.
+The exact effect will vary per model, but values between -1 and 1 should
+decrease or increase likelihood of selection; values like -100 or 100
+should result in a ban or exclusive selection of the relevant token.
+
+As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+token from being generated.
+*/
+logitBias: z5.record(z5.string(), z5.number()).optional(),
+/**
+The suffix that comes after a completion of inserted text.
+*/
+suffix: z5.string().optional(),
+/**
+A unique identifier representing your end-user, which can help OpenAI to
+monitor and detect abuse. Learn more.
+*/
+user: z5.string().optional(),
+/**
+Return the log probabilities of the tokens. Including logprobs will increase
+the response size and can slow down response times. However, it can
+be useful to better understand how the model is behaving.
+Setting to true will return the log probabilities of the tokens that
+were generated.
+Setting to a number will return the log probabilities of the top n
+tokens that were generated.
+*/
+logprobs: z5.union([z5.boolean(), z5.number()]).optional()
+})
+)
+);

 // src/completion/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
@@ -1258,12 +1359,12 @@ var OpenAICompletionLanguageModel = class {
 }) {
 const warnings = [];
 const openaiOptions = {
-...await (0,
+...await (0, import_provider_utils8.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiCompletionProviderOptions
 }),
-...await (0,
+...await (0, import_provider_utils8.parseProviderOptions)({
 provider: this.providerOptionsName,
 providerOptions,
 schema: openaiCompletionProviderOptions
@@ -1319,15 +1420,15 @@ var OpenAICompletionLanguageModel = class {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils8.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
 body: args,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
 openaiCompletionResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -1365,15 +1466,15 @@ var OpenAICompletionLanguageModel = class {
 include_usage: true
 }
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
 openaiCompletionChunkSchema
 ),
 abortSignal: options.abortSignal,
@@ -1454,69 +1555,42 @@ var OpenAICompletionLanguageModel = class {
 };
 }
 };
-var usageSchema = import_v45.z.object({
-prompt_tokens: import_v45.z.number(),
-completion_tokens: import_v45.z.number(),
-total_tokens: import_v45.z.number()
-});
-var openaiCompletionResponseSchema = import_v45.z.object({
-id: import_v45.z.string().nullish(),
-created: import_v45.z.number().nullish(),
-model: import_v45.z.string().nullish(),
-choices: import_v45.z.array(
-import_v45.z.object({
-text: import_v45.z.string(),
-finish_reason: import_v45.z.string(),
-logprobs: import_v45.z.object({
-tokens: import_v45.z.array(import_v45.z.string()),
-token_logprobs: import_v45.z.array(import_v45.z.number()),
-top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-}).nullish()
-})
-),
-usage: usageSchema.nullish()
-});
-var openaiCompletionChunkSchema = import_v45.z.union([
-import_v45.z.object({
-id: import_v45.z.string().nullish(),
-created: import_v45.z.number().nullish(),
-model: import_v45.z.string().nullish(),
-choices: import_v45.z.array(
-import_v45.z.object({
-text: import_v45.z.string(),
-finish_reason: import_v45.z.string().nullish(),
-index: import_v45.z.number(),
-logprobs: import_v45.z.object({
-tokens: import_v45.z.array(import_v45.z.string()),
-token_logprobs: import_v45.z.array(import_v45.z.number()),
-top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
-}).nullish()
-})
-),
-usage: usageSchema.nullish()
-}),
-openaiErrorDataSchema
-]);

 // src/embedding/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
-var
-var import_v47 = require("zod/v4");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");

 // src/embedding/openai-embedding-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var z6 = __toESM(require("zod/v4"));
+var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
+() => (0, import_provider_utils9.zodSchema)(
+z6.object({
+/**
+The number of dimensions the resulting output embeddings should have.
+Only supported in text-embedding-3 and later models.
+*/
+dimensions: z6.number().optional(),
+/**
+A unique identifier representing your end-user, which can help OpenAI to
+monitor and detect abuse. Learn more.
+*/
+user: z6.string().optional()
+})
+)
+);
+
+// src/embedding/openai-embedding-api.ts
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var z7 = __toESM(require("zod/v4"));
+var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
+() => (0, import_provider_utils10.zodSchema)(
+z7.object({
+data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+})
+)
+);

 // src/embedding/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
@@ -1545,7 +1619,7 @@ var OpenAIEmbeddingModel = class {
 values
 });
 }
-const openaiOptions = (_a = await (0,
+const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiEmbeddingProviderOptions
@@ -1554,12 +1628,12 @@ var OpenAIEmbeddingModel = class {
 responseHeaders,
 value: response,
 rawValue
-} = await (0,
+} = await (0, import_provider_utils11.postJsonToApi)({
 url: this.config.url({
 path: "/embeddings",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
 body: {
 model: this.modelId,
 input: values,
@@ -1568,7 +1642,7 @@ var OpenAIEmbeddingModel = class {
 user: openaiOptions.user
 },
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
 openaiTextEmbeddingResponseSchema
 ),
 abortSignal,
@@ -1581,14 +1655,25 @@ var OpenAIEmbeddingModel = class {
 };
 }
 };
-var openaiTextEmbeddingResponseSchema = import_v47.z.object({
-data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
-usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
-});

 // src/image/openai-image-model.ts
-var
-
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
+
+// src/image/openai-image-api.ts
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var z8 = __toESM(require("zod/v4"));
+var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
+() => (0, import_provider_utils12.zodSchema)(
+z8.object({
+data: z8.array(
+z8.object({
+b64_json: z8.string(),
+revised_prompt: z8.string().optional()
+})
+)
+})
+)
+);

 // src/image/openai-image-options.ts
 var modelMaxImagesPerCall = {
@@ -1639,12 +1724,12 @@ var OpenAIImageModel = class {
 warnings.push({ type: "unsupported-setting", setting: "seed" });
 }
 const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-const { value: response, responseHeaders } = await (0,
+const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
 url: this.config.url({
 path: "/images/generations",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
 body: {
 model: this.modelId,
 prompt,
@@ -1654,7 +1739,7 @@ var OpenAIImageModel = class {
 ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
 },
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
 openaiImageResponseSchema
 ),
 abortSignal,
@@ -1680,36 +1765,43 @@ var OpenAIImageModel = class {
 };
 }
 };
-var openaiImageResponseSchema = import_v48.z.object({
-data: import_v48.z.array(
-import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
-)
-});

 // src/tool/code-interpreter.ts
-var
-var
-var codeInterpreterInputSchema =
-
-
-
-
-outputs: import_v49.z.array(
-import_v49.z.discriminatedUnion("type", [
-import_v49.z.object({ type: import_v49.z.literal("logs"), logs: import_v49.z.string() }),
-import_v49.z.object({ type: import_v49.z.literal("image"), url: import_v49.z.string() })
-])
-).nullish()
-});
-var codeInterpreterArgsSchema = import_v49.z.object({
-container: import_v49.z.union([
-import_v49.z.string(),
-import_v49.z.object({
-fileIds: import_v49.z.array(import_v49.z.string()).optional()
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var z9 = __toESM(require("zod/v4"));
+var codeInterpreterInputSchema = (0, import_provider_utils14.lazySchema)(
+() => (0, import_provider_utils14.zodSchema)(
+z9.object({
+code: z9.string().nullish(),
+containerId: z9.string()
 })
-
-
-var
+)
+);
+var codeInterpreterOutputSchema = (0, import_provider_utils14.lazySchema)(
+() => (0, import_provider_utils14.zodSchema)(
+z9.object({
+outputs: z9.array(
+z9.discriminatedUnion("type", [
+z9.object({ type: z9.literal("logs"), logs: z9.string() }),
+z9.object({ type: z9.literal("image"), url: z9.string() })
+])
+).nullish()
+})
+)
+);
+var codeInterpreterArgsSchema = (0, import_provider_utils14.lazySchema)(
+() => (0, import_provider_utils14.zodSchema)(
+z9.object({
+container: z9.union([
+z9.string(),
+z9.object({
+fileIds: z9.array(z9.string()).optional()
+})
+]).optional()
+})
+)
+);
+var codeInterpreterToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactoryWithOutputSchema)({
 id: "openai.code_interpreter",
 name: "code_interpreter",
 inputSchema: codeInterpreterInputSchema,
@@ -1720,72 +1812,85 @@ var codeInterpreter = (args = {}) => {
 };

 // src/tool/file-search.ts
-var
-var
-var comparisonFilterSchema =
-key:
-type:
-value:
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var z10 = __toESM(require("zod/v4"));
+var comparisonFilterSchema = z10.object({
+key: z10.string(),
+type: z10.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+value: z10.union([z10.string(), z10.number(), z10.boolean()])
 });
-var compoundFilterSchema =
-type:
-filters:
-
+var compoundFilterSchema = z10.object({
+type: z10.enum(["and", "or"]),
+filters: z10.array(
+z10.union([comparisonFilterSchema, z10.lazy(() => compoundFilterSchema)])
 )
 });
-var fileSearchArgsSchema =
-
-
-
-
-
-
-
-})
-
-queries: import_v410.z.array(import_v410.z.string()),
-results: import_v410.z.array(
-import_v410.z.object({
-attributes: import_v410.z.record(import_v410.z.string(), import_v410.z.unknown()),
-fileId: import_v410.z.string(),
-filename: import_v410.z.string(),
-score: import_v410.z.number(),
-text: import_v410.z.string()
+var fileSearchArgsSchema = (0, import_provider_utils15.lazySchema)(
+() => (0, import_provider_utils15.zodSchema)(
+z10.object({
+vectorStoreIds: z10.array(z10.string()),
+maxNumResults: z10.number().optional(),
+ranking: z10.object({
+ranker: z10.string().optional(),
+scoreThreshold: z10.number().optional()
+}).optional(),
+filters: z10.union([comparisonFilterSchema, compoundFilterSchema]).optional()
 })
-)
-
-var
+)
+);
+var fileSearchOutputSchema = (0, import_provider_utils15.lazySchema)(
+() => (0, import_provider_utils15.zodSchema)(
+z10.object({
+queries: z10.array(z10.string()),
+results: z10.array(
+z10.object({
+attributes: z10.record(z10.string(), z10.unknown()),
+fileId: z10.string(),
+filename: z10.string(),
+score: z10.number(),
+text: z10.string()
+})
+).nullable()
+})
+)
+);
+var fileSearch = (0, import_provider_utils15.createProviderDefinedToolFactoryWithOutputSchema)({
 id: "openai.file_search",
 name: "file_search",
-inputSchema:
+inputSchema: z10.object({}),
 outputSchema: fileSearchOutputSchema
 });

 // src/tool/image-generation.ts
-var
-var
-var imageGenerationArgsSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils16 = require("@ai-sdk/provider-utils");
+var z11 = __toESM(require("zod/v4"));
+var imageGenerationArgsSchema = (0, import_provider_utils16.lazySchema)(
+() => (0, import_provider_utils16.zodSchema)(
+z11.object({
+background: z11.enum(["auto", "opaque", "transparent"]).optional(),
+inputFidelity: z11.enum(["low", "high"]).optional(),
+inputImageMask: z11.object({
+fileId: z11.string().optional(),
+imageUrl: z11.string().optional()
+}).optional(),
+model: z11.string().optional(),
+moderation: z11.enum(["auto"]).optional(),
+outputCompression: z11.number().int().min(0).max(100).optional(),
+outputFormat: z11.enum(["png", "jpeg", "webp"]).optional(),
+partialImages: z11.number().int().min(0).max(3).optional(),
+quality: z11.enum(["auto", "low", "medium", "high"]).optional(),
+size: z11.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
+}).strict()
+)
+);
+var imageGenerationInputSchema = (0, import_provider_utils16.lazySchema)(() => (0, import_provider_utils16.zodSchema)(z11.object({})));
+var imageGenerationOutputSchema = (0, import_provider_utils16.lazySchema)(
+() => (0, import_provider_utils16.zodSchema)(z11.object({ result: z11.string() }))
+);
+var imageGenerationToolFactory = (0, import_provider_utils16.createProviderDefinedToolFactoryWithOutputSchema)({
 id: "openai.image_generation",
 name: "image_generation",
-inputSchema:
+inputSchema: imageGenerationInputSchema,
 outputSchema: imageGenerationOutputSchema
 });
 var imageGeneration = (args = {}) => {
@@ -1793,22 +1898,26 @@ var imageGeneration = (args = {}) => {
 };

 // src/tool/local-shell.ts
-var
-var
-var localShellInputSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
+var z12 = __toESM(require("zod/v4"));
+var localShellInputSchema = (0, import_provider_utils17.lazySchema)(
+() => (0, import_provider_utils17.zodSchema)(
+z12.object({
+action: z12.object({
+type: z12.literal("exec"),
+command: z12.array(z12.string()),
+timeoutMs: z12.number().optional(),
+user: z12.string().optional(),
+workingDirectory: z12.string().optional(),
+env: z12.record(z12.string(), z12.string()).optional()
+})
+})
+)
+);
+var localShellOutputSchema = (0, import_provider_utils17.lazySchema)(
+() => (0, import_provider_utils17.zodSchema)(z12.object({ output: z12.string() }))
+);
+var localShell = (0, import_provider_utils17.createProviderDefinedToolFactoryWithOutputSchema)({
 id: "openai.local_shell",
 name: "local_shell",
 inputSchema: localShellInputSchema,
@@ -1816,103 +1925,121 @@ var localShell = (0, import_provider_utils10.createProviderDefinedToolFactoryWit
|
|
|
1816
1925
|
});
|
|
1817
1926
|
|
|
1818
1927
|
// src/tool/web-search.ts
|
|
1819
|
-
var
|
|
1820
|
-
var
|
|
1821
|
-
var webSearchArgsSchema =
|
|
1822
|
-
|
|
1823
|
-
|
|
1824
|
-
|
|
1825
|
-
|
|
1826
|
-
|
|
1827
|
-
|
|
1828
|
-
|
|
1829
|
-
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1928
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
|
1929
|
+
var z13 = __toESM(require("zod/v4"));
|
|
1930
|
+
var webSearchArgsSchema = (0, import_provider_utils18.lazySchema)(
|
|
1931
|
+
() => (0, import_provider_utils18.zodSchema)(
|
|
1932
|
+
z13.object({
|
|
1933
|
+
filters: z13.object({
|
|
1934
|
+
allowedDomains: z13.array(z13.string()).optional()
|
|
1935
|
+
}).optional(),
|
|
1936
|
+
searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
|
|
1937
|
+
userLocation: z13.object({
|
|
1938
|
+
type: z13.literal("approximate"),
|
|
1939
|
+
country: z13.string().optional(),
|
|
1940
|
+
city: z13.string().optional(),
|
|
1941
|
+
region: z13.string().optional(),
|
|
1942
|
+
timezone: z13.string().optional()
|
|
1943
|
+
}).optional()
|
|
1944
|
+
})
|
|
1945
|
+
)
|
|
1946
|
+
);
|
|
1947
|
+
var webSearchInputSchema = (0, import_provider_utils18.lazySchema)(
|
|
1948
|
+
() => (0, import_provider_utils18.zodSchema)(
|
|
1949
|
+
z13.object({
|
|
1950
|
+
action: z13.discriminatedUnion("type", [
|
|
1951
|
+
z13.object({
|
|
1952
|
+
type: z13.literal("search"),
|
|
1953
|
+
query: z13.string().nullish()
|
|
1954
|
+
}),
|
|
1955
|
+
z13.object({
|
|
1956
|
+
type: z13.literal("open_page"),
|
|
1957
|
+
url: z13.string()
|
|
1958
|
+
}),
|
|
1959
|
+
z13.object({
|
|
1960
|
+
type: z13.literal("find"),
|
|
1961
|
+
url: z13.string(),
|
|
1962
|
+
pattern: z13.string()
|
|
1963
|
+
})
|
|
1964
|
+
]).nullish()
|
|
1965
|
+
})
|
|
1966
|
+
)
|
|
1967
|
+
);
|
|
1968
|
+
var webSearchToolFactory = (0, import_provider_utils18.createProviderDefinedToolFactory)({
|
|
1835
1969
|
id: "openai.web_search",
|
|
1836
1970
|
name: "web_search",
|
|
1837
|
-
inputSchema:
|
|
1838
|
-
action: import_v413.z.discriminatedUnion("type", [
|
|
1839
|
-
import_v413.z.object({
|
|
1840
|
-
type: import_v413.z.literal("search"),
|
|
1841
|
-
query: import_v413.z.string().nullish()
|
|
1842
|
-
}),
|
|
1843
|
-
import_v413.z.object({
|
|
1844
|
-
type: import_v413.z.literal("open_page"),
|
|
1845
|
-
url: import_v413.z.string()
|
|
1846
|
-
}),
|
|
1847
|
-
import_v413.z.object({
|
|
1848
|
-
type: import_v413.z.literal("find"),
|
|
1849
|
-
url: import_v413.z.string(),
|
|
1850
|
-
pattern: import_v413.z.string()
|
|
1851
|
-
})
|
|
1852
|
-
]).nullish()
- })
+ inputSchema: webSearchInputSchema
  });
  var webSearch = (args = {}) => {
  return webSearchToolFactory(args);
  };

  // src/tool/web-search-preview.ts
- var
- var
- var webSearchPreviewArgsSchema =
- … (old lines 1863–1896 removed; their content is not captured in this diff extract)
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
+ var z14 = __toESM(require("zod/v4"));
+ var webSearchPreviewArgsSchema = (0, import_provider_utils19.lazySchema)(
+ () => (0, import_provider_utils19.zodSchema)(
+ z14.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: z14.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: z14.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: z14.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: z14.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: z14.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: z14.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: z14.string().optional()
+ }).optional()
+ })
+ )
+ );
+ var webSearchPreviewInputSchema = (0, import_provider_utils19.lazySchema)(
+ () => (0, import_provider_utils19.zodSchema)(
+ z14.object({
+ action: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("search"),
+ query: z14.string().nullish()
+ }),
+ z14.object({
+ type: z14.literal("open_page"),
+ url: z14.string()
+ }),
+ z14.object({
+ type: z14.literal("find"),
+ url: z14.string(),
+ pattern: z14.string()
+ })
+ ]).nullish()
+ })
+ )
+ );
+ var webSearchPreview = (0, import_provider_utils19.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
- inputSchema:
- action: import_v414.z.discriminatedUnion("type", [
- import_v414.z.object({
- type: import_v414.z.literal("search"),
- query: import_v414.z.string().nullish()
- }),
- import_v414.z.object({
- type: import_v414.z.literal("open_page"),
- url: import_v414.z.string()
- }),
- import_v414.z.object({
- type: import_v414.z.literal("find"),
- url: import_v414.z.string(),
- pattern: import_v414.z.string()
- })
- ]).nullish()
- })
+ inputSchema: webSearchPreviewInputSchema
  });

  // src/openai-tools.ts
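The web_search_preview tool factory above now builds its argument and input schemas lazily via lazySchema/zodSchema instead of inline zod objects. A minimal usage sketch, assuming the factory is still exposed as openai.tools.webSearchPreview on the responses model as in earlier 3.0.0 betas (argument names follow webSearchPreviewArgsSchema above; values are illustrative):

  import { openai } from '@ai-sdk/openai';
  import { generateText } from 'ai';

  const result = await generateText({
    model: openai.responses('gpt-4o-mini'),
    prompt: 'Summarize the latest AI SDK release notes.',
    tools: {
      // key matches the provider-defined tool name "web_search_preview"
      web_search_preview: openai.tools.webSearchPreview({
        searchContextSize: 'low',
        userLocation: { type: 'approximate', country: 'US', city: 'Minneapolis' },
      }),
    },
  });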
|
|
@@ -1995,13 +2122,12 @@ var openaiTools = {

  // src/responses/openai-responses-language-model.ts
  var import_provider8 = require("@ai-sdk/provider");
- var
- var import_v416 = require("zod/v4");
+ var import_provider_utils24 = require("@ai-sdk/provider-utils");

  // src/responses/convert-to-openai-responses-input.ts
  var import_provider6 = require("@ai-sdk/provider");
- var
- var
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var z15 = __toESM(require("zod/v4"));
  function isFileId(data, prefixes) {
  if (!prefixes) return false;
  return prefixes.some((prefix) => data.startsWith(prefix));
|
|
@@ -2059,7 +2185,7 @@ async function convertToOpenAIResponsesInput({
  return {
  type: "input_image",
  ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
- image_url: `data:${mediaType};base64,${(0,
+ image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
  },
  detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
@@ -2074,7 +2200,7 @@ async function convertToOpenAIResponsesInput({
  type: "input_file",
  ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
  filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${(0,
+ file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
  }
  };
  } else {
@@ -2107,7 +2233,10 @@ async function convertToOpenAIResponsesInput({
  break;
  }
  if (hasLocalShellTool && part.toolName === "local_shell") {
- const parsedInput =
+ const parsedInput = await (0, import_provider_utils20.validateTypes)({
+ value: part.input,
+ schema: localShellInputSchema
+ });
  input.push({
  type: "local_shell_call",
  call_id: part.toolCallId,
@@ -2145,7 +2274,7 @@ async function convertToOpenAIResponsesInput({
  break;
  }
  case "reasoning": {
- const providerOptions = await (0,
+ const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
  provider: "openai",
  providerOptions: part.providerOptions,
  schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2203,10 +2332,14 @@ async function convertToOpenAIResponsesInput({
  for (const part of content) {
  const output = part.output;
  if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
+ const parsedOutput = await (0, import_provider_utils20.validateTypes)({
+ value: output.value,
+ schema: localShellOutputSchema
+ });
  input.push({
  type: "local_shell_call_output",
  call_id: part.toolCallId,
- output:
+ output: parsedOutput.output
  });
  break;
  }
@@ -2241,9 +2374,9 @@ async function convertToOpenAIResponsesInput({
  }
  return { input, warnings };
  }
- var openaiResponsesReasoningProviderOptionsSchema =
- itemId:
- reasoningEncryptedContent:
+ var openaiResponsesReasoningProviderOptionsSchema = z15.object({
+ itemId: z15.string().nullish(),
+ reasoningEncryptedContent: z15.string().nullish()
  });

  // src/responses/map-openai-responses-finish-reason.ts
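The converter now runs local_shell tool inputs and outputs through validateTypes before forwarding them, which is why those call sites are awaited. A minimal sketch of that pattern under stated assumptions (the stand-in schema and value below are illustrative; the bundle uses localShellInputSchema and part.input):

  import { validateTypes, zodSchema } from '@ai-sdk/provider-utils';
  import * as z from 'zod/v4';

  // Stand-in for localShellInputSchema from the bundle above.
  const localShellInput = zodSchema(
    z.object({
      action: z.object({ type: z.literal('exec'), command: z.array(z.string()) }),
    }),
  );

  // validateTypes resolves to the typed value, or throws when validation fails.
  const parsedInput = await validateTypes({
    value: { action: { type: 'exec', command: ['ls', '-la'] } },
    schema: localShellInput,
  });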
|
|
@@ -2264,9 +2397,539 @@ function mapOpenAIResponseFinishReason({
|
|
|
2264
2397
|
}
|
|
2265
2398
|
}
|
|
2266
2399
|
|
|
2400
|
+
// src/responses/openai-responses-api.ts
|
|
2401
|
+
var import_provider_utils21 = require("@ai-sdk/provider-utils");
|
|
2402
|
+
var z16 = __toESM(require("zod/v4"));
|
|
2403
|
+
var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2404
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2405
|
+
z16.union([
|
|
2406
|
+
z16.object({
|
|
2407
|
+
type: z16.literal("response.output_text.delta"),
|
|
2408
|
+
item_id: z16.string(),
|
|
2409
|
+
delta: z16.string(),
|
|
2410
|
+
logprobs: z16.array(
|
|
2411
|
+
z16.object({
|
|
2412
|
+
token: z16.string(),
|
|
2413
|
+
logprob: z16.number(),
|
|
2414
|
+
top_logprobs: z16.array(
|
|
2415
|
+
z16.object({
|
|
2416
|
+
token: z16.string(),
|
|
2417
|
+
logprob: z16.number()
|
|
2418
|
+
})
|
|
2419
|
+
)
|
|
2420
|
+
})
|
|
2421
|
+
).nullish()
|
|
2422
|
+
}),
|
|
2423
|
+
z16.object({
|
|
2424
|
+
type: z16.enum(["response.completed", "response.incomplete"]),
|
|
2425
|
+
response: z16.object({
|
|
2426
|
+
incomplete_details: z16.object({ reason: z16.string() }).nullish(),
|
|
2427
|
+
usage: z16.object({
|
|
2428
|
+
input_tokens: z16.number(),
|
|
2429
|
+
input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
|
|
2430
|
+
output_tokens: z16.number(),
|
|
2431
|
+
output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
|
|
2432
|
+
}),
|
|
2433
|
+
service_tier: z16.string().nullish()
|
|
2434
|
+
})
|
|
2435
|
+
}),
|
|
2436
|
+
z16.object({
|
|
2437
|
+
type: z16.literal("response.created"),
|
|
2438
|
+
response: z16.object({
|
|
2439
|
+
id: z16.string(),
|
|
2440
|
+
created_at: z16.number(),
|
|
2441
|
+
model: z16.string(),
|
|
2442
|
+
service_tier: z16.string().nullish()
|
|
2443
|
+
})
|
|
2444
|
+
}),
|
|
2445
|
+
z16.object({
|
|
2446
|
+
type: z16.literal("response.output_item.added"),
|
|
2447
|
+
output_index: z16.number(),
|
|
2448
|
+
item: z16.discriminatedUnion("type", [
|
|
2449
|
+
z16.object({
|
|
2450
|
+
type: z16.literal("message"),
|
|
2451
|
+
id: z16.string()
|
|
2452
|
+
}),
|
|
2453
|
+
z16.object({
|
|
2454
|
+
type: z16.literal("reasoning"),
|
|
2455
|
+
id: z16.string(),
|
|
2456
|
+
encrypted_content: z16.string().nullish()
|
|
2457
|
+
}),
|
|
2458
|
+
z16.object({
|
|
2459
|
+
type: z16.literal("function_call"),
|
|
2460
|
+
id: z16.string(),
|
|
2461
|
+
call_id: z16.string(),
|
|
2462
|
+
name: z16.string(),
|
|
2463
|
+
arguments: z16.string()
|
|
2464
|
+
}),
|
|
2465
|
+
z16.object({
|
|
2466
|
+
type: z16.literal("web_search_call"),
|
|
2467
|
+
id: z16.string(),
|
|
2468
|
+
status: z16.string(),
|
|
2469
|
+
action: z16.object({
|
|
2470
|
+
type: z16.literal("search"),
|
|
2471
|
+
query: z16.string().optional()
|
|
2472
|
+
}).nullish()
|
|
2473
|
+
}),
|
|
2474
|
+
z16.object({
|
|
2475
|
+
type: z16.literal("computer_call"),
|
|
2476
|
+
id: z16.string(),
|
|
2477
|
+
status: z16.string()
|
|
2478
|
+
}),
|
|
2479
|
+
z16.object({
|
|
2480
|
+
type: z16.literal("file_search_call"),
|
|
2481
|
+
id: z16.string()
|
|
2482
|
+
}),
|
|
2483
|
+
z16.object({
|
|
2484
|
+
type: z16.literal("image_generation_call"),
|
|
2485
|
+
id: z16.string()
|
|
2486
|
+
}),
|
|
2487
|
+
z16.object({
|
|
2488
|
+
type: z16.literal("code_interpreter_call"),
|
|
2489
|
+
id: z16.string(),
|
|
2490
|
+
container_id: z16.string(),
|
|
2491
|
+
code: z16.string().nullable(),
|
|
2492
|
+
outputs: z16.array(
|
|
2493
|
+
z16.discriminatedUnion("type", [
|
|
2494
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2495
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2496
|
+
])
|
|
2497
|
+
).nullable(),
|
|
2498
|
+
status: z16.string()
|
|
2499
|
+
})
|
|
2500
|
+
])
|
|
2501
|
+
}),
|
|
2502
|
+
z16.object({
|
|
2503
|
+
type: z16.literal("response.output_item.done"),
|
|
2504
|
+
output_index: z16.number(),
|
|
2505
|
+
item: z16.discriminatedUnion("type", [
|
|
2506
|
+
z16.object({
|
|
2507
|
+
type: z16.literal("message"),
|
|
2508
|
+
id: z16.string()
|
|
2509
|
+
}),
|
|
2510
|
+
z16.object({
|
|
2511
|
+
type: z16.literal("reasoning"),
|
|
2512
|
+
id: z16.string(),
|
|
2513
|
+
encrypted_content: z16.string().nullish()
|
|
2514
|
+
}),
|
|
2515
|
+
z16.object({
|
|
2516
|
+
type: z16.literal("function_call"),
|
|
2517
|
+
id: z16.string(),
|
|
2518
|
+
call_id: z16.string(),
|
|
2519
|
+
name: z16.string(),
|
|
2520
|
+
arguments: z16.string(),
|
|
2521
|
+
status: z16.literal("completed")
|
|
2522
|
+
}),
|
|
2523
|
+
z16.object({
|
|
2524
|
+
type: z16.literal("code_interpreter_call"),
|
|
2525
|
+
id: z16.string(),
|
|
2526
|
+
code: z16.string().nullable(),
|
|
2527
|
+
container_id: z16.string(),
|
|
2528
|
+
outputs: z16.array(
|
|
2529
|
+
z16.discriminatedUnion("type", [
|
|
2530
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2531
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2532
|
+
])
|
|
2533
|
+
).nullable()
|
|
2534
|
+
}),
|
|
2535
|
+
z16.object({
|
|
2536
|
+
type: z16.literal("image_generation_call"),
|
|
2537
|
+
id: z16.string(),
|
|
2538
|
+
result: z16.string()
|
|
2539
|
+
}),
|
|
2540
|
+
z16.object({
|
|
2541
|
+
type: z16.literal("web_search_call"),
|
|
2542
|
+
id: z16.string(),
|
|
2543
|
+
status: z16.string(),
|
|
2544
|
+
action: z16.discriminatedUnion("type", [
|
|
2545
|
+
z16.object({
|
|
2546
|
+
type: z16.literal("search"),
|
|
2547
|
+
query: z16.string().nullish()
|
|
2548
|
+
}),
|
|
2549
|
+
z16.object({
|
|
2550
|
+
type: z16.literal("open_page"),
|
|
2551
|
+
url: z16.string()
|
|
2552
|
+
}),
|
|
2553
|
+
z16.object({
|
|
2554
|
+
type: z16.literal("find"),
|
|
2555
|
+
url: z16.string(),
|
|
2556
|
+
pattern: z16.string()
|
|
2557
|
+
})
|
|
2558
|
+
]).nullish()
|
|
2559
|
+
}),
|
|
2560
|
+
z16.object({
|
|
2561
|
+
type: z16.literal("file_search_call"),
|
|
2562
|
+
id: z16.string(),
|
|
2563
|
+
queries: z16.array(z16.string()),
|
|
2564
|
+
results: z16.array(
|
|
2565
|
+
z16.object({
|
|
2566
|
+
attributes: z16.record(z16.string(), z16.unknown()),
|
|
2567
|
+
file_id: z16.string(),
|
|
2568
|
+
filename: z16.string(),
|
|
2569
|
+
score: z16.number(),
|
|
2570
|
+
text: z16.string()
|
|
2571
|
+
})
|
|
2572
|
+
).nullish()
|
|
2573
|
+
}),
|
|
2574
|
+
z16.object({
|
|
2575
|
+
type: z16.literal("local_shell_call"),
|
|
2576
|
+
id: z16.string(),
|
|
2577
|
+
call_id: z16.string(),
|
|
2578
|
+
action: z16.object({
|
|
2579
|
+
type: z16.literal("exec"),
|
|
2580
|
+
command: z16.array(z16.string()),
|
|
2581
|
+
timeout_ms: z16.number().optional(),
|
|
2582
|
+
user: z16.string().optional(),
|
|
2583
|
+
working_directory: z16.string().optional(),
|
|
2584
|
+
env: z16.record(z16.string(), z16.string()).optional()
|
|
2585
|
+
})
|
|
2586
|
+
}),
|
|
2587
|
+
z16.object({
|
|
2588
|
+
type: z16.literal("computer_call"),
|
|
2589
|
+
id: z16.string(),
|
|
2590
|
+
status: z16.literal("completed")
|
|
2591
|
+
})
|
|
2592
|
+
])
|
|
2593
|
+
}),
|
|
2594
|
+
z16.object({
|
|
2595
|
+
type: z16.literal("response.function_call_arguments.delta"),
|
|
2596
|
+
item_id: z16.string(),
|
|
2597
|
+
output_index: z16.number(),
|
|
2598
|
+
delta: z16.string()
|
|
2599
|
+
}),
|
|
2600
|
+
z16.object({
|
|
2601
|
+
type: z16.literal("response.image_generation_call.partial_image"),
|
|
2602
|
+
item_id: z16.string(),
|
|
2603
|
+
output_index: z16.number(),
|
|
2604
|
+
partial_image_b64: z16.string()
|
|
2605
|
+
}),
|
|
2606
|
+
z16.object({
|
|
2607
|
+
type: z16.literal("response.code_interpreter_call_code.delta"),
|
|
2608
|
+
item_id: z16.string(),
|
|
2609
|
+
output_index: z16.number(),
|
|
2610
|
+
delta: z16.string()
|
|
2611
|
+
}),
|
|
2612
|
+
z16.object({
|
|
2613
|
+
type: z16.literal("response.code_interpreter_call_code.done"),
|
|
2614
|
+
item_id: z16.string(),
|
|
2615
|
+
output_index: z16.number(),
|
|
2616
|
+
code: z16.string()
|
|
2617
|
+
}),
|
|
2618
|
+
z16.object({
|
|
2619
|
+
type: z16.literal("response.output_text.annotation.added"),
|
|
2620
|
+
annotation: z16.discriminatedUnion("type", [
|
|
2621
|
+
z16.object({
|
|
2622
|
+
type: z16.literal("url_citation"),
|
|
2623
|
+
url: z16.string(),
|
|
2624
|
+
title: z16.string()
|
|
2625
|
+
}),
|
|
2626
|
+
z16.object({
|
|
2627
|
+
type: z16.literal("file_citation"),
|
|
2628
|
+
file_id: z16.string(),
|
|
2629
|
+
filename: z16.string().nullish(),
|
|
2630
|
+
index: z16.number().nullish(),
|
|
2631
|
+
start_index: z16.number().nullish(),
|
|
2632
|
+
end_index: z16.number().nullish(),
|
|
2633
|
+
quote: z16.string().nullish()
|
|
2634
|
+
})
|
|
2635
|
+
])
|
|
2636
|
+
}),
|
|
2637
|
+
z16.object({
|
|
2638
|
+
type: z16.literal("response.reasoning_summary_part.added"),
|
|
2639
|
+
item_id: z16.string(),
|
|
2640
|
+
summary_index: z16.number()
|
|
2641
|
+
}),
|
|
2642
|
+
z16.object({
|
|
2643
|
+
type: z16.literal("response.reasoning_summary_text.delta"),
|
|
2644
|
+
item_id: z16.string(),
|
|
2645
|
+
summary_index: z16.number(),
|
|
2646
|
+
delta: z16.string()
|
|
2647
|
+
}),
|
|
2648
|
+
z16.object({
|
|
2649
|
+
type: z16.literal("error"),
|
|
2650
|
+
code: z16.string(),
|
|
2651
|
+
message: z16.string(),
|
|
2652
|
+
param: z16.string().nullish(),
|
|
2653
|
+
sequence_number: z16.number()
|
|
2654
|
+
}),
|
|
2655
|
+
z16.object({ type: z16.string() }).loose().transform((value) => ({
|
|
2656
|
+
type: "unknown_chunk",
|
|
2657
|
+
message: value.type
|
|
2658
|
+
}))
|
|
2659
|
+
// fallback for unknown chunks
|
|
2660
|
+
])
|
|
2661
|
+
)
|
|
2662
|
+
);
|
|
2663
|
+
var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2664
|
+
() => (0, import_provider_utils21.zodSchema)(
|
|
2665
|
+
z16.object({
|
|
2666
|
+
id: z16.string(),
|
|
2667
|
+
created_at: z16.number(),
|
|
2668
|
+
error: z16.object({
|
|
2669
|
+
code: z16.string(),
|
|
2670
|
+
message: z16.string()
|
|
2671
|
+
}).nullish(),
|
|
2672
|
+
model: z16.string(),
|
|
2673
|
+
output: z16.array(
|
|
2674
|
+
z16.discriminatedUnion("type", [
|
|
2675
|
+
z16.object({
|
|
2676
|
+
type: z16.literal("message"),
|
|
2677
|
+
role: z16.literal("assistant"),
|
|
2678
|
+
id: z16.string(),
|
|
2679
|
+
content: z16.array(
|
|
2680
|
+
z16.object({
|
|
2681
|
+
type: z16.literal("output_text"),
|
|
2682
|
+
text: z16.string(),
|
|
2683
|
+
logprobs: z16.array(
|
|
2684
|
+
z16.object({
|
|
2685
|
+
token: z16.string(),
|
|
2686
|
+
logprob: z16.number(),
|
|
2687
|
+
top_logprobs: z16.array(
|
|
2688
|
+
z16.object({
|
|
2689
|
+
token: z16.string(),
|
|
2690
|
+
logprob: z16.number()
|
|
2691
|
+
})
|
|
2692
|
+
)
|
|
2693
|
+
})
|
|
2694
|
+
).nullish(),
|
|
2695
|
+
annotations: z16.array(
|
|
2696
|
+
z16.discriminatedUnion("type", [
|
|
2697
|
+
z16.object({
|
|
2698
|
+
type: z16.literal("url_citation"),
|
|
2699
|
+
start_index: z16.number(),
|
|
2700
|
+
end_index: z16.number(),
|
|
2701
|
+
url: z16.string(),
|
|
2702
|
+
title: z16.string()
|
|
2703
|
+
}),
|
|
2704
|
+
z16.object({
|
|
2705
|
+
type: z16.literal("file_citation"),
|
|
2706
|
+
file_id: z16.string(),
|
|
2707
|
+
filename: z16.string().nullish(),
|
|
2708
|
+
index: z16.number().nullish(),
|
|
2709
|
+
start_index: z16.number().nullish(),
|
|
2710
|
+
end_index: z16.number().nullish(),
|
|
2711
|
+
quote: z16.string().nullish()
|
|
2712
|
+
}),
|
|
2713
|
+
z16.object({
|
|
2714
|
+
type: z16.literal("container_file_citation")
|
|
2715
|
+
})
|
|
2716
|
+
])
|
|
2717
|
+
)
|
|
2718
|
+
})
|
|
2719
|
+
)
|
|
2720
|
+
}),
|
|
2721
|
+
z16.object({
|
|
2722
|
+
type: z16.literal("web_search_call"),
|
|
2723
|
+
id: z16.string(),
|
|
2724
|
+
status: z16.string(),
|
|
2725
|
+
action: z16.discriminatedUnion("type", [
|
|
2726
|
+
z16.object({
|
|
2727
|
+
type: z16.literal("search"),
|
|
2728
|
+
query: z16.string().nullish()
|
|
2729
|
+
}),
|
|
2730
|
+
z16.object({
|
|
2731
|
+
type: z16.literal("open_page"),
|
|
2732
|
+
url: z16.string()
|
|
2733
|
+
}),
|
|
2734
|
+
z16.object({
|
|
2735
|
+
type: z16.literal("find"),
|
|
2736
|
+
url: z16.string(),
|
|
2737
|
+
pattern: z16.string()
|
|
2738
|
+
})
|
|
2739
|
+
]).nullish()
|
|
2740
|
+
}),
|
|
2741
|
+
z16.object({
|
|
2742
|
+
type: z16.literal("file_search_call"),
|
|
2743
|
+
id: z16.string(),
|
|
2744
|
+
queries: z16.array(z16.string()),
|
|
2745
|
+
results: z16.array(
|
|
2746
|
+
z16.object({
|
|
2747
|
+
attributes: z16.record(z16.string(), z16.unknown()),
|
|
2748
|
+
file_id: z16.string(),
|
|
2749
|
+
filename: z16.string(),
|
|
2750
|
+
score: z16.number(),
|
|
2751
|
+
text: z16.string()
|
|
2752
|
+
})
|
|
2753
|
+
).nullish()
|
|
2754
|
+
}),
|
|
2755
|
+
z16.object({
|
|
2756
|
+
type: z16.literal("code_interpreter_call"),
|
|
2757
|
+
id: z16.string(),
|
|
2758
|
+
code: z16.string().nullable(),
|
|
2759
|
+
container_id: z16.string(),
|
|
2760
|
+
outputs: z16.array(
|
|
2761
|
+
z16.discriminatedUnion("type", [
|
|
2762
|
+
z16.object({ type: z16.literal("logs"), logs: z16.string() }),
|
|
2763
|
+
z16.object({ type: z16.literal("image"), url: z16.string() })
|
|
2764
|
+
])
|
|
2765
|
+
).nullable()
|
|
2766
|
+
}),
|
|
2767
|
+
z16.object({
|
|
2768
|
+
type: z16.literal("image_generation_call"),
|
|
2769
|
+
id: z16.string(),
|
|
2770
|
+
result: z16.string()
|
|
2771
|
+
}),
|
|
2772
|
+
z16.object({
|
|
2773
|
+
type: z16.literal("local_shell_call"),
|
|
2774
|
+
id: z16.string(),
|
|
2775
|
+
call_id: z16.string(),
|
|
2776
|
+
action: z16.object({
|
|
2777
|
+
type: z16.literal("exec"),
|
|
2778
|
+
command: z16.array(z16.string()),
|
|
2779
|
+
timeout_ms: z16.number().optional(),
|
|
2780
|
+
user: z16.string().optional(),
|
|
2781
|
+
working_directory: z16.string().optional(),
|
|
2782
|
+
env: z16.record(z16.string(), z16.string()).optional()
|
|
2783
|
+
})
|
|
2784
|
+
}),
|
|
2785
|
+
z16.object({
|
|
2786
|
+
type: z16.literal("function_call"),
|
|
2787
|
+
call_id: z16.string(),
|
|
2788
|
+
name: z16.string(),
|
|
2789
|
+
arguments: z16.string(),
|
|
2790
|
+
id: z16.string()
|
|
2791
|
+
}),
|
|
2792
|
+
z16.object({
|
|
2793
|
+
type: z16.literal("computer_call"),
|
|
2794
|
+
id: z16.string(),
|
|
2795
|
+
status: z16.string().optional()
|
|
2796
|
+
}),
|
|
2797
|
+
z16.object({
|
|
2798
|
+
type: z16.literal("reasoning"),
|
|
2799
|
+
id: z16.string(),
|
|
2800
|
+
encrypted_content: z16.string().nullish(),
|
|
2801
|
+
summary: z16.array(
|
|
2802
|
+
z16.object({
|
|
2803
|
+
type: z16.literal("summary_text"),
|
|
2804
|
+
text: z16.string()
|
|
2805
|
+
})
|
|
2806
|
+
)
|
|
2807
|
+
})
|
|
2808
|
+
])
|
|
2809
|
+
),
|
|
2810
|
+
service_tier: z16.string().nullish(),
|
|
2811
|
+
incomplete_details: z16.object({ reason: z16.string() }).nullish(),
|
|
2812
|
+
usage: z16.object({
|
|
2813
|
+
input_tokens: z16.number(),
|
|
2814
|
+
input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
|
|
2815
|
+
output_tokens: z16.number(),
|
|
2816
|
+
output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
|
|
2817
|
+
})
|
|
2818
|
+
})
|
|
2819
|
+
)
|
|
2820
|
+
);
|
|
2821
|
+
|
|
2822
|
+
// src/responses/openai-responses-options.ts
|
|
2823
|
+
var import_provider_utils22 = require("@ai-sdk/provider-utils");
|
|
2824
|
+
var z17 = __toESM(require("zod/v4"));
|
|
2825
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2826
|
+
var openaiResponsesReasoningModelIds = [
|
|
2827
|
+
"o1",
|
|
2828
|
+
"o1-2024-12-17",
|
|
2829
|
+
"o3-mini",
|
|
2830
|
+
"o3-mini-2025-01-31",
|
|
2831
|
+
"o3",
|
|
2832
|
+
"o3-2025-04-16",
|
|
2833
|
+
"o4-mini",
|
|
2834
|
+
"o4-mini-2025-04-16",
|
|
2835
|
+
"codex-mini-latest",
|
|
2836
|
+
"computer-use-preview",
|
|
2837
|
+
"gpt-5",
|
|
2838
|
+
"gpt-5-2025-08-07",
|
|
2839
|
+
"gpt-5-codex",
|
|
2840
|
+
"gpt-5-mini",
|
|
2841
|
+
"gpt-5-mini-2025-08-07",
|
|
2842
|
+
"gpt-5-nano",
|
|
2843
|
+
"gpt-5-nano-2025-08-07",
|
|
2844
|
+
"gpt-5-pro",
|
|
2845
|
+
"gpt-5-pro-2025-10-06"
|
|
2846
|
+
];
|
|
2847
|
+
var openaiResponsesModelIds = [
|
|
2848
|
+
"gpt-4.1",
|
|
2849
|
+
"gpt-4.1-2025-04-14",
|
|
2850
|
+
"gpt-4.1-mini",
|
|
2851
|
+
"gpt-4.1-mini-2025-04-14",
|
|
2852
|
+
"gpt-4.1-nano",
|
|
2853
|
+
"gpt-4.1-nano-2025-04-14",
|
|
2854
|
+
"gpt-4o",
|
|
2855
|
+
"gpt-4o-2024-05-13",
|
|
2856
|
+
"gpt-4o-2024-08-06",
|
|
2857
|
+
"gpt-4o-2024-11-20",
|
|
2858
|
+
"gpt-4o-audio-preview",
|
|
2859
|
+
"gpt-4o-audio-preview-2024-10-01",
|
|
2860
|
+
"gpt-4o-audio-preview-2024-12-17",
|
|
2861
|
+
"gpt-4o-search-preview",
|
|
2862
|
+
"gpt-4o-search-preview-2025-03-11",
|
|
2863
|
+
"gpt-4o-mini-search-preview",
|
|
2864
|
+
"gpt-4o-mini-search-preview-2025-03-11",
|
|
2865
|
+
"gpt-4o-mini",
|
|
2866
|
+
"gpt-4o-mini-2024-07-18",
|
|
2867
|
+
"gpt-4-turbo",
|
|
2868
|
+
"gpt-4-turbo-2024-04-09",
|
|
2869
|
+
"gpt-4-turbo-preview",
|
|
2870
|
+
"gpt-4-0125-preview",
|
|
2871
|
+
"gpt-4-1106-preview",
|
|
2872
|
+
"gpt-4",
|
|
2873
|
+
"gpt-4-0613",
|
|
2874
|
+
"gpt-4.5-preview",
|
|
2875
|
+
"gpt-4.5-preview-2025-02-27",
|
|
2876
|
+
"gpt-3.5-turbo-0125",
|
|
2877
|
+
"gpt-3.5-turbo",
|
|
2878
|
+
"gpt-3.5-turbo-1106",
|
|
2879
|
+
"chatgpt-4o-latest",
|
|
2880
|
+
"gpt-5-chat-latest",
|
|
2881
|
+
...openaiResponsesReasoningModelIds
|
|
2882
|
+
];
|
|
2883
|
+
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
|
|
2884
|
+
() => (0, import_provider_utils22.zodSchema)(
|
|
2885
|
+
z17.object({
|
|
2886
|
+
include: z17.array(
|
|
2887
|
+
z17.enum([
|
|
2888
|
+
"reasoning.encrypted_content",
|
|
2889
|
+
"file_search_call.results",
|
|
2890
|
+
"message.output_text.logprobs"
|
|
2891
|
+
])
|
|
2892
|
+
).nullish(),
|
|
2893
|
+
instructions: z17.string().nullish(),
|
|
2894
|
+
/**
|
|
2895
|
+
* Return the log probabilities of the tokens.
|
|
2896
|
+
*
|
|
2897
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
2898
|
+
* were generated.
|
|
2899
|
+
*
|
|
2900
|
+
* Setting to a number will return the log probabilities of the top n
|
|
2901
|
+
* tokens that were generated.
|
|
2902
|
+
*
|
|
2903
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
2904
|
+
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
2905
|
+
*/
|
|
2906
|
+
logprobs: z17.union([z17.boolean(), z17.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
2907
|
+
/**
|
|
2908
|
+
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
2909
|
+
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
2910
|
+
* Any further attempts to call a tool by the model will be ignored.
|
|
2911
|
+
*/
|
|
2912
|
+
maxToolCalls: z17.number().nullish(),
|
|
2913
|
+
metadata: z17.any().nullish(),
|
|
2914
|
+
parallelToolCalls: z17.boolean().nullish(),
|
|
2915
|
+
previousResponseId: z17.string().nullish(),
|
|
2916
|
+
promptCacheKey: z17.string().nullish(),
|
|
2917
|
+
reasoningEffort: z17.string().nullish(),
|
|
2918
|
+
reasoningSummary: z17.string().nullish(),
|
|
2919
|
+
safetyIdentifier: z17.string().nullish(),
|
|
2920
|
+
serviceTier: z17.enum(["auto", "flex", "priority", "default"]).nullish(),
|
|
2921
|
+
store: z17.boolean().nullish(),
|
|
2922
|
+
strictJsonSchema: z17.boolean().nullish(),
|
|
2923
|
+
textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
|
|
2924
|
+
user: z17.string().nullish()
|
|
2925
|
+
})
|
|
2926
|
+
)
|
|
2927
|
+
);
|
|
2928
|
+
|
|
2267
2929
|
// src/responses/openai-responses-prepare-tools.ts
|
|
2268
2930
|
var import_provider7 = require("@ai-sdk/provider");
|
|
2269
|
-
|
|
2931
|
+
var import_provider_utils23 = require("@ai-sdk/provider-utils");
|
|
2932
|
+
async function prepareResponsesTools({
|
|
2270
2933
|
tools,
|
|
2271
2934
|
toolChoice,
|
|
2272
2935
|
strictJsonSchema
|
|
@@ -2291,7 +2954,10 @@ function prepareResponsesTools({
  case "provider-defined": {
  switch (tool.id) {
  case "openai.file_search": {
- const args =
+ const args = await (0, import_provider_utils23.validateTypes)({
+ value: tool.args,
+ schema: fileSearchArgsSchema
+ });
  openaiTools2.push({
  type: "file_search",
  vector_store_ids: args.vectorStoreIds,
@@ -2311,7 +2977,10 @@ function prepareResponsesTools({
  break;
  }
  case "openai.web_search_preview": {
- const args =
+ const args = await (0, import_provider_utils23.validateTypes)({
+ value: tool.args,
+ schema: webSearchPreviewArgsSchema
+ });
  openaiTools2.push({
  type: "web_search_preview",
  search_context_size: args.searchContextSize,
@@ -2320,7 +2989,10 @@ function prepareResponsesTools({
  break;
  }
  case "openai.web_search": {
- const args =
+ const args = await (0, import_provider_utils23.validateTypes)({
+ value: tool.args,
+ schema: webSearchArgsSchema
+ });
  openaiTools2.push({
  type: "web_search",
  filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
@@ -2330,7 +3002,10 @@ function prepareResponsesTools({
  break;
  }
  case "openai.code_interpreter": {
- const args =
+ const args = await (0, import_provider_utils23.validateTypes)({
+ value: tool.args,
+ schema: codeInterpreterArgsSchema
+ });
  openaiTools2.push({
  type: "code_interpreter",
  container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
@@ -2338,7 +3013,10 @@ function prepareResponsesTools({
  break;
  }
  case "openai.image_generation": {
- const args =
+ const args = await (0, import_provider_utils23.validateTypes)({
+ value: tool.args,
+ schema: imageGenerationArgsSchema
+ });
  openaiTools2.push({
  type: "image_generation",
  background: args.background,
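Because every provider-defined tool's args object is now resolved through validateTypes, prepareResponsesTools became async and is awaited by the language model (see the `await prepareResponsesTools` hunk further below). A usage sketch of the corresponding factories; the factory names are assumptions based on the tool ids in this diff, and the option values are illustrative, while the camelCase argument names match the schemas referenced above:

  import { openai } from '@ai-sdk/openai';

  const tools = {
    file_search: openai.tools.fileSearch({ vectorStoreIds: ['vs_123'] }),
    code_interpreter: openai.tools.codeInterpreter({ container: { fileIds: ['file_abc'] } }),
    image_generation: openai.tools.imageGeneration({ background: 'transparent' }),
  };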
|
|
@@ -2390,83 +3068,6 @@ function prepareResponsesTools({
|
|
|
2390
3068
|
}
|
|
2391
3069
|
|
|
2392
3070
|
// src/responses/openai-responses-language-model.ts
|
|
2393
|
-
var webSearchCallItem = import_v416.z.object({
|
|
2394
|
-
type: import_v416.z.literal("web_search_call"),
|
|
2395
|
-
id: import_v416.z.string(),
|
|
2396
|
-
status: import_v416.z.string(),
|
|
2397
|
-
action: import_v416.z.discriminatedUnion("type", [
|
|
2398
|
-
import_v416.z.object({
|
|
2399
|
-
type: import_v416.z.literal("search"),
|
|
2400
|
-
query: import_v416.z.string().nullish()
|
|
2401
|
-
}),
|
|
2402
|
-
import_v416.z.object({
|
|
2403
|
-
type: import_v416.z.literal("open_page"),
|
|
2404
|
-
url: import_v416.z.string()
|
|
2405
|
-
}),
|
|
2406
|
-
import_v416.z.object({
|
|
2407
|
-
type: import_v416.z.literal("find"),
|
|
2408
|
-
url: import_v416.z.string(),
|
|
2409
|
-
pattern: import_v416.z.string()
|
|
2410
|
-
})
|
|
2411
|
-
]).nullish()
|
|
2412
|
-
});
|
|
2413
|
-
var fileSearchCallItem = import_v416.z.object({
|
|
2414
|
-
type: import_v416.z.literal("file_search_call"),
|
|
2415
|
-
id: import_v416.z.string(),
|
|
2416
|
-
queries: import_v416.z.array(import_v416.z.string()),
|
|
2417
|
-
results: import_v416.z.array(
|
|
2418
|
-
import_v416.z.object({
|
|
2419
|
-
attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
|
|
2420
|
-
file_id: import_v416.z.string(),
|
|
2421
|
-
filename: import_v416.z.string(),
|
|
2422
|
-
score: import_v416.z.number(),
|
|
2423
|
-
text: import_v416.z.string()
|
|
2424
|
-
})
|
|
2425
|
-
).nullish()
|
|
2426
|
-
});
|
|
2427
|
-
var codeInterpreterCallItem = import_v416.z.object({
|
|
2428
|
-
type: import_v416.z.literal("code_interpreter_call"),
|
|
2429
|
-
id: import_v416.z.string(),
|
|
2430
|
-
code: import_v416.z.string().nullable(),
|
|
2431
|
-
container_id: import_v416.z.string(),
|
|
2432
|
-
outputs: import_v416.z.array(
|
|
2433
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2434
|
-
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
2435
|
-
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
2436
|
-
])
|
|
2437
|
-
).nullable()
|
|
2438
|
-
});
|
|
2439
|
-
var localShellCallItem = import_v416.z.object({
|
|
2440
|
-
type: import_v416.z.literal("local_shell_call"),
|
|
2441
|
-
id: import_v416.z.string(),
|
|
2442
|
-
call_id: import_v416.z.string(),
|
|
2443
|
-
action: import_v416.z.object({
|
|
2444
|
-
type: import_v416.z.literal("exec"),
|
|
2445
|
-
command: import_v416.z.array(import_v416.z.string()),
|
|
2446
|
-
timeout_ms: import_v416.z.number().optional(),
|
|
2447
|
-
user: import_v416.z.string().optional(),
|
|
2448
|
-
working_directory: import_v416.z.string().optional(),
|
|
2449
|
-
env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
|
|
2450
|
-
})
|
|
2451
|
-
});
|
|
2452
|
-
var imageGenerationCallItem = import_v416.z.object({
|
|
2453
|
-
type: import_v416.z.literal("image_generation_call"),
|
|
2454
|
-
id: import_v416.z.string(),
|
|
2455
|
-
result: import_v416.z.string()
|
|
2456
|
-
});
|
|
2457
|
-
var TOP_LOGPROBS_MAX = 20;
|
|
2458
|
-
var LOGPROBS_SCHEMA = import_v416.z.array(
|
|
2459
|
-
import_v416.z.object({
|
|
2460
|
-
token: import_v416.z.string(),
|
|
2461
|
-
logprob: import_v416.z.number(),
|
|
2462
|
-
top_logprobs: import_v416.z.array(
|
|
2463
|
-
import_v416.z.object({
|
|
2464
|
-
token: import_v416.z.string(),
|
|
2465
|
-
logprob: import_v416.z.number()
|
|
2466
|
-
})
|
|
2467
|
-
)
|
|
2468
|
-
})
|
|
2469
|
-
);
|
|
2470
3071
|
var OpenAIResponsesLanguageModel = class {
|
|
2471
3072
|
constructor(modelId, config) {
|
|
2472
3073
|
this.specificationVersion = "v3";
|
|
@@ -2519,7 +3120,7 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const openaiOptions = await (0,
+ const openaiOptions = await (0, import_provider_utils24.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
|
|
@@ -2658,7 +3259,7 @@ var OpenAIResponsesLanguageModel = class {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
  toolWarnings
- } = prepareResponsesTools({
+ } = await prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema
|
|
@@ -2688,91 +3289,13 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2688
3289
|
responseHeaders,
|
|
2689
3290
|
value: response,
|
|
2690
3291
|
rawValue: rawResponse
|
|
2691
|
-
} = await (0,
|
|
3292
|
+
} = await (0, import_provider_utils24.postJsonToApi)({
|
|
2692
3293
|
url,
|
|
2693
|
-
headers: (0,
|
|
3294
|
+
headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
|
|
2694
3295
|
body,
|
|
2695
3296
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2696
|
-
successfulResponseHandler: (0,
|
|
2697
|
-
|
|
2698
|
-
id: import_v416.z.string(),
|
|
2699
|
-
created_at: import_v416.z.number(),
|
|
2700
|
-
error: import_v416.z.object({
|
|
2701
|
-
code: import_v416.z.string(),
|
|
2702
|
-
message: import_v416.z.string()
|
|
2703
|
-
}).nullish(),
|
|
2704
|
-
model: import_v416.z.string(),
|
|
2705
|
-
output: import_v416.z.array(
|
|
2706
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2707
|
-
import_v416.z.object({
|
|
2708
|
-
type: import_v416.z.literal("message"),
|
|
2709
|
-
role: import_v416.z.literal("assistant"),
|
|
2710
|
-
id: import_v416.z.string(),
|
|
2711
|
-
content: import_v416.z.array(
|
|
2712
|
-
import_v416.z.object({
|
|
2713
|
-
type: import_v416.z.literal("output_text"),
|
|
2714
|
-
text: import_v416.z.string(),
|
|
2715
|
-
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2716
|
-
annotations: import_v416.z.array(
|
|
2717
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2718
|
-
import_v416.z.object({
|
|
2719
|
-
type: import_v416.z.literal("url_citation"),
|
|
2720
|
-
start_index: import_v416.z.number(),
|
|
2721
|
-
end_index: import_v416.z.number(),
|
|
2722
|
-
url: import_v416.z.string(),
|
|
2723
|
-
title: import_v416.z.string()
|
|
2724
|
-
}),
|
|
2725
|
-
import_v416.z.object({
|
|
2726
|
-
type: import_v416.z.literal("file_citation"),
|
|
2727
|
-
file_id: import_v416.z.string(),
|
|
2728
|
-
filename: import_v416.z.string().nullish(),
|
|
2729
|
-
index: import_v416.z.number().nullish(),
|
|
2730
|
-
start_index: import_v416.z.number().nullish(),
|
|
2731
|
-
end_index: import_v416.z.number().nullish(),
|
|
2732
|
-
quote: import_v416.z.string().nullish()
|
|
2733
|
-
}),
|
|
2734
|
-
import_v416.z.object({
|
|
2735
|
-
type: import_v416.z.literal("container_file_citation")
|
|
2736
|
-
})
|
|
2737
|
-
])
|
|
2738
|
-
)
|
|
2739
|
-
})
|
|
2740
|
-
)
|
|
2741
|
-
}),
|
|
2742
|
-
webSearchCallItem,
|
|
2743
|
-
fileSearchCallItem,
|
|
2744
|
-
codeInterpreterCallItem,
|
|
2745
|
-
imageGenerationCallItem,
|
|
2746
|
-
localShellCallItem,
|
|
2747
|
-
import_v416.z.object({
|
|
2748
|
-
type: import_v416.z.literal("function_call"),
|
|
2749
|
-
call_id: import_v416.z.string(),
|
|
2750
|
-
name: import_v416.z.string(),
|
|
2751
|
-
arguments: import_v416.z.string(),
|
|
2752
|
-
id: import_v416.z.string()
|
|
2753
|
-
}),
|
|
2754
|
-
import_v416.z.object({
|
|
2755
|
-
type: import_v416.z.literal("computer_call"),
|
|
2756
|
-
id: import_v416.z.string(),
|
|
2757
|
-
status: import_v416.z.string().optional()
|
|
2758
|
-
}),
|
|
2759
|
-
import_v416.z.object({
|
|
2760
|
-
type: import_v416.z.literal("reasoning"),
|
|
2761
|
-
id: import_v416.z.string(),
|
|
2762
|
-
encrypted_content: import_v416.z.string().nullish(),
|
|
2763
|
-
summary: import_v416.z.array(
|
|
2764
|
-
import_v416.z.object({
|
|
2765
|
-
type: import_v416.z.literal("summary_text"),
|
|
2766
|
-
text: import_v416.z.string()
|
|
2767
|
-
})
|
|
2768
|
-
)
|
|
2769
|
-
})
|
|
2770
|
-
])
|
|
2771
|
-
),
|
|
2772
|
-
service_tier: import_v416.z.string().nullish(),
|
|
2773
|
-
incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
|
|
2774
|
-
usage: usageSchema2
|
|
2775
|
-
})
|
|
3297
|
+
successfulResponseHandler: (0, import_provider_utils24.createJsonResponseHandler)(
|
|
3298
|
+
openaiResponsesResponseSchema
|
|
2776
3299
|
),
|
|
2777
3300
|
abortSignal: options.abortSignal,
|
|
2778
3301
|
fetch: this.config.fetch
|
|
@@ -2835,7 +3358,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.call_id,
  toolName: "local_shell",
- input: JSON.stringify({
+ input: JSON.stringify({
+ action: part.action
+ }),
  providerMetadata: {
  openai: {
  itemId: part.id
@@ -2863,7 +3388,7 @@ var OpenAIResponsesLanguageModel = class {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils24.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2871,7 +3396,7 @@ var OpenAIResponsesLanguageModel = class {
  content.push({
  type: "source",
  sourceType: "document",
- id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils24.generateId)(),
  mediaType: "text/plain",
  title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
  filename: (_l = annotation.filename) != null ? _l : annotation.file_id
@@ -3023,18 +3548,18 @@ var OpenAIResponsesLanguageModel = class {
  warnings,
  webSearchToolName
  } = await this.getArgs(options);
- const { responseHeaders, value: response } = await (0,
+ const { responseHeaders, value: response } = await (0, import_provider_utils24.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils24.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -3422,7 +3947,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
+ id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils24.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -3430,7 +3955,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
+ id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils24.generateId)(),
  mediaType: "text/plain",
  title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
  filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
|
|
@@ -3466,203 +3991,6 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3466
3991
|
};
|
|
3467
3992
|
}
|
|
3468
3993
|
};
|
|
3469
|
-
var usageSchema2 = import_v416.z.object({
|
|
3470
|
-
input_tokens: import_v416.z.number(),
|
|
3471
|
-
input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
|
|
3472
|
-
output_tokens: import_v416.z.number(),
|
|
3473
|
-
output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
|
|
3474
|
-
});
|
|
3475
|
-
var textDeltaChunkSchema = import_v416.z.object({
|
|
3476
|
-
type: import_v416.z.literal("response.output_text.delta"),
|
|
3477
|
-
item_id: import_v416.z.string(),
|
|
3478
|
-
delta: import_v416.z.string(),
|
|
3479
|
-
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
3480
|
-
});
|
|
3481
|
-
var errorChunkSchema = import_v416.z.object({
|
|
3482
|
-
type: import_v416.z.literal("error"),
|
|
3483
|
-
code: import_v416.z.string(),
|
|
3484
|
-
message: import_v416.z.string(),
|
|
3485
|
-
param: import_v416.z.string().nullish(),
|
|
3486
|
-
sequence_number: import_v416.z.number()
|
|
3487
|
-
});
|
|
3488
|
-
var responseFinishedChunkSchema = import_v416.z.object({
|
|
3489
|
-
type: import_v416.z.enum(["response.completed", "response.incomplete"]),
|
|
3490
|
-
response: import_v416.z.object({
|
|
3491
|
-
incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
|
|
3492
|
-
usage: usageSchema2,
|
|
3493
|
-
service_tier: import_v416.z.string().nullish()
|
|
3494
|
-
})
|
|
3495
|
-
});
|
|
3496
|
-
var responseCreatedChunkSchema = import_v416.z.object({
|
|
3497
|
-
type: import_v416.z.literal("response.created"),
|
|
3498
|
-
response: import_v416.z.object({
|
|
3499
|
-
id: import_v416.z.string(),
|
|
3500
|
-
created_at: import_v416.z.number(),
|
|
3501
|
-
model: import_v416.z.string(),
|
|
3502
|
-
service_tier: import_v416.z.string().nullish()
|
|
3503
|
-
})
|
|
3504
|
-
});
|
|
3505
|
-
var responseOutputItemAddedSchema = import_v416.z.object({
|
|
3506
|
-
type: import_v416.z.literal("response.output_item.added"),
|
|
3507
|
-
output_index: import_v416.z.number(),
|
|
3508
|
-
item: import_v416.z.discriminatedUnion("type", [
|
|
3509
|
-
import_v416.z.object({
|
|
3510
|
-
type: import_v416.z.literal("message"),
|
|
3511
|
-
id: import_v416.z.string()
|
|
3512
|
-
}),
|
|
3513
|
-
import_v416.z.object({
|
|
3514
|
-
type: import_v416.z.literal("reasoning"),
|
|
3515
|
-
id: import_v416.z.string(),
|
|
3516
|
-
encrypted_content: import_v416.z.string().nullish()
|
|
3517
|
-
}),
|
|
3518
|
-
import_v416.z.object({
|
|
3519
|
-
type: import_v416.z.literal("function_call"),
|
|
3520
|
-
id: import_v416.z.string(),
|
|
3521
|
-
call_id: import_v416.z.string(),
|
|
3522
|
-
name: import_v416.z.string(),
|
|
3523
|
-
arguments: import_v416.z.string()
|
|
3524
|
-
}),
|
|
3525
|
-
import_v416.z.object({
|
|
3526
|
-
type: import_v416.z.literal("web_search_call"),
|
|
3527
|
-
id: import_v416.z.string(),
|
|
3528
|
-
status: import_v416.z.string(),
|
|
3529
|
-
action: import_v416.z.object({
|
|
3530
|
-
type: import_v416.z.literal("search"),
|
|
3531
|
-
query: import_v416.z.string().optional()
|
|
3532
|
-
}).nullish()
|
|
3533
|
-
}),
|
|
3534
|
-
import_v416.z.object({
|
|
3535
|
-
type: import_v416.z.literal("computer_call"),
|
|
3536
|
-
id: import_v416.z.string(),
|
|
3537
|
-
status: import_v416.z.string()
|
|
3538
|
-
}),
|
|
3539
|
-
import_v416.z.object({
|
|
3540
|
-
type: import_v416.z.literal("file_search_call"),
|
|
3541
|
-
id: import_v416.z.string()
|
|
3542
|
-
}),
|
|
3543
|
-
import_v416.z.object({
|
|
3544
|
-
type: import_v416.z.literal("image_generation_call"),
|
|
3545
|
-
id: import_v416.z.string()
|
|
3546
|
-
}),
|
|
3547
|
-
import_v416.z.object({
|
|
3548
|
-
type: import_v416.z.literal("code_interpreter_call"),
|
|
3549
|
-
id: import_v416.z.string(),
|
|
3550
|
-
container_id: import_v416.z.string(),
|
|
3551
|
-
code: import_v416.z.string().nullable(),
|
|
3552
|
-
outputs: import_v416.z.array(
|
|
3553
|
-
import_v416.z.discriminatedUnion("type", [
|
|
3554
|
-
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
3555
|
-
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
3556
|
-
])
|
|
3557
|
-
).nullable(),
|
|
3558
|
-
status: import_v416.z.string()
|
|
3559
|
-
})
|
|
3560
|
-
])
|
|
3561
|
-
});
|
|
3562
|
-
var responseOutputItemDoneSchema = import_v416.z.object({
|
|
3563
|
-
type: import_v416.z.literal("response.output_item.done"),
|
|
3564
|
-
output_index: import_v416.z.number(),
|
|
3565
|
-
item: import_v416.z.discriminatedUnion("type", [
|
|
3566
|
-
import_v416.z.object({
|
|
3567
|
-
type: import_v416.z.literal("message"),
|
|
3568
|
-
id: import_v416.z.string()
|
|
3569
|
-
}),
|
|
3570
|
-
import_v416.z.object({
|
|
3571
|
-
type: import_v416.z.literal("reasoning"),
|
|
3572
|
-
id: import_v416.z.string(),
|
|
3573
|
-
encrypted_content: import_v416.z.string().nullish()
|
|
3574
|
-
}),
|
|
3575
|
-
import_v416.z.object({
|
|
3576
|
-
type: import_v416.z.literal("function_call"),
|
|
3577
|
-
id: import_v416.z.string(),
|
|
3578
|
-
call_id: import_v416.z.string(),
|
|
3579
|
-
name: import_v416.z.string(),
|
|
3580
|
-
arguments: import_v416.z.string(),
|
|
3581
|
-
status: import_v416.z.literal("completed")
|
|
3582
|
-
}),
|
|
3583
|
-
codeInterpreterCallItem,
|
|
3584
|
-
imageGenerationCallItem,
|
|
3585
|
-
webSearchCallItem,
|
|
3586
|
-
fileSearchCallItem,
|
|
3587
|
-
localShellCallItem,
|
|
3588
|
-
import_v416.z.object({
|
|
3589
|
-
type: import_v416.z.literal("computer_call"),
|
|
3590
|
-
id: import_v416.z.string(),
|
|
3591
|
-
status: import_v416.z.literal("completed")
|
|
3592
|
-
})
|
|
3593
|
-
])
|
|
3594
|
-
});
|
|
3595
|
-
var responseFunctionCallArgumentsDeltaSchema = import_v416.z.object({
|
|
3596
|
-
type: import_v416.z.literal("response.function_call_arguments.delta"),
|
|
3597
|
-
item_id: import_v416.z.string(),
|
|
3598
|
-
output_index: import_v416.z.number(),
|
|
3599
|
-
delta: import_v416.z.string()
|
|
3600
|
-
});
|
|
3601
|
-
var responseImageGenerationCallPartialImageSchema = import_v416.z.object({
|
|
3602
|
-
type: import_v416.z.literal("response.image_generation_call.partial_image"),
|
|
3603
|
-
item_id: import_v416.z.string(),
|
|
3604
|
-
output_index: import_v416.z.number(),
|
|
3605
|
-
partial_image_b64: import_v416.z.string()
|
|
3606
|
-
});
|
|
3607
|
-
var responseCodeInterpreterCallCodeDeltaSchema = import_v416.z.object({
|
|
3608
|
-
type: import_v416.z.literal("response.code_interpreter_call_code.delta"),
|
|
3609
|
-
item_id: import_v416.z.string(),
|
|
3610
|
-
output_index: import_v416.z.number(),
|
|
3611
|
-
delta: import_v416.z.string()
|
|
3612
|
-
});
|
|
3613
|
-
var responseCodeInterpreterCallCodeDoneSchema = import_v416.z.object({
|
|
3614
|
-
type: import_v416.z.literal("response.code_interpreter_call_code.done"),
|
|
3615
|
-
item_id: import_v416.z.string(),
|
|
3616
|
-
output_index: import_v416.z.number(),
|
|
3617
|
-
code: import_v416.z.string()
|
|
3618
|
-
});
|
|
3619
|
-
var responseAnnotationAddedSchema = import_v416.z.object({
|
|
3620
|
-
type: import_v416.z.literal("response.output_text.annotation.added"),
|
|
3621
|
-
annotation: import_v416.z.discriminatedUnion("type", [
|
|
3622
|
-
import_v416.z.object({
|
|
3623
|
-
type: import_v416.z.literal("url_citation"),
|
|
3624
|
-
url: import_v416.z.string(),
|
|
3625
|
-
title: import_v416.z.string()
|
|
3626
|
-
}),
|
|
3627
|
-
import_v416.z.object({
|
|
3628
|
-
type: import_v416.z.literal("file_citation"),
|
|
3629
|
-
file_id: import_v416.z.string(),
|
|
3630
|
-
filename: import_v416.z.string().nullish(),
|
|
3631
|
-
index: import_v416.z.number().nullish(),
|
|
3632
|
-
start_index: import_v416.z.number().nullish(),
|
|
3633
|
-
end_index: import_v416.z.number().nullish(),
|
|
3634
|
-
quote: import_v416.z.string().nullish()
|
|
3635
|
-
})
|
|
3636
|
-
])
|
|
3637
|
-
});
|
|
3638
|
-
var responseReasoningSummaryPartAddedSchema = import_v416.z.object({
|
|
3639
|
-
type: import_v416.z.literal("response.reasoning_summary_part.added"),
|
|
3640
|
-
item_id: import_v416.z.string(),
|
|
3641
|
-
summary_index: import_v416.z.number()
|
|
3642
|
-
});
|
|
3643
|
-
var responseReasoningSummaryTextDeltaSchema = import_v416.z.object({
|
|
3644
|
-
type: import_v416.z.literal("response.reasoning_summary_text.delta"),
|
|
3645
|
-
item_id: import_v416.z.string(),
|
|
3646
|
-
summary_index: import_v416.z.number(),
|
|
3647
|
-
delta: import_v416.z.string()
|
|
3648
|
-
});
|
|
3649
|
-
var openaiResponsesChunkSchema = import_v416.z.union([
|
|
3650
|
-
textDeltaChunkSchema,
|
|
3651
|
-
responseFinishedChunkSchema,
|
|
3652
|
-
responseCreatedChunkSchema,
|
|
3653
|
-
responseOutputItemAddedSchema,
|
|
3654
|
-
responseOutputItemDoneSchema,
|
|
3655
|
-
responseFunctionCallArgumentsDeltaSchema,
|
|
3656
|
-
responseImageGenerationCallPartialImageSchema,
|
|
3657
|
-
responseCodeInterpreterCallCodeDeltaSchema,
|
|
3658
|
-
responseCodeInterpreterCallCodeDoneSchema,
|
|
3659
|
-
responseAnnotationAddedSchema,
|
|
3660
|
-
responseReasoningSummaryPartAddedSchema,
|
|
3661
|
-
responseReasoningSummaryTextDeltaSchema,
|
|
3662
|
-
errorChunkSchema,
|
|
3663
|
-
import_v416.z.object({ type: import_v416.z.string() }).loose()
|
|
3664
|
-
// fallback for unknown chunks
|
|
3665
|
-
]);
|
|
3666
3994
|
function isTextDeltaChunk(chunk) {
|
|
3667
3995
|
return chunk.type === "response.output_text.delta";
|
|
3668
3996
|
}
|
|
@@ -3742,55 +4070,23 @@ function getResponsesModelConfig(modelId) {
  isReasoningModel: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_v416.z.object({
- include: import_v416.z.array(
- import_v416.z.enum([
- "reasoning.encrypted_content",
- "file_search_call.results",
- "message.output_text.logprobs"
- ])
- ).nullish(),
- instructions: import_v416.z.string().nullish(),
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- *
- * @see https://platform.openai.com/docs/api-reference/responses/create
- * @see https://cookbook.openai.com/examples/using_logprobs
- */
- logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
- /**
- * The maximum number of total calls to built-in tools that can be processed in a response.
- * This maximum number applies across all built-in tool calls, not per individual tool.
- * Any further attempts to call a tool by the model will be ignored.
- */
- maxToolCalls: import_v416.z.number().nullish(),
- metadata: import_v416.z.any().nullish(),
- parallelToolCalls: import_v416.z.boolean().nullish(),
- previousResponseId: import_v416.z.string().nullish(),
- promptCacheKey: import_v416.z.string().nullish(),
- reasoningEffort: import_v416.z.string().nullish(),
- reasoningSummary: import_v416.z.string().nullish(),
- safetyIdentifier: import_v416.z.string().nullish(),
- serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
- store: import_v416.z.boolean().nullish(),
- strictJsonSchema: import_v416.z.boolean().nullish(),
- textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
- user: import_v416.z.string().nullish()
- });

  // src/speech/openai-speech-model.ts
- var
-
-
-
-
-
+ var import_provider_utils26 = require("@ai-sdk/provider-utils");
+
+ // src/speech/openai-speech-options.ts
+ var import_provider_utils25 = require("@ai-sdk/provider-utils");
+ var z18 = __toESM(require("zod/v4"));
+ var openaiSpeechProviderOptionsSchema = (0, import_provider_utils25.lazyValidator)(
+ () => (0, import_provider_utils25.zodSchema)(
+ z18.object({
+ instructions: z18.string().nullish(),
+ speed: z18.number().min(0.25).max(4).default(1).nullish()
+ })
+ )
+ );
+
+ // src/speech/openai-speech-model.ts
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
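The responses provider options schema that used to live here now sits in src/responses/openai-responses-options.ts (added earlier in this diff); the field list is otherwise unchanged, but serviceTier additionally accepts "default". A usage sketch with illustrative values:

  import { openai } from '@ai-sdk/openai';
  import { generateText } from 'ai';

  const { text } = await generateText({
    model: openai.responses('gpt-5-mini'),
    prompt: 'Explain this change in one sentence.',
    providerOptions: {
      openai: {
        serviceTier: 'default', // newly accepted value in this schema
        logprobs: 5,            // boolean, or 1..20 (TOP_LOGPROBS_MAX)
        textVerbosity: 'low',
      },
    },
  });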
|
|
@@ -3810,10 +4106,10 @@ var OpenAISpeechModel = class {
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = await (0,
+ const openAIOptions = await (0, import_provider_utils26.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema:
+ schema: openaiSpeechProviderOptionsSchema
  });
  const requestBody = {
  model: this.modelId,
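The instructions/speed options parsed here are now defined by openaiSpeechProviderOptionsSchema in src/speech/openai-speech-options.ts (added above). A sketch of how they might be passed; it assumes the speech model factory openai.speech() and the experimental generateSpeech helper, neither of which is shown in this diff:

  import { openai } from '@ai-sdk/openai';
  import { experimental_generateSpeech as generateSpeech } from 'ai';

  const { audio } = await generateSpeech({
    model: openai.speech('gpt-4o-mini-tts'),
    text: 'The build finished without warnings.',
    providerOptions: {
      openai: { instructions: 'Speak calmly.', speed: 0.9 }, // speed: 0.25–4, default 1
    },
  });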
|
|
@@ -3863,15 +4159,15 @@ var OpenAISpeechModel = class {
  value: audio,
  responseHeaders,
  rawValue: rawResponse
- } = await (0,
+ } = await (0, import_provider_utils26.postJsonToApi)({
  url: this.config.url({
  path: "/audio/speech",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils26.combineHeaders)(this.config.headers(), options.headers),
  body: requestBody,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils26.createBinaryResponseHandler)(),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
|
|
@@ -3892,35 +4188,73 @@ var OpenAISpeechModel = class {
 };

 // src/transcription/openai-transcription-model.ts
-var
-
+var import_provider_utils29 = require("@ai-sdk/provider-utils");
+
+// src/transcription/openai-transcription-api.ts
+var import_provider_utils27 = require("@ai-sdk/provider-utils");
+var z19 = __toESM(require("zod/v4"));
+var openaiTranscriptionResponseSchema = (0, import_provider_utils27.lazyValidator)(
+  () => (0, import_provider_utils27.zodSchema)(
+    z19.object({
+      text: z19.string(),
+      language: z19.string().nullish(),
+      duration: z19.number().nullish(),
+      words: z19.array(
+        z19.object({
+          word: z19.string(),
+          start: z19.number(),
+          end: z19.number()
+        })
+      ).nullish(),
+      segments: z19.array(
+        z19.object({
+          id: z19.number(),
+          seek: z19.number(),
+          start: z19.number(),
+          end: z19.number(),
+          text: z19.string(),
+          tokens: z19.array(z19.number()),
+          temperature: z19.number(),
+          avg_logprob: z19.number(),
+          compression_ratio: z19.number(),
+          no_speech_prob: z19.number()
+        })
+      ).nullish()
+    })
+  )
+);

 // src/transcription/openai-transcription-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils28 = require("@ai-sdk/provider-utils");
+var z20 = __toESM(require("zod/v4"));
+var openAITranscriptionProviderOptions = (0, import_provider_utils28.lazyValidator)(
+  () => (0, import_provider_utils28.zodSchema)(
+    z20.object({
+      /**
+       * Additional information to include in the transcription response.
+       */
+      include: z20.array(z20.string()).optional(),
+      /**
+       * The language of the input audio in ISO-639-1 format.
+       */
+      language: z20.string().optional(),
+      /**
+       * An optional text to guide the model's style or continue a previous audio segment.
+       */
+      prompt: z20.string().optional(),
+      /**
+       * The sampling temperature, between 0 and 1.
+       * @default 0
+       */
+      temperature: z20.number().min(0).max(1).default(0).optional(),
+      /**
+       * The timestamp granularities to populate for this transcription.
+       * @default ['segment']
+       */
+      timestampGranularities: z20.array(z20.enum(["word", "segment"])).default(["segment"]).optional()
+    })
+  )
+);

 // src/transcription/openai-transcription-model.ts
 var languageMap = {
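
Note: the new openAITranscriptionProviderOptions validator above defines the options callers can supply for transcription (include, language, prompt, temperature, timestampGranularities). As a quick illustration, the sketch below rebuilds the same schema with plain zod and parses a sample options object; the standalone names are illustrative and are not exported by the package.

// Hedged sketch: the transcription provider options from the hunk above,
// rebuilt as a standalone zod schema. transcriptionProviderOptions is an
// illustrative name, not an export of @ai-sdk/openai.
import * as z from "zod/v4";

const transcriptionProviderOptions = z.object({
  // Additional information to include in the transcription response.
  include: z.array(z.string()).optional(),
  // The language of the input audio in ISO-639-1 format.
  language: z.string().optional(),
  // An optional text to guide the model's style or continue a previous audio segment.
  prompt: z.string().optional(),
  // The sampling temperature, between 0 and 1.
  temperature: z.number().min(0).max(1).default(0).optional(),
  // The timestamp granularities to populate for this transcription.
  timestampGranularities: z.array(z.enum(["word", "segment"])).default(["segment"]).optional()
});

// Example input as it might appear under providerOptions.openai:
const parsed = transcriptionProviderOptions.parse({
  language: "en",
  timestampGranularities: ["word"]
});
console.log(parsed.timestampGranularities); // ["word"]
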
@@ -3997,15 +4331,15 @@ var OpenAITranscriptionModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = await (0,
+    const openAIOptions = await (0, import_provider_utils29.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
-    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils29.convertBase64ToUint8Array)(audio)]);
     formData.append("model", this.modelId);
-    const fileExtension = (0,
+    const fileExtension = (0, import_provider_utils29.mediaTypeToExtension)(mediaType);
     formData.append(
       "file",
       new File([blob], "audio", { type: mediaType }),
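
Note: the hunk above shows how the transcription request body is assembled: provider options are parsed, the audio is wrapped in a Blob (decoding base64 input when needed), and the model id plus file are appended to a FormData payload. The sketch below mirrors that assembly with plain fetch against /audio/transcriptions; the function name, model id, filename handling, and extension lookup are simplified assumptions rather than the package's exact helpers.

// Hedged sketch of the multipart body assembled above, using plain FormData + fetch.
// transcribeViaFetch, the model id, and the extension lookup are illustrative.
const transcribeViaFetch = async (audio: Uint8Array, mediaType: string) => {
  const formData = new FormData();
  formData.append("model", "whisper-1");
  // Stand-in for the bundle's mediaTypeToExtension(mediaType) helper.
  const fileExtension = mediaType.split("/")[1] ?? "bin";
  formData.append(
    "file",
    new File([new Blob([audio])], `audio.${fileExtension}`, { type: mediaType })
  );
  const response = await fetch("https://api.openai.com/v1/audio/transcriptions", {
    method: "POST",
    headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
    body: formData
  });
  if (!response.ok) {
    throw new Error(`Transcription request failed: ${response.status}`);
  }
  // The bundle validates this JSON against openaiTranscriptionResponseSchema.
  return response.json();
};
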
@@ -4050,15 +4384,15 @@ var OpenAITranscriptionModel = class {
       value: response,
       responseHeaders,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils29.postFormDataToApi)({
       url: this.config.url({
         path: "/audio/transcriptions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
       formData,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
         openaiTranscriptionResponseSchema
       ),
       abortSignal: options.abortSignal,
@@ -4088,49 +4422,23 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = import_v419.z.object({
-  text: import_v419.z.string(),
-  language: import_v419.z.string().nullish(),
-  duration: import_v419.z.number().nullish(),
-  words: import_v419.z.array(
-    import_v419.z.object({
-      word: import_v419.z.string(),
-      start: import_v419.z.number(),
-      end: import_v419.z.number()
-    })
-  ).nullish(),
-  segments: import_v419.z.array(
-    import_v419.z.object({
-      id: import_v419.z.number(),
-      seek: import_v419.z.number(),
-      start: import_v419.z.number(),
-      end: import_v419.z.number(),
-      text: import_v419.z.string(),
-      tokens: import_v419.z.array(import_v419.z.number()),
-      temperature: import_v419.z.number(),
-      avg_logprob: import_v419.z.number(),
-      compression_ratio: import_v419.z.number(),
-      no_speech_prob: import_v419.z.number()
-    })
-  ).nullish()
-});

 // src/version.ts
-var VERSION = true ? "3.0.0-beta.
+var VERSION = true ? "3.0.0-beta.19" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {
   var _a, _b;
-  const baseURL = (_a = (0,
-    (0,
+  const baseURL = (_a = (0, import_provider_utils30.withoutTrailingSlash)(
+    (0, import_provider_utils30.loadOptionalSetting)({
       settingValue: options.baseURL,
       environmentVariableName: "OPENAI_BASE_URL"
     })
   )) != null ? _a : "https://api.openai.com/v1";
   const providerName = (_b = options.name) != null ? _b : "openai";
-  const getHeaders = () => (0,
+  const getHeaders = () => (0, import_provider_utils30.withUserAgentSuffix)(
     {
-      Authorization: `Bearer ${(0,
+      Authorization: `Bearer ${(0, import_provider_utils30.loadApiKey)({
         apiKey: options.apiKey,
         environmentVariableName: "OPENAI_API_KEY",
         description: "OpenAI"
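
Note: the final hunk shows the provider setup: the base URL falls back from options.baseURL to the OPENAI_BASE_URL environment variable and then to https://api.openai.com/v1, and the Authorization header is built from options.apiKey or OPENAI_API_KEY. The sketch below reproduces that resolution logic in plain TypeScript; the helper names are illustrative stand-ins for the provider-utils calls in the bundle, and the user-agent suffix handling is omitted.

// Hedged sketch of the createOpenAI setup shown above. resolveOpenAISetup and
// withoutTrailingSlash are illustrative stand-ins, not package exports.
interface OpenAISetupOptions {
  baseURL?: string;
  apiKey?: string;
  name?: string;
}

const withoutTrailingSlash = (url: string | undefined) => url?.replace(/\/$/, "");

const resolveOpenAISetup = (options: OpenAISetupOptions = {}) => {
  // loadOptionalSetting: explicit option first, then the OPENAI_BASE_URL env var.
  const baseURL =
    withoutTrailingSlash(options.baseURL ?? process.env.OPENAI_BASE_URL) ??
    "https://api.openai.com/v1";
  const providerName = options.name ?? "openai";
  // loadApiKey: explicit option first, then OPENAI_API_KEY; a missing key throws.
  const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
  if (apiKey == null) {
    throw new Error("OpenAI API key is missing (set options.apiKey or OPENAI_API_KEY).");
  }
  return {
    baseURL,
    providerName,
    headers: { Authorization: `Bearer ${apiKey}` }
  };
};
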