@ai-sdk/openai 2.0.44 → 2.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1341 -1025
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1295 -934
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -182
- package/dist/internal/index.d.ts +101 -182
- package/dist/internal/index.js +1338 -1020
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1307 -945
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
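The recurring change across the bundled files below is a move away from eagerly built module-level zod schemas (`import_v4N.z.object(...)` in 2.0.44) to schemas routed through `lazyValidator`/`lazySchema` and `zodSchema` from `@ai-sdk/provider-utils`, with `zod/v4` now imported through a new `__toESM` CommonJS-interop helper. A minimal sketch of the before/after pattern, assuming only that these helpers defer schema construction to first use (the variable names here are illustrative, not taken from the package):

```ts
// Sketch only: mirrors the shape of openaiErrorDataSchema from this diff.
// lazyValidator/zodSchema come from @ai-sdk/provider-utils as shown in the
// bundled output; their exact semantics are an assumption here.
import { lazyValidator, zodSchema } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

// 2.0.44 style: the zod object is constructed as soon as the module loads.
const eagerErrorSchema = z.object({
  error: z.object({ message: z.string() }),
});

// 2.0.46 style: construction is wrapped in a factory, so the zod object is
// only built when the validator is actually used.
const lazyErrorSchema = lazyValidator(() =>
  zodSchema(
    z.object({
      error: z.object({ message: z.string() }),
    }),
  ),
);
```

The tool schemas (`code_interpreter`, `file_search`, `web_search`, etc.) follow the same pattern via `lazySchema` instead of `lazyValidator`.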
package/dist/index.js
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
2
3
|
var __defProp = Object.defineProperty;
|
|
3
4
|
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
5
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
8
|
var __export = (target, all) => {
|
|
7
9
|
for (var name in all)
|
|
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
15
17
|
}
|
|
16
18
|
return to;
|
|
17
19
|
};
|
|
20
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
21
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
22
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
23
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
24
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
25
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
26
|
+
mod
|
|
27
|
+
));
|
|
18
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
29
|
|
|
20
30
|
// src/index.ts
|
|
@@ -27,25 +37,24 @@ __export(src_exports, {
|
|
|
27
37
|
module.exports = __toCommonJS(src_exports);
|
|
28
38
|
|
|
29
39
|
// src/openai-provider.ts
|
|
30
|
-
var
|
|
40
|
+
var import_provider_utils30 = require("@ai-sdk/provider-utils");
|
|
31
41
|
|
|
32
42
|
// src/chat/openai-chat-language-model.ts
|
|
33
43
|
var import_provider3 = require("@ai-sdk/provider");
|
|
34
|
-
var
|
|
35
|
-
var import_v43 = require("zod/v4");
|
|
44
|
+
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
36
45
|
|
|
37
46
|
// src/openai-error.ts
|
|
38
|
-
var
|
|
47
|
+
var z = __toESM(require("zod/v4"));
|
|
39
48
|
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
40
|
-
var openaiErrorDataSchema =
|
|
41
|
-
error:
|
|
42
|
-
message:
|
|
49
|
+
var openaiErrorDataSchema = z.object({
|
|
50
|
+
error: z.object({
|
|
51
|
+
message: z.string(),
|
|
43
52
|
// The additional information below is handled loosely to support
|
|
44
53
|
// OpenAI-compatible providers that have slightly different error
|
|
45
54
|
// responses:
|
|
46
|
-
type:
|
|
47
|
-
param:
|
|
48
|
-
code:
|
|
55
|
+
type: z.string().nullish(),
|
|
56
|
+
param: z.any().nullish(),
|
|
57
|
+
code: z.union([z.string(), z.number()]).nullish()
|
|
49
58
|
})
|
|
50
59
|
});
|
|
51
60
|
var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
|
|
@@ -261,95 +270,240 @@ function mapOpenAIFinishReason(finishReason) {
|
|
|
261
270
|
}
|
|
262
271
|
}
|
|
263
272
|
|
|
273
|
+
// src/chat/openai-chat-api.ts
|
|
274
|
+
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
275
|
+
var z2 = __toESM(require("zod/v4"));
|
|
276
|
+
var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
|
|
277
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
278
|
+
z2.object({
|
|
279
|
+
id: z2.string().nullish(),
|
|
280
|
+
created: z2.number().nullish(),
|
|
281
|
+
model: z2.string().nullish(),
|
|
282
|
+
choices: z2.array(
|
|
283
|
+
z2.object({
|
|
284
|
+
message: z2.object({
|
|
285
|
+
role: z2.literal("assistant").nullish(),
|
|
286
|
+
content: z2.string().nullish(),
|
|
287
|
+
tool_calls: z2.array(
|
|
288
|
+
z2.object({
|
|
289
|
+
id: z2.string().nullish(),
|
|
290
|
+
type: z2.literal("function"),
|
|
291
|
+
function: z2.object({
|
|
292
|
+
name: z2.string(),
|
|
293
|
+
arguments: z2.string()
|
|
294
|
+
})
|
|
295
|
+
})
|
|
296
|
+
).nullish(),
|
|
297
|
+
annotations: z2.array(
|
|
298
|
+
z2.object({
|
|
299
|
+
type: z2.literal("url_citation"),
|
|
300
|
+
start_index: z2.number(),
|
|
301
|
+
end_index: z2.number(),
|
|
302
|
+
url: z2.string(),
|
|
303
|
+
title: z2.string()
|
|
304
|
+
})
|
|
305
|
+
).nullish()
|
|
306
|
+
}),
|
|
307
|
+
index: z2.number(),
|
|
308
|
+
logprobs: z2.object({
|
|
309
|
+
content: z2.array(
|
|
310
|
+
z2.object({
|
|
311
|
+
token: z2.string(),
|
|
312
|
+
logprob: z2.number(),
|
|
313
|
+
top_logprobs: z2.array(
|
|
314
|
+
z2.object({
|
|
315
|
+
token: z2.string(),
|
|
316
|
+
logprob: z2.number()
|
|
317
|
+
})
|
|
318
|
+
)
|
|
319
|
+
})
|
|
320
|
+
).nullish()
|
|
321
|
+
}).nullish(),
|
|
322
|
+
finish_reason: z2.string().nullish()
|
|
323
|
+
})
|
|
324
|
+
),
|
|
325
|
+
usage: z2.object({
|
|
326
|
+
prompt_tokens: z2.number().nullish(),
|
|
327
|
+
completion_tokens: z2.number().nullish(),
|
|
328
|
+
total_tokens: z2.number().nullish(),
|
|
329
|
+
prompt_tokens_details: z2.object({
|
|
330
|
+
cached_tokens: z2.number().nullish()
|
|
331
|
+
}).nullish(),
|
|
332
|
+
completion_tokens_details: z2.object({
|
|
333
|
+
reasoning_tokens: z2.number().nullish(),
|
|
334
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
335
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
336
|
+
}).nullish()
|
|
337
|
+
}).nullish()
|
|
338
|
+
})
|
|
339
|
+
)
|
|
340
|
+
);
|
|
341
|
+
var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
|
|
342
|
+
() => (0, import_provider_utils3.zodSchema)(
|
|
343
|
+
z2.union([
|
|
344
|
+
z2.object({
|
|
345
|
+
id: z2.string().nullish(),
|
|
346
|
+
created: z2.number().nullish(),
|
|
347
|
+
model: z2.string().nullish(),
|
|
348
|
+
choices: z2.array(
|
|
349
|
+
z2.object({
|
|
350
|
+
delta: z2.object({
|
|
351
|
+
role: z2.enum(["assistant"]).nullish(),
|
|
352
|
+
content: z2.string().nullish(),
|
|
353
|
+
tool_calls: z2.array(
|
|
354
|
+
z2.object({
|
|
355
|
+
index: z2.number(),
|
|
356
|
+
id: z2.string().nullish(),
|
|
357
|
+
type: z2.literal("function").nullish(),
|
|
358
|
+
function: z2.object({
|
|
359
|
+
name: z2.string().nullish(),
|
|
360
|
+
arguments: z2.string().nullish()
|
|
361
|
+
})
|
|
362
|
+
})
|
|
363
|
+
).nullish(),
|
|
364
|
+
annotations: z2.array(
|
|
365
|
+
z2.object({
|
|
366
|
+
type: z2.literal("url_citation"),
|
|
367
|
+
start_index: z2.number(),
|
|
368
|
+
end_index: z2.number(),
|
|
369
|
+
url: z2.string(),
|
|
370
|
+
title: z2.string()
|
|
371
|
+
})
|
|
372
|
+
).nullish()
|
|
373
|
+
}).nullish(),
|
|
374
|
+
logprobs: z2.object({
|
|
375
|
+
content: z2.array(
|
|
376
|
+
z2.object({
|
|
377
|
+
token: z2.string(),
|
|
378
|
+
logprob: z2.number(),
|
|
379
|
+
top_logprobs: z2.array(
|
|
380
|
+
z2.object({
|
|
381
|
+
token: z2.string(),
|
|
382
|
+
logprob: z2.number()
|
|
383
|
+
})
|
|
384
|
+
)
|
|
385
|
+
})
|
|
386
|
+
).nullish()
|
|
387
|
+
}).nullish(),
|
|
388
|
+
finish_reason: z2.string().nullish(),
|
|
389
|
+
index: z2.number()
|
|
390
|
+
})
|
|
391
|
+
),
|
|
392
|
+
usage: z2.object({
|
|
393
|
+
prompt_tokens: z2.number().nullish(),
|
|
394
|
+
completion_tokens: z2.number().nullish(),
|
|
395
|
+
total_tokens: z2.number().nullish(),
|
|
396
|
+
prompt_tokens_details: z2.object({
|
|
397
|
+
cached_tokens: z2.number().nullish()
|
|
398
|
+
}).nullish(),
|
|
399
|
+
completion_tokens_details: z2.object({
|
|
400
|
+
reasoning_tokens: z2.number().nullish(),
|
|
401
|
+
accepted_prediction_tokens: z2.number().nullish(),
|
|
402
|
+
rejected_prediction_tokens: z2.number().nullish()
|
|
403
|
+
}).nullish()
|
|
404
|
+
}).nullish()
|
|
405
|
+
}),
|
|
406
|
+
openaiErrorDataSchema
|
|
407
|
+
])
|
|
408
|
+
)
|
|
409
|
+
);
|
|
410
|
+
|
|
264
411
|
// src/chat/openai-chat-options.ts
|
|
265
|
-
var
|
|
266
|
-
var
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
412
|
+
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
413
|
+
var z3 = __toESM(require("zod/v4"));
|
|
414
|
+
var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
415
|
+
() => (0, import_provider_utils4.zodSchema)(
|
|
416
|
+
z3.object({
|
|
417
|
+
/**
|
|
418
|
+
* Modify the likelihood of specified tokens appearing in the completion.
|
|
419
|
+
*
|
|
420
|
+
* Accepts a JSON object that maps tokens (specified by their token ID in
|
|
421
|
+
* the GPT tokenizer) to an associated bias value from -100 to 100.
|
|
422
|
+
*/
|
|
423
|
+
logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
|
|
424
|
+
/**
|
|
425
|
+
* Return the log probabilities of the tokens.
|
|
426
|
+
*
|
|
427
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
428
|
+
* were generated.
|
|
429
|
+
*
|
|
430
|
+
* Setting to a number will return the log probabilities of the top n
|
|
431
|
+
* tokens that were generated.
|
|
432
|
+
*/
|
|
433
|
+
logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
|
|
434
|
+
/**
|
|
435
|
+
* Whether to enable parallel function calling during tool use. Default to true.
|
|
436
|
+
*/
|
|
437
|
+
parallelToolCalls: z3.boolean().optional(),
|
|
438
|
+
/**
|
|
439
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
440
|
+
* monitor and detect abuse.
|
|
441
|
+
*/
|
|
442
|
+
user: z3.string().optional(),
|
|
443
|
+
/**
|
|
444
|
+
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
445
|
+
*/
|
|
446
|
+
reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
447
|
+
/**
|
|
448
|
+
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
449
|
+
*/
|
|
450
|
+
maxCompletionTokens: z3.number().optional(),
|
|
451
|
+
/**
|
|
452
|
+
* Whether to enable persistence in responses API.
|
|
453
|
+
*/
|
|
454
|
+
store: z3.boolean().optional(),
|
|
455
|
+
/**
|
|
456
|
+
* Metadata to associate with the request.
|
|
457
|
+
*/
|
|
458
|
+
metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
|
|
459
|
+
/**
|
|
460
|
+
* Parameters for prediction mode.
|
|
461
|
+
*/
|
|
462
|
+
prediction: z3.record(z3.string(), z3.any()).optional(),
|
|
463
|
+
/**
|
|
464
|
+
* Whether to use structured outputs.
|
|
465
|
+
*
|
|
466
|
+
* @default true
|
|
467
|
+
*/
|
|
468
|
+
structuredOutputs: z3.boolean().optional(),
|
|
469
|
+
/**
|
|
470
|
+
* Service tier for the request.
|
|
471
|
+
* - 'auto': Default service tier. The request will be processed with the service tier configured in the
|
|
472
|
+
* Project settings. Unless otherwise configured, the Project will use 'default'.
|
|
473
|
+
* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
|
|
474
|
+
* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
|
|
475
|
+
* - 'default': The request will be processed with the standard pricing and performance for the selected model.
|
|
476
|
+
*
|
|
477
|
+
* @default 'auto'
|
|
478
|
+
*/
|
|
479
|
+
serviceTier: z3.enum(["auto", "flex", "priority", "default"]).optional(),
|
|
480
|
+
/**
|
|
481
|
+
* Whether to use strict JSON schema validation.
|
|
482
|
+
*
|
|
483
|
+
* @default false
|
|
484
|
+
*/
|
|
485
|
+
strictJsonSchema: z3.boolean().optional(),
|
|
486
|
+
/**
|
|
487
|
+
* Controls the verbosity of the model's responses.
|
|
488
|
+
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
489
|
+
*/
|
|
490
|
+
textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
|
|
491
|
+
/**
|
|
492
|
+
* A cache key for prompt caching. Allows manual control over prompt caching behavior.
|
|
493
|
+
* Useful for improving cache hit rates and working around automatic caching issues.
|
|
494
|
+
*/
|
|
495
|
+
promptCacheKey: z3.string().optional(),
|
|
496
|
+
/**
|
|
497
|
+
* A stable identifier used to help detect users of your application
|
|
498
|
+
* that may be violating OpenAI's usage policies. The IDs should be a
|
|
499
|
+
* string that uniquely identifies each user. We recommend hashing their
|
|
500
|
+
* username or email address, in order to avoid sending us any identifying
|
|
501
|
+
* information.
|
|
502
|
+
*/
|
|
503
|
+
safetyIdentifier: z3.string().optional()
|
|
504
|
+
})
|
|
505
|
+
)
|
|
506
|
+
);
|
|
353
507
|
|
|
354
508
|
// src/chat/openai-chat-prepare-tools.ts
|
|
355
509
|
var import_provider2 = require("@ai-sdk/provider");
|
|
@@ -442,7 +596,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
442
596
|
}) {
|
|
443
597
|
var _a, _b, _c, _d;
|
|
444
598
|
const warnings = [];
|
|
445
|
-
const openaiOptions = (_a = await (0,
|
|
599
|
+
const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
|
|
446
600
|
provider: "openai",
|
|
447
601
|
providerOptions,
|
|
448
602
|
schema: openaiChatLanguageModelOptions
|
|
@@ -621,15 +775,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
621
775
|
responseHeaders,
|
|
622
776
|
value: response,
|
|
623
777
|
rawValue: rawResponse
|
|
624
|
-
} = await (0,
|
|
778
|
+
} = await (0, import_provider_utils5.postJsonToApi)({
|
|
625
779
|
url: this.config.url({
|
|
626
780
|
path: "/chat/completions",
|
|
627
781
|
modelId: this.modelId
|
|
628
782
|
}),
|
|
629
|
-
headers: (0,
|
|
783
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
630
784
|
body,
|
|
631
785
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
632
|
-
successfulResponseHandler: (0,
|
|
786
|
+
successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
|
|
633
787
|
openaiChatResponseSchema
|
|
634
788
|
),
|
|
635
789
|
abortSignal: options.abortSignal,
|
|
@@ -644,7 +798,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
644
798
|
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
|
|
645
799
|
content.push({
|
|
646
800
|
type: "tool-call",
|
|
647
|
-
toolCallId: (_b = toolCall.id) != null ? _b : (0,
|
|
801
|
+
toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
|
|
648
802
|
toolName: toolCall.function.name,
|
|
649
803
|
input: toolCall.function.arguments
|
|
650
804
|
});
|
|
@@ -653,7 +807,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
653
807
|
content.push({
|
|
654
808
|
type: "source",
|
|
655
809
|
sourceType: "url",
|
|
656
|
-
id: (0,
|
|
810
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
657
811
|
url: annotation.url,
|
|
658
812
|
title: annotation.title
|
|
659
813
|
});
|
|
@@ -699,15 +853,15 @@ var OpenAIChatLanguageModel = class {
|
|
|
699
853
|
include_usage: true
|
|
700
854
|
}
|
|
701
855
|
};
|
|
702
|
-
const { responseHeaders, value: response } = await (0,
|
|
856
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
|
|
703
857
|
url: this.config.url({
|
|
704
858
|
path: "/chat/completions",
|
|
705
859
|
modelId: this.modelId
|
|
706
860
|
}),
|
|
707
|
-
headers: (0,
|
|
861
|
+
headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
|
|
708
862
|
body,
|
|
709
863
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
710
|
-
successfulResponseHandler: (0,
|
|
864
|
+
successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
|
|
711
865
|
openaiChatChunkSchema
|
|
712
866
|
),
|
|
713
867
|
abortSignal: options.abortSignal,
|
|
@@ -832,14 +986,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
832
986
|
delta: toolCall2.function.arguments
|
|
833
987
|
});
|
|
834
988
|
}
|
|
835
|
-
if ((0,
|
|
989
|
+
if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
|
|
836
990
|
controller.enqueue({
|
|
837
991
|
type: "tool-input-end",
|
|
838
992
|
id: toolCall2.id
|
|
839
993
|
});
|
|
840
994
|
controller.enqueue({
|
|
841
995
|
type: "tool-call",
|
|
842
|
-
toolCallId: (_q = toolCall2.id) != null ? _q : (0,
|
|
996
|
+
toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
|
|
843
997
|
toolName: toolCall2.function.name,
|
|
844
998
|
input: toolCall2.function.arguments
|
|
845
999
|
});
|
|
@@ -860,14 +1014,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
860
1014
|
id: toolCall.id,
|
|
861
1015
|
delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
|
|
862
1016
|
});
|
|
863
|
-
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0,
|
|
1017
|
+
if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
|
|
864
1018
|
controller.enqueue({
|
|
865
1019
|
type: "tool-input-end",
|
|
866
1020
|
id: toolCall.id
|
|
867
1021
|
});
|
|
868
1022
|
controller.enqueue({
|
|
869
1023
|
type: "tool-call",
|
|
870
|
-
toolCallId: (_x = toolCall.id) != null ? _x : (0,
|
|
1024
|
+
toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
|
|
871
1025
|
toolName: toolCall.function.name,
|
|
872
1026
|
input: toolCall.function.arguments
|
|
873
1027
|
});
|
|
@@ -880,7 +1034,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
880
1034
|
controller.enqueue({
|
|
881
1035
|
type: "source",
|
|
882
1036
|
sourceType: "url",
|
|
883
|
-
id: (0,
|
|
1037
|
+
id: (0, import_provider_utils5.generateId)(),
|
|
884
1038
|
url: annotation.url,
|
|
885
1039
|
title: annotation.title
|
|
886
1040
|
});
|
|
@@ -905,121 +1059,6 @@ var OpenAIChatLanguageModel = class {
|
|
|
905
1059
|
};
|
|
906
1060
|
}
|
|
907
1061
|
};
|
|
908
|
-
var openaiTokenUsageSchema = import_v43.z.object({
|
|
909
|
-
prompt_tokens: import_v43.z.number().nullish(),
|
|
910
|
-
completion_tokens: import_v43.z.number().nullish(),
|
|
911
|
-
total_tokens: import_v43.z.number().nullish(),
|
|
912
|
-
prompt_tokens_details: import_v43.z.object({
|
|
913
|
-
cached_tokens: import_v43.z.number().nullish()
|
|
914
|
-
}).nullish(),
|
|
915
|
-
completion_tokens_details: import_v43.z.object({
|
|
916
|
-
reasoning_tokens: import_v43.z.number().nullish(),
|
|
917
|
-
accepted_prediction_tokens: import_v43.z.number().nullish(),
|
|
918
|
-
rejected_prediction_tokens: import_v43.z.number().nullish()
|
|
919
|
-
}).nullish()
|
|
920
|
-
}).nullish();
|
|
921
|
-
var openaiChatResponseSchema = import_v43.z.object({
|
|
922
|
-
id: import_v43.z.string().nullish(),
|
|
923
|
-
created: import_v43.z.number().nullish(),
|
|
924
|
-
model: import_v43.z.string().nullish(),
|
|
925
|
-
choices: import_v43.z.array(
|
|
926
|
-
import_v43.z.object({
|
|
927
|
-
message: import_v43.z.object({
|
|
928
|
-
role: import_v43.z.literal("assistant").nullish(),
|
|
929
|
-
content: import_v43.z.string().nullish(),
|
|
930
|
-
tool_calls: import_v43.z.array(
|
|
931
|
-
import_v43.z.object({
|
|
932
|
-
id: import_v43.z.string().nullish(),
|
|
933
|
-
type: import_v43.z.literal("function"),
|
|
934
|
-
function: import_v43.z.object({
|
|
935
|
-
name: import_v43.z.string(),
|
|
936
|
-
arguments: import_v43.z.string()
|
|
937
|
-
})
|
|
938
|
-
})
|
|
939
|
-
).nullish(),
|
|
940
|
-
annotations: import_v43.z.array(
|
|
941
|
-
import_v43.z.object({
|
|
942
|
-
type: import_v43.z.literal("url_citation"),
|
|
943
|
-
start_index: import_v43.z.number(),
|
|
944
|
-
end_index: import_v43.z.number(),
|
|
945
|
-
url: import_v43.z.string(),
|
|
946
|
-
title: import_v43.z.string()
|
|
947
|
-
})
|
|
948
|
-
).nullish()
|
|
949
|
-
}),
|
|
950
|
-
index: import_v43.z.number(),
|
|
951
|
-
logprobs: import_v43.z.object({
|
|
952
|
-
content: import_v43.z.array(
|
|
953
|
-
import_v43.z.object({
|
|
954
|
-
token: import_v43.z.string(),
|
|
955
|
-
logprob: import_v43.z.number(),
|
|
956
|
-
top_logprobs: import_v43.z.array(
|
|
957
|
-
import_v43.z.object({
|
|
958
|
-
token: import_v43.z.string(),
|
|
959
|
-
logprob: import_v43.z.number()
|
|
960
|
-
})
|
|
961
|
-
)
|
|
962
|
-
})
|
|
963
|
-
).nullish()
|
|
964
|
-
}).nullish(),
|
|
965
|
-
finish_reason: import_v43.z.string().nullish()
|
|
966
|
-
})
|
|
967
|
-
),
|
|
968
|
-
usage: openaiTokenUsageSchema
|
|
969
|
-
});
|
|
970
|
-
var openaiChatChunkSchema = import_v43.z.union([
|
|
971
|
-
import_v43.z.object({
|
|
972
|
-
id: import_v43.z.string().nullish(),
|
|
973
|
-
created: import_v43.z.number().nullish(),
|
|
974
|
-
model: import_v43.z.string().nullish(),
|
|
975
|
-
choices: import_v43.z.array(
|
|
976
|
-
import_v43.z.object({
|
|
977
|
-
delta: import_v43.z.object({
|
|
978
|
-
role: import_v43.z.enum(["assistant"]).nullish(),
|
|
979
|
-
content: import_v43.z.string().nullish(),
|
|
980
|
-
tool_calls: import_v43.z.array(
|
|
981
|
-
import_v43.z.object({
|
|
982
|
-
index: import_v43.z.number(),
|
|
983
|
-
id: import_v43.z.string().nullish(),
|
|
984
|
-
type: import_v43.z.literal("function").nullish(),
|
|
985
|
-
function: import_v43.z.object({
|
|
986
|
-
name: import_v43.z.string().nullish(),
|
|
987
|
-
arguments: import_v43.z.string().nullish()
|
|
988
|
-
})
|
|
989
|
-
})
|
|
990
|
-
).nullish(),
|
|
991
|
-
annotations: import_v43.z.array(
|
|
992
|
-
import_v43.z.object({
|
|
993
|
-
type: import_v43.z.literal("url_citation"),
|
|
994
|
-
start_index: import_v43.z.number(),
|
|
995
|
-
end_index: import_v43.z.number(),
|
|
996
|
-
url: import_v43.z.string(),
|
|
997
|
-
title: import_v43.z.string()
|
|
998
|
-
})
|
|
999
|
-
).nullish()
|
|
1000
|
-
}).nullish(),
|
|
1001
|
-
logprobs: import_v43.z.object({
|
|
1002
|
-
content: import_v43.z.array(
|
|
1003
|
-
import_v43.z.object({
|
|
1004
|
-
token: import_v43.z.string(),
|
|
1005
|
-
logprob: import_v43.z.number(),
|
|
1006
|
-
top_logprobs: import_v43.z.array(
|
|
1007
|
-
import_v43.z.object({
|
|
1008
|
-
token: import_v43.z.string(),
|
|
1009
|
-
logprob: import_v43.z.number()
|
|
1010
|
-
})
|
|
1011
|
-
)
|
|
1012
|
-
})
|
|
1013
|
-
).nullish()
|
|
1014
|
-
}).nullish(),
|
|
1015
|
-
finish_reason: import_v43.z.string().nullish(),
|
|
1016
|
-
index: import_v43.z.number()
|
|
1017
|
-
})
|
|
1018
|
-
),
|
|
1019
|
-
usage: openaiTokenUsageSchema
|
|
1020
|
-
}),
|
|
1021
|
-
openaiErrorDataSchema
|
|
1022
|
-
]);
|
|
1023
1062
|
function isReasoningModel(modelId) {
|
|
1024
1063
|
return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
|
|
1025
1064
|
}
|
|
@@ -1070,8 +1109,7 @@ var reasoningModels = {
|
|
|
1070
1109
|
};
|
|
1071
1110
|
|
|
1072
1111
|
// src/completion/openai-completion-language-model.ts
|
|
1073
|
-
var
|
|
1074
|
-
var import_v45 = require("zod/v4");
|
|
1112
|
+
var import_provider_utils8 = require("@ai-sdk/provider-utils");
|
|
1075
1113
|
|
|
1076
1114
|
// src/completion/convert-to-openai-completion-prompt.ts
|
|
1077
1115
|
var import_provider4 = require("@ai-sdk/provider");
|
|
@@ -1178,48 +1216,111 @@ function mapOpenAIFinishReason2(finishReason) {
|
|
|
1178
1216
|
}
|
|
1179
1217
|
}
|
|
1180
1218
|
|
|
1219
|
+
// src/completion/openai-completion-api.ts
|
|
1220
|
+
var z4 = __toESM(require("zod/v4"));
|
|
1221
|
+
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1222
|
+
var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1223
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1224
|
+
z4.object({
|
|
1225
|
+
id: z4.string().nullish(),
|
|
1226
|
+
created: z4.number().nullish(),
|
|
1227
|
+
model: z4.string().nullish(),
|
|
1228
|
+
choices: z4.array(
|
|
1229
|
+
z4.object({
|
|
1230
|
+
text: z4.string(),
|
|
1231
|
+
finish_reason: z4.string(),
|
|
1232
|
+
logprobs: z4.object({
|
|
1233
|
+
tokens: z4.array(z4.string()),
|
|
1234
|
+
token_logprobs: z4.array(z4.number()),
|
|
1235
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1236
|
+
}).nullish()
|
|
1237
|
+
})
|
|
1238
|
+
),
|
|
1239
|
+
usage: z4.object({
|
|
1240
|
+
prompt_tokens: z4.number(),
|
|
1241
|
+
completion_tokens: z4.number(),
|
|
1242
|
+
total_tokens: z4.number()
|
|
1243
|
+
}).nullish()
|
|
1244
|
+
})
|
|
1245
|
+
)
|
|
1246
|
+
);
|
|
1247
|
+
var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1248
|
+
() => (0, import_provider_utils6.zodSchema)(
|
|
1249
|
+
z4.union([
|
|
1250
|
+
z4.object({
|
|
1251
|
+
id: z4.string().nullish(),
|
|
1252
|
+
created: z4.number().nullish(),
|
|
1253
|
+
model: z4.string().nullish(),
|
|
1254
|
+
choices: z4.array(
|
|
1255
|
+
z4.object({
|
|
1256
|
+
text: z4.string(),
|
|
1257
|
+
finish_reason: z4.string().nullish(),
|
|
1258
|
+
index: z4.number(),
|
|
1259
|
+
logprobs: z4.object({
|
|
1260
|
+
tokens: z4.array(z4.string()),
|
|
1261
|
+
token_logprobs: z4.array(z4.number()),
|
|
1262
|
+
top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
|
|
1263
|
+
}).nullish()
|
|
1264
|
+
})
|
|
1265
|
+
),
|
|
1266
|
+
usage: z4.object({
|
|
1267
|
+
prompt_tokens: z4.number(),
|
|
1268
|
+
completion_tokens: z4.number(),
|
|
1269
|
+
total_tokens: z4.number()
|
|
1270
|
+
}).nullish()
|
|
1271
|
+
}),
|
|
1272
|
+
openaiErrorDataSchema
|
|
1273
|
+
])
|
|
1274
|
+
)
|
|
1275
|
+
);
|
|
1276
|
+
|
|
1181
1277
|
// src/completion/openai-completion-options.ts
|
|
1182
|
-
var
|
|
1183
|
-
var
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1278
|
+
var import_provider_utils7 = require("@ai-sdk/provider-utils");
|
|
1279
|
+
var z5 = __toESM(require("zod/v4"));
|
|
1280
|
+
var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
|
|
1281
|
+
() => (0, import_provider_utils7.zodSchema)(
|
|
1282
|
+
z5.object({
|
|
1283
|
+
/**
|
|
1284
|
+
Echo back the prompt in addition to the completion.
|
|
1285
|
+
*/
|
|
1286
|
+
echo: z5.boolean().optional(),
|
|
1287
|
+
/**
|
|
1288
|
+
Modify the likelihood of specified tokens appearing in the completion.
|
|
1289
|
+
|
|
1290
|
+
Accepts a JSON object that maps tokens (specified by their token ID in
|
|
1291
|
+
the GPT tokenizer) to an associated bias value from -100 to 100. You
|
|
1292
|
+
can use this tokenizer tool to convert text to token IDs. Mathematically,
|
|
1293
|
+
the bias is added to the logits generated by the model prior to sampling.
|
|
1294
|
+
The exact effect will vary per model, but values between -1 and 1 should
|
|
1295
|
+
decrease or increase likelihood of selection; values like -100 or 100
|
|
1296
|
+
should result in a ban or exclusive selection of the relevant token.
|
|
1297
|
+
|
|
1298
|
+
As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
1299
|
+
token from being generated.
|
|
1300
|
+
*/
|
|
1301
|
+
logitBias: z5.record(z5.string(), z5.number()).optional(),
|
|
1302
|
+
/**
|
|
1303
|
+
The suffix that comes after a completion of inserted text.
|
|
1304
|
+
*/
|
|
1305
|
+
suffix: z5.string().optional(),
|
|
1306
|
+
/**
|
|
1307
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1308
|
+
monitor and detect abuse. Learn more.
|
|
1309
|
+
*/
|
|
1310
|
+
user: z5.string().optional(),
|
|
1311
|
+
/**
|
|
1312
|
+
Return the log probabilities of the tokens. Including logprobs will increase
|
|
1313
|
+
the response size and can slow down response times. However, it can
|
|
1314
|
+
be useful to better understand how the model is behaving.
|
|
1315
|
+
Setting to true will return the log probabilities of the tokens that
|
|
1316
|
+
were generated.
|
|
1317
|
+
Setting to a number will return the log probabilities of the top n
|
|
1318
|
+
tokens that were generated.
|
|
1319
|
+
*/
|
|
1320
|
+
logprobs: z5.union([z5.boolean(), z5.number()]).optional()
|
|
1321
|
+
})
|
|
1322
|
+
)
|
|
1323
|
+
);
|
|
1223
1324
|
|
|
1224
1325
|
// src/completion/openai-completion-language-model.ts
|
|
1225
1326
|
var OpenAICompletionLanguageModel = class {
|
|
@@ -1254,12 +1355,12 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1254
1355
|
}) {
|
|
1255
1356
|
const warnings = [];
|
|
1256
1357
|
const openaiOptions = {
|
|
1257
|
-
...await (0,
|
|
1358
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1258
1359
|
provider: "openai",
|
|
1259
1360
|
providerOptions,
|
|
1260
1361
|
schema: openaiCompletionProviderOptions
|
|
1261
1362
|
}),
|
|
1262
|
-
...await (0,
|
|
1363
|
+
...await (0, import_provider_utils8.parseProviderOptions)({
|
|
1263
1364
|
provider: this.providerOptionsName,
|
|
1264
1365
|
providerOptions,
|
|
1265
1366
|
schema: openaiCompletionProviderOptions
|
|
@@ -1315,15 +1416,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1315
1416
|
responseHeaders,
|
|
1316
1417
|
value: response,
|
|
1317
1418
|
rawValue: rawResponse
|
|
1318
|
-
} = await (0,
|
|
1419
|
+
} = await (0, import_provider_utils8.postJsonToApi)({
|
|
1319
1420
|
url: this.config.url({
|
|
1320
1421
|
path: "/completions",
|
|
1321
1422
|
modelId: this.modelId
|
|
1322
1423
|
}),
|
|
1323
|
-
headers: (0,
|
|
1424
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1324
1425
|
body: args,
|
|
1325
1426
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1326
|
-
successfulResponseHandler: (0,
|
|
1427
|
+
successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
|
|
1327
1428
|
openaiCompletionResponseSchema
|
|
1328
1429
|
),
|
|
1329
1430
|
abortSignal: options.abortSignal,
|
|
@@ -1361,15 +1462,15 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1361
1462
|
include_usage: true
|
|
1362
1463
|
}
|
|
1363
1464
|
};
|
|
1364
|
-
const { responseHeaders, value: response } = await (0,
|
|
1465
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
|
|
1365
1466
|
url: this.config.url({
|
|
1366
1467
|
path: "/completions",
|
|
1367
1468
|
modelId: this.modelId
|
|
1368
1469
|
}),
|
|
1369
|
-
headers: (0,
|
|
1470
|
+
headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
|
|
1370
1471
|
body,
|
|
1371
1472
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1372
|
-
successfulResponseHandler: (0,
|
|
1473
|
+
successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
|
|
1373
1474
|
openaiCompletionChunkSchema
|
|
1374
1475
|
),
|
|
1375
1476
|
abortSignal: options.abortSignal,
|
|
@@ -1450,69 +1551,42 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1450
1551
|
};
|
|
1451
1552
|
}
|
|
1452
1553
|
};
|
|
1453
|
-
var usageSchema = import_v45.z.object({
|
|
1454
|
-
prompt_tokens: import_v45.z.number(),
|
|
1455
|
-
completion_tokens: import_v45.z.number(),
|
|
1456
|
-
total_tokens: import_v45.z.number()
|
|
1457
|
-
});
|
|
1458
|
-
var openaiCompletionResponseSchema = import_v45.z.object({
|
|
1459
|
-
id: import_v45.z.string().nullish(),
|
|
1460
|
-
created: import_v45.z.number().nullish(),
|
|
1461
|
-
model: import_v45.z.string().nullish(),
|
|
1462
|
-
choices: import_v45.z.array(
|
|
1463
|
-
import_v45.z.object({
|
|
1464
|
-
text: import_v45.z.string(),
|
|
1465
|
-
finish_reason: import_v45.z.string(),
|
|
1466
|
-
logprobs: import_v45.z.object({
|
|
1467
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1468
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1469
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1470
|
-
}).nullish()
|
|
1471
|
-
})
|
|
1472
|
-
),
|
|
1473
|
-
usage: usageSchema.nullish()
|
|
1474
|
-
});
|
|
1475
|
-
var openaiCompletionChunkSchema = import_v45.z.union([
|
|
1476
|
-
import_v45.z.object({
|
|
1477
|
-
id: import_v45.z.string().nullish(),
|
|
1478
|
-
created: import_v45.z.number().nullish(),
|
|
1479
|
-
model: import_v45.z.string().nullish(),
|
|
1480
|
-
choices: import_v45.z.array(
|
|
1481
|
-
import_v45.z.object({
|
|
1482
|
-
text: import_v45.z.string(),
|
|
1483
|
-
finish_reason: import_v45.z.string().nullish(),
|
|
1484
|
-
index: import_v45.z.number(),
|
|
1485
|
-
logprobs: import_v45.z.object({
|
|
1486
|
-
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1487
|
-
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1488
|
-
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
|
|
1489
|
-
}).nullish()
|
|
1490
|
-
})
|
|
1491
|
-
),
|
|
1492
|
-
usage: usageSchema.nullish()
|
|
1493
|
-
}),
|
|
1494
|
-
openaiErrorDataSchema
|
|
1495
|
-
]);
|
|
1496
1554
|
|
|
1497
1555
|
// src/embedding/openai-embedding-model.ts
|
|
1498
1556
|
var import_provider5 = require("@ai-sdk/provider");
|
|
1499
|
-
var
|
|
1500
|
-
var import_v47 = require("zod/v4");
|
|
1557
|
+
var import_provider_utils11 = require("@ai-sdk/provider-utils");
|
|
1501
1558
|
|
|
1502
1559
|
// src/embedding/openai-embedding-options.ts
|
|
1503
|
-
var
|
|
1504
|
-
var
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
|
|
1512
|
-
|
|
1513
|
-
|
|
1514
|
-
|
|
1515
|
-
|
|
1560
|
+
var import_provider_utils9 = require("@ai-sdk/provider-utils");
|
|
1561
|
+
var z6 = __toESM(require("zod/v4"));
|
|
1562
|
+
var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
|
|
1563
|
+
() => (0, import_provider_utils9.zodSchema)(
|
|
1564
|
+
z6.object({
|
|
1565
|
+
/**
|
|
1566
|
+
The number of dimensions the resulting output embeddings should have.
|
|
1567
|
+
Only supported in text-embedding-3 and later models.
|
|
1568
|
+
*/
|
|
1569
|
+
dimensions: z6.number().optional(),
|
|
1570
|
+
/**
|
|
1571
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1572
|
+
monitor and detect abuse. Learn more.
|
|
1573
|
+
*/
|
|
1574
|
+
user: z6.string().optional()
|
|
1575
|
+
})
|
|
1576
|
+
)
|
|
1577
|
+
);
|
|
1578
|
+
|
|
1579
|
+
// src/embedding/openai-embedding-api.ts
|
|
1580
|
+
var import_provider_utils10 = require("@ai-sdk/provider-utils");
|
|
1581
|
+
var z7 = __toESM(require("zod/v4"));
|
|
1582
|
+
var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
|
|
1583
|
+
() => (0, import_provider_utils10.zodSchema)(
|
|
1584
|
+
z7.object({
|
|
1585
|
+
data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
|
|
1586
|
+
usage: z7.object({ prompt_tokens: z7.number() }).nullish()
|
|
1587
|
+
})
|
|
1588
|
+
)
|
|
1589
|
+
);
|
|
1516
1590
|
|
|
1517
1591
|
// src/embedding/openai-embedding-model.ts
|
|
1518
1592
|
var OpenAIEmbeddingModel = class {
|
|
@@ -1541,7 +1615,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1541
1615
|
values
|
|
1542
1616
|
});
|
|
1543
1617
|
}
|
|
1544
|
-
const openaiOptions = (_a = await (0,
|
|
1618
|
+
const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
|
|
1545
1619
|
provider: "openai",
|
|
1546
1620
|
providerOptions,
|
|
1547
1621
|
schema: openaiEmbeddingProviderOptions
|
|
@@ -1550,12 +1624,12 @@ var OpenAIEmbeddingModel = class {
|
|
|
1550
1624
|
responseHeaders,
|
|
1551
1625
|
value: response,
|
|
1552
1626
|
rawValue
|
|
1553
|
-
} = await (0,
|
|
1627
|
+
} = await (0, import_provider_utils11.postJsonToApi)({
|
|
1554
1628
|
url: this.config.url({
|
|
1555
1629
|
path: "/embeddings",
|
|
1556
1630
|
modelId: this.modelId
|
|
1557
1631
|
}),
|
|
1558
|
-
headers: (0,
|
|
1632
|
+
headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
|
|
1559
1633
|
body: {
|
|
1560
1634
|
model: this.modelId,
|
|
1561
1635
|
input: values,
|
|
@@ -1564,7 +1638,7 @@ var OpenAIEmbeddingModel = class {
|
|
|
1564
1638
|
user: openaiOptions.user
|
|
1565
1639
|
},
|
|
1566
1640
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1567
|
-
successfulResponseHandler: (0,
|
|
1641
|
+
successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
|
|
1568
1642
|
openaiTextEmbeddingResponseSchema
|
|
1569
1643
|
),
|
|
1570
1644
|
abortSignal,
|
|
@@ -1577,14 +1651,25 @@ var OpenAIEmbeddingModel = class {
|
|
|
1577
1651
|
};
|
|
1578
1652
|
}
|
|
1579
1653
|
};
|
|
1580
|
-
var openaiTextEmbeddingResponseSchema = import_v47.z.object({
|
|
1581
|
-
data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
|
|
1582
|
-
usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
|
|
1583
|
-
});
|
|
1584
1654
|
|
|
1585
1655
|
// src/image/openai-image-model.ts
|
|
1586
|
-
var
|
|
1587
|
-
|
|
1656
|
+
var import_provider_utils13 = require("@ai-sdk/provider-utils");
|
|
1657
|
+
|
|
1658
|
+
// src/image/openai-image-api.ts
|
|
1659
|
+
var import_provider_utils12 = require("@ai-sdk/provider-utils");
|
|
1660
|
+
var z8 = __toESM(require("zod/v4"));
|
|
1661
|
+
var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
|
|
1662
|
+
() => (0, import_provider_utils12.zodSchema)(
|
|
1663
|
+
z8.object({
|
|
1664
|
+
data: z8.array(
|
|
1665
|
+
z8.object({
|
|
1666
|
+
b64_json: z8.string(),
|
|
1667
|
+
revised_prompt: z8.string().optional()
|
|
1668
|
+
})
|
|
1669
|
+
)
|
|
1670
|
+
})
|
|
1671
|
+
)
|
|
1672
|
+
);
|
|
1588
1673
|
|
|
1589
1674
|
// src/image/openai-image-options.ts
|
|
1590
1675
|
var modelMaxImagesPerCall = {
|
|
@@ -1635,12 +1720,12 @@ var OpenAIImageModel = class {
|
|
|
1635
1720
|
warnings.push({ type: "unsupported-setting", setting: "seed" });
|
|
1636
1721
|
}
|
|
1637
1722
|
const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
|
|
1638
|
-
const { value: response, responseHeaders } = await (0,
|
|
1723
|
+
const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
|
|
1639
1724
|
url: this.config.url({
|
|
1640
1725
|
path: "/images/generations",
|
|
1641
1726
|
modelId: this.modelId
|
|
1642
1727
|
}),
|
|
1643
|
-
headers: (0,
|
|
1728
|
+
headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
|
|
1644
1729
|
body: {
|
|
1645
1730
|
model: this.modelId,
|
|
1646
1731
|
prompt,
|
|
@@ -1650,7 +1735,7 @@ var OpenAIImageModel = class {
|
|
|
1650
1735
|
...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
|
|
1651
1736
|
},
|
|
1652
1737
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
1653
|
-
successfulResponseHandler: (0,
|
|
1738
|
+
successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
|
|
1654
1739
|
openaiImageResponseSchema
|
|
1655
1740
|
),
|
|
1656
1741
|
abortSignal,
|
|
@@ -1676,36 +1761,43 @@ var OpenAIImageModel = class {
|
|
|
1676
1761
|
};
|
|
1677
1762
|
}
|
|
1678
1763
|
};
|
|
1679
|
-
var openaiImageResponseSchema = import_v48.z.object({
|
|
1680
|
-
data: import_v48.z.array(
|
|
1681
|
-
import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
|
|
1682
|
-
)
|
|
1683
|
-
});
|
|
1684
1764
|
|
|
1685
1765
|
// src/tool/code-interpreter.ts
|
|
1686
|
-
var
|
|
1687
|
-
var
|
|
1688
|
-
var codeInterpreterInputSchema =
|
|
1689
|
-
|
|
1690
|
-
|
|
1691
|
-
|
|
1692
|
-
|
|
1693
|
-
outputs: import_v49.z.array(
|
|
1694
|
-
import_v49.z.discriminatedUnion("type", [
|
|
1695
|
-
import_v49.z.object({ type: import_v49.z.literal("logs"), logs: import_v49.z.string() }),
|
|
1696
|
-
import_v49.z.object({ type: import_v49.z.literal("image"), url: import_v49.z.string() })
|
|
1697
|
-
])
|
|
1698
|
-
).nullish()
|
|
1699
|
-
});
|
|
1700
|
-
var codeInterpreterArgsSchema = import_v49.z.object({
|
|
1701
|
-
container: import_v49.z.union([
|
|
1702
|
-
import_v49.z.string(),
|
|
1703
|
-
import_v49.z.object({
|
|
1704
|
-
fileIds: import_v49.z.array(import_v49.z.string()).optional()
|
|
1766
|
+
var import_provider_utils14 = require("@ai-sdk/provider-utils");
|
|
1767
|
+
var z9 = __toESM(require("zod/v4"));
|
|
1768
|
+
var codeInterpreterInputSchema = (0, import_provider_utils14.lazySchema)(
|
|
1769
|
+
() => (0, import_provider_utils14.zodSchema)(
|
|
1770
|
+
z9.object({
|
|
1771
|
+
code: z9.string().nullish(),
|
|
1772
|
+
containerId: z9.string()
|
|
1705
1773
|
})
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
var
|
|
1774
|
+
)
|
|
1775
|
+
);
|
|
1776
|
+
var codeInterpreterOutputSchema = (0, import_provider_utils14.lazySchema)(
|
|
1777
|
+
() => (0, import_provider_utils14.zodSchema)(
|
|
1778
|
+
z9.object({
|
|
1779
|
+
outputs: z9.array(
|
|
1780
|
+
z9.discriminatedUnion("type", [
|
|
1781
|
+
z9.object({ type: z9.literal("logs"), logs: z9.string() }),
|
|
1782
|
+
z9.object({ type: z9.literal("image"), url: z9.string() })
|
|
1783
|
+
])
|
|
1784
|
+
).nullish()
|
|
1785
|
+
})
|
|
1786
|
+
)
|
|
1787
|
+
);
|
|
1788
|
+
var codeInterpreterArgsSchema = (0, import_provider_utils14.lazySchema)(
|
|
1789
|
+
() => (0, import_provider_utils14.zodSchema)(
|
|
1790
|
+
z9.object({
|
|
1791
|
+
container: z9.union([
|
|
1792
|
+
z9.string(),
|
|
1793
|
+
z9.object({
|
|
1794
|
+
fileIds: z9.array(z9.string()).optional()
|
|
1795
|
+
})
|
|
1796
|
+
]).optional()
|
|
1797
|
+
})
|
|
1798
|
+
)
|
|
1799
|
+
);
|
|
1800
|
+
var codeInterpreterToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1709
1801
|
id: "openai.code_interpreter",
|
|
1710
1802
|
name: "code_interpreter",
|
|
1711
1803
|
inputSchema: codeInterpreterInputSchema,
|
|
@@ -1716,71 +1808,85 @@ var codeInterpreter = (args = {}) => {
|
|
|
1716
1808
|
};
|
|
1717
1809
|
|
|
1718
1810
|
// src/tool/file-search.ts
|
|
1719
|
-
var
|
|
1720
|
-
var
|
|
1721
|
-
var comparisonFilterSchema =
|
|
1722
|
-
key:
|
|
1723
|
-
type:
|
|
1724
|
-
value:
|
|
1811
|
+
var import_provider_utils15 = require("@ai-sdk/provider-utils");
|
|
1812
|
+
var z10 = __toESM(require("zod/v4"));
|
|
1813
|
+
var comparisonFilterSchema = z10.object({
|
|
1814
|
+
key: z10.string(),
|
|
1815
|
+
type: z10.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
|
1816
|
+
value: z10.union([z10.string(), z10.number(), z10.boolean()])
|
|
1725
1817
|
});
|
|
1726
|
-
var compoundFilterSchema =
|
|
1727
|
-
type:
|
|
1728
|
-
filters:
|
|
1729
|
-
|
|
1818
|
+
var compoundFilterSchema = z10.object({
|
|
1819
|
+
type: z10.enum(["and", "or"]),
|
|
1820
|
+
filters: z10.array(
|
|
1821
|
+
z10.union([comparisonFilterSchema, z10.lazy(() => compoundFilterSchema)])
|
|
1730
1822
|
)
|
|
1731
1823
|
});
|
|
1732
|
-
var fileSearchArgsSchema =
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
})
|
|
1741
|
-
|
|
1742
|
-
queries: import_v410.z.array(import_v410.z.string()),
|
|
1743
|
-
results: import_v410.z.array(
|
|
1744
|
-
import_v410.z.object({
|
|
1745
|
-
attributes: import_v410.z.record(import_v410.z.string(), import_v410.z.unknown()),
|
|
1746
|
-
fileId: import_v410.z.string(),
|
|
1747
|
-
filename: import_v410.z.string(),
|
|
1748
|
-
score: import_v410.z.number(),
|
|
1749
|
-
text: import_v410.z.string()
|
|
1824
|
+
var fileSearchArgsSchema = (0, import_provider_utils15.lazySchema)(
|
|
1825
|
+
() => (0, import_provider_utils15.zodSchema)(
|
|
1826
|
+
z10.object({
|
|
1827
|
+
vectorStoreIds: z10.array(z10.string()),
|
|
1828
|
+
maxNumResults: z10.number().optional(),
|
|
1829
|
+
ranking: z10.object({
|
|
1830
|
+
ranker: z10.string().optional(),
|
|
1831
|
+
scoreThreshold: z10.number().optional()
|
|
1832
|
+
}).optional(),
|
|
1833
|
+
filters: z10.union([comparisonFilterSchema, compoundFilterSchema]).optional()
|
|
1750
1834
|
})
|
|
1751
|
-
)
|
|
1752
|
-
|
|
1753
|
-
var
|
|
1835
|
+
)
|
|
1836
|
+
);
|
|
1837
|
+
var fileSearchOutputSchema = (0, import_provider_utils15.lazySchema)(
|
|
1838
|
+
() => (0, import_provider_utils15.zodSchema)(
|
|
1839
|
+
z10.object({
|
|
1840
|
+
queries: z10.array(z10.string()),
|
|
1841
|
+
results: z10.array(
|
|
1842
|
+
z10.object({
|
|
1843
|
+
attributes: z10.record(z10.string(), z10.unknown()),
|
|
1844
|
+
fileId: z10.string(),
|
|
1845
|
+
filename: z10.string(),
|
|
1846
|
+
score: z10.number(),
|
|
1847
|
+
text: z10.string()
|
|
1848
|
+
})
|
|
1849
|
+
).nullable()
|
|
1850
|
+
})
|
|
1851
|
+
)
|
|
1852
|
+
);
|
|
1853
|
+
var fileSearch = (0, import_provider_utils15.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1754
1854
|
id: "openai.file_search",
|
|
1755
1855
|
name: "file_search",
|
|
1756
|
-
inputSchema:
|
|
1856
|
+
inputSchema: z10.object({}),
|
|
1757
1857
|
outputSchema: fileSearchOutputSchema
|
|
1758
1858
|
});
|
|
1759
1859
|
|
|
1760
1860
|
// src/tool/image-generation.ts
|
|
1761
|
-
var
|
|
1762
|
-
var
|
|
1763
|
-
var imageGenerationArgsSchema =
|
|
1764
|
-
|
|
1765
|
-
|
|
1766
|
-
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
})
|
|
1780
|
-
|
|
1861
|
+
var import_provider_utils16 = require("@ai-sdk/provider-utils");
|
|
1862
|
+
var z11 = __toESM(require("zod/v4"));
|
|
1863
|
+
var imageGenerationArgsSchema = (0, import_provider_utils16.lazySchema)(
|
|
1864
|
+
() => (0, import_provider_utils16.zodSchema)(
|
|
1865
|
+
z11.object({
|
|
1866
|
+
background: z11.enum(["auto", "opaque", "transparent"]).optional(),
|
|
1867
|
+
inputFidelity: z11.enum(["low", "high"]).optional(),
|
|
1868
|
+
inputImageMask: z11.object({
|
|
1869
|
+
fileId: z11.string().optional(),
|
|
1870
|
+
imageUrl: z11.string().optional()
|
|
1871
|
+
}).optional(),
|
|
1872
|
+
model: z11.string().optional(),
|
|
1873
|
+
moderation: z11.enum(["auto"]).optional(),
|
|
1874
|
+
outputCompression: z11.number().int().min(0).max(100).optional(),
|
|
1875
|
+
outputFormat: z11.enum(["png", "jpeg", "webp"]).optional(),
|
|
1876
|
+
partialImages: z11.number().int().min(0).max(3).optional(),
|
|
1877
|
+
quality: z11.enum(["auto", "low", "medium", "high"]).optional(),
|
|
1878
|
+
size: z11.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
|
|
1879
|
+
}).strict()
|
|
1880
|
+
)
|
|
1881
|
+
);
|
|
1882
|
+
var imageGenerationInputSchema = (0, import_provider_utils16.lazySchema)(() => (0, import_provider_utils16.zodSchema)(z11.object({})));
|
|
1883
|
+
var imageGenerationOutputSchema = (0, import_provider_utils16.lazySchema)(
|
|
1884
|
+
() => (0, import_provider_utils16.zodSchema)(z11.object({ result: z11.string() }))
|
|
1885
|
+
);
|
|
1886
|
+
var imageGenerationToolFactory = (0, import_provider_utils16.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1781
1887
|
id: "openai.image_generation",
|
|
1782
1888
|
name: "image_generation",
|
|
1783
|
-
inputSchema:
|
|
1889
|
+
inputSchema: imageGenerationInputSchema,
|
|
1784
1890
|
outputSchema: imageGenerationOutputSchema
|
|
1785
1891
|
});
|
|
1786
1892
|
var imageGeneration = (args = {}) => {
|
|
@@ -1788,22 +1894,26 @@ var imageGeneration = (args = {}) => {
|
|
|
1788
1894
|
};
|
|
1789
1895
|
|
|
1790
1896
|
// src/tool/local-shell.ts
|
|
1791
|
-
var
|
|
1792
|
-
var
|
|
1793
|
-
var localShellInputSchema =
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
|
|
1897
|
+
var import_provider_utils17 = require("@ai-sdk/provider-utils");
|
|
1898
|
+
var z12 = __toESM(require("zod/v4"));
|
|
1899
|
+
var localShellInputSchema = (0, import_provider_utils17.lazySchema)(
|
|
1900
|
+
() => (0, import_provider_utils17.zodSchema)(
|
|
1901
|
+
z12.object({
|
|
1902
|
+
action: z12.object({
|
|
1903
|
+
type: z12.literal("exec"),
|
|
1904
|
+
command: z12.array(z12.string()),
|
|
1905
|
+
timeoutMs: z12.number().optional(),
|
|
1906
|
+
user: z12.string().optional(),
|
|
1907
|
+
workingDirectory: z12.string().optional(),
|
|
1908
|
+
env: z12.record(z12.string(), z12.string()).optional()
|
|
1909
|
+
})
|
|
1910
|
+
})
|
|
1911
|
+
)
|
|
1912
|
+
);
|
|
1913
|
+
var localShellOutputSchema = (0, import_provider_utils17.lazySchema)(
|
|
1914
|
+
() => (0, import_provider_utils17.zodSchema)(z12.object({ output: z12.string() }))
|
|
1915
|
+
);
|
|
1916
|
+
var localShell = (0, import_provider_utils17.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1807
1917
|
id: "openai.local_shell",
|
|
1808
1918
|
name: "local_shell",
|
|
1809
1919
|
inputSchema: localShellInputSchema,
|
|
@@ -1811,103 +1921,121 @@ var localShell = (0, import_provider_utils10.createProviderDefinedToolFactoryWit
|
|
|
1811
1921
|
});
|
|
1812
1922
|
|
|
1813
1923
|
// src/tool/web-search.ts
|
|
1814
|
-
var
|
|
1815
|
-
var
|
|
1816
|
-
var webSearchArgsSchema =
|
|
1817
|
-
|
|
1818
|
-
|
|
1819
|
-
|
|
1820
|
-
|
|
1821
|
-
|
|
1822
|
-
|
|
1823
|
-
|
|
1824
|
-
|
|
1825
|
-
|
|
1826
|
-
|
|
1827
|
-
|
|
1828
|
-
|
|
1829
|
-
|
|
1924
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
|
1925
|
+
var z13 = __toESM(require("zod/v4"));
|
|
1926
|
+
var webSearchArgsSchema = (0, import_provider_utils18.lazySchema)(
|
|
1927
|
+
() => (0, import_provider_utils18.zodSchema)(
|
|
1928
|
+
z13.object({
|
|
1929
|
+
filters: z13.object({
|
|
1930
|
+
allowedDomains: z13.array(z13.string()).optional()
|
|
1931
|
+
}).optional(),
|
|
1932
|
+
searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
|
|
1933
|
+
userLocation: z13.object({
|
|
1934
|
+
type: z13.literal("approximate"),
|
|
1935
|
+
country: z13.string().optional(),
|
|
1936
|
+
city: z13.string().optional(),
|
|
1937
|
+
region: z13.string().optional(),
|
|
1938
|
+
timezone: z13.string().optional()
|
|
1939
|
+
}).optional()
|
|
1940
|
+
})
|
|
1941
|
+
)
|
|
1942
|
+
);
|
|
1943
|
+
var webSearchInputSchema = (0, import_provider_utils18.lazySchema)(
|
|
1944
|
+
() => (0, import_provider_utils18.zodSchema)(
|
|
1945
|
+
z13.object({
|
|
1946
|
+
action: z13.discriminatedUnion("type", [
|
|
1947
|
+
z13.object({
|
|
1948
|
+
type: z13.literal("search"),
|
|
1949
|
+
query: z13.string().nullish()
|
|
1950
|
+
}),
|
|
1951
|
+
z13.object({
|
|
1952
|
+
type: z13.literal("open_page"),
|
|
1953
|
+
url: z13.string()
|
|
1954
|
+
}),
|
|
1955
|
+
z13.object({
|
|
1956
|
+
type: z13.literal("find"),
|
|
1957
|
+
url: z13.string(),
|
|
1958
|
+
pattern: z13.string()
|
|
1959
|
+
})
|
|
1960
|
+
]).nullish()
|
|
1961
|
+
})
|
|
1962
|
+
)
|
|
1963
|
+
);
|
|
1964
|
+
var webSearchToolFactory = (0, import_provider_utils18.createProviderDefinedToolFactory)({
|
|
1830
1965
|
id: "openai.web_search",
|
|
1831
1966
|
name: "web_search",
|
|
1832
|
-
inputSchema:
|
|
1833
|
-
action: import_v413.z.discriminatedUnion("type", [
|
|
1834
|
-
import_v413.z.object({
|
|
1835
|
-
type: import_v413.z.literal("search"),
|
|
1836
|
-
query: import_v413.z.string().nullish()
|
|
1837
|
-
}),
|
|
1838
|
-
import_v413.z.object({
|
|
1839
|
-
type: import_v413.z.literal("open_page"),
|
|
1840
|
-
url: import_v413.z.string()
|
|
1841
|
-
}),
|
|
1842
|
-
import_v413.z.object({
|
|
1843
|
-
type: import_v413.z.literal("find"),
|
|
1844
|
-
url: import_v413.z.string(),
|
|
1845
|
-
pattern: import_v413.z.string()
|
|
1846
|
-
})
|
|
1847
|
-
]).nullish()
|
|
1848
|
-
})
|
|
1967
|
+inputSchema: webSearchInputSchema
 });
 var webSearch = (args = {}) => {
 return webSearchToolFactory(args);
 };
 
 // src/tool/web-search-preview.ts
-var
-var
-var webSearchPreviewArgsSchema =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils19 = require("@ai-sdk/provider-utils");
+var z14 = __toESM(require("zod/v4"));
+var webSearchPreviewArgsSchema = (0, import_provider_utils19.lazySchema)(
+() => (0, import_provider_utils19.zodSchema)(
+z14.object({
+/**
+* Search context size to use for the web search.
+* - high: Most comprehensive context, highest cost, slower response
+* - medium: Balanced context, cost, and latency (default)
+* - low: Least context, lowest cost, fastest response
+*/
+searchContextSize: z14.enum(["low", "medium", "high"]).optional(),
+/**
+* User location information to provide geographically relevant search results.
+*/
+userLocation: z14.object({
+/**
+* Type of location (always 'approximate')
+*/
+type: z14.literal("approximate"),
+/**
+* Two-letter ISO country code (e.g., 'US', 'GB')
+*/
+country: z14.string().optional(),
+/**
+* City name (free text, e.g., 'Minneapolis')
+*/
+city: z14.string().optional(),
+/**
+* Region name (free text, e.g., 'Minnesota')
+*/
+region: z14.string().optional(),
+/**
+* IANA timezone (e.g., 'America/Chicago')
+*/
+timezone: z14.string().optional()
+}).optional()
+})
+)
+);
+var webSearchPreviewInputSchema = (0, import_provider_utils19.lazySchema)(
+() => (0, import_provider_utils19.zodSchema)(
+z14.object({
+action: z14.discriminatedUnion("type", [
+z14.object({
+type: z14.literal("search"),
+query: z14.string().nullish()
+}),
+z14.object({
+type: z14.literal("open_page"),
+url: z14.string()
+}),
+z14.object({
+type: z14.literal("find"),
+url: z14.string(),
+pattern: z14.string()
+})
+]).nullish()
+})
+)
+);
+var webSearchPreview = (0, import_provider_utils19.createProviderDefinedToolFactory)({
 id: "openai.web_search_preview",
 name: "web_search_preview",
-inputSchema:
-action: import_v414.z.discriminatedUnion("type", [
-import_v414.z.object({
-type: import_v414.z.literal("search"),
-query: import_v414.z.string().nullish()
-}),
-import_v414.z.object({
-type: import_v414.z.literal("open_page"),
-url: import_v414.z.string()
-}),
-import_v414.z.object({
-type: import_v414.z.literal("find"),
-url: import_v414.z.string(),
-pattern: import_v414.z.string()
-})
-]).nullish()
-})
+inputSchema: webSearchPreviewInputSchema
 });
 
 // src/openai-tools.ts
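The new `webSearchPreviewArgsSchema` above wraps its Zod definition in `lazySchema(() => zodSchema(...))` instead of building the Zod object eagerly at module load. Below is a minimal sketch of the same pattern (illustrative only: `approximateLocationSchema` is a made-up name, and the deferred-construction behaviour of `lazySchema` is an assumption based on how it is used in this diff, not a documented guarantee):

```ts
// Sketch: mirrors the lazySchema/zodSchema wrapping used in this diff.
import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

// Hypothetical schema, shaped like the userLocation object above.
const approximateLocationSchema = lazySchema(() =>
  zodSchema(
    z.object({
      type: z.literal('approximate'),
      country: z.string().optional(), // e.g. 'US'
      city: z.string().optional(),
      region: z.string().optional(),
      timezone: z.string().optional(), // IANA, e.g. 'America/Chicago'
    }),
  ),
);
```

The factory callback only runs when the schema is first needed, which appears to be the point of the change: keeping Zod construction work off the module-initialization path.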
@@ -1985,13 +2113,12 @@ var openaiTools = {
 
 // src/responses/openai-responses-language-model.ts
 var import_provider8 = require("@ai-sdk/provider");
-var
-var import_v416 = require("zod/v4");
+var import_provider_utils24 = require("@ai-sdk/provider-utils");
 
 // src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
-var
-var
+var import_provider_utils20 = require("@ai-sdk/provider-utils");
+var z15 = __toESM(require("zod/v4"));
 function isFileId(data, prefixes) {
 if (!prefixes) return false;
 return prefixes.some((prefix) => data.startsWith(prefix));
@@ -2049,7 +2176,7 @@ async function convertToOpenAIResponsesInput({
 return {
 type: "input_image",
 ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
-image_url: `data:${mediaType};base64,${(0,
+image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
 },
 detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
 };
@@ -2064,7 +2191,7 @@ async function convertToOpenAIResponsesInput({
 type: "input_file",
 ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
 filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
-file_data: `data:application/pdf;base64,${(0,
+file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
 }
 };
 } else {
@@ -2097,7 +2224,10 @@ async function convertToOpenAIResponsesInput({
 break;
 }
 if (hasLocalShellTool && part.toolName === "local_shell") {
-const parsedInput =
+const parsedInput = await (0, import_provider_utils20.validateTypes)({
+value: part.input,
+schema: localShellInputSchema
+});
 input.push({
 type: "local_shell_call",
 call_id: part.toolCallId,
@@ -2135,7 +2265,7 @@ async function convertToOpenAIResponsesInput({
 break;
 }
 case "reasoning": {
-const providerOptions = await (0,
+const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
 provider: "openai",
 providerOptions: part.providerOptions,
 schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2193,10 +2323,14 @@ async function convertToOpenAIResponsesInput({
 for (const part of content) {
 const output = part.output;
 if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
+const parsedOutput = await (0, import_provider_utils20.validateTypes)({
+value: output.value,
+schema: localShellOutputSchema
+});
 input.push({
 type: "local_shell_call_output",
 call_id: part.toolCallId,
-output:
+output: parsedOutput.output
 });
 break;
 }
@@ -2228,9 +2362,9 @@ async function convertToOpenAIResponsesInput({
 }
 return { input, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema =
-itemId:
-reasoningEncryptedContent:
+var openaiResponsesReasoningProviderOptionsSchema = z15.object({
+itemId: z15.string().nullish(),
+reasoningEncryptedContent: z15.string().nullish()
 });
 
 // src/responses/map-openai-responses-finish-reason.ts
@@ -2251,9 +2385,539 @@ function mapOpenAIResponseFinishReason({
 }
 }
 
+// src/responses/openai-responses-api.ts
+var import_provider_utils21 = require("@ai-sdk/provider-utils");
+var z16 = __toESM(require("zod/v4"));
+var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
+() => (0, import_provider_utils21.zodSchema)(
+z16.union([
+z16.object({
+type: z16.literal("response.output_text.delta"),
+item_id: z16.string(),
+delta: z16.string(),
+logprobs: z16.array(
+z16.object({
+token: z16.string(),
+logprob: z16.number(),
+top_logprobs: z16.array(
+z16.object({
+token: z16.string(),
+logprob: z16.number()
+})
+)
+})
+).nullish()
+}),
+z16.object({
+type: z16.enum(["response.completed", "response.incomplete"]),
+response: z16.object({
+incomplete_details: z16.object({ reason: z16.string() }).nullish(),
+usage: z16.object({
+input_tokens: z16.number(),
+input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
+output_tokens: z16.number(),
+output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
+}),
+service_tier: z16.string().nullish()
+})
+}),
+z16.object({
+type: z16.literal("response.created"),
+response: z16.object({
+id: z16.string(),
+created_at: z16.number(),
+model: z16.string(),
+service_tier: z16.string().nullish()
+})
+}),
+z16.object({
+type: z16.literal("response.output_item.added"),
+output_index: z16.number(),
+item: z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("message"),
+id: z16.string()
+}),
+z16.object({
+type: z16.literal("reasoning"),
+id: z16.string(),
+encrypted_content: z16.string().nullish()
+}),
+z16.object({
+type: z16.literal("function_call"),
+id: z16.string(),
+call_id: z16.string(),
+name: z16.string(),
+arguments: z16.string()
+}),
+z16.object({
+type: z16.literal("web_search_call"),
+id: z16.string(),
+status: z16.string(),
+action: z16.object({
+type: z16.literal("search"),
+query: z16.string().optional()
+}).nullish()
+}),
+z16.object({
+type: z16.literal("computer_call"),
+id: z16.string(),
+status: z16.string()
+}),
+z16.object({
+type: z16.literal("file_search_call"),
+id: z16.string()
+}),
+z16.object({
+type: z16.literal("image_generation_call"),
+id: z16.string()
+}),
+z16.object({
+type: z16.literal("code_interpreter_call"),
+id: z16.string(),
+container_id: z16.string(),
+code: z16.string().nullable(),
+outputs: z16.array(
+z16.discriminatedUnion("type", [
+z16.object({ type: z16.literal("logs"), logs: z16.string() }),
+z16.object({ type: z16.literal("image"), url: z16.string() })
+])
+).nullable(),
+status: z16.string()
+})
+])
+}),
+z16.object({
+type: z16.literal("response.output_item.done"),
+output_index: z16.number(),
+item: z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("message"),
+id: z16.string()
+}),
+z16.object({
+type: z16.literal("reasoning"),
+id: z16.string(),
+encrypted_content: z16.string().nullish()
+}),
+z16.object({
+type: z16.literal("function_call"),
+id: z16.string(),
+call_id: z16.string(),
+name: z16.string(),
+arguments: z16.string(),
+status: z16.literal("completed")
+}),
+z16.object({
+type: z16.literal("code_interpreter_call"),
+id: z16.string(),
+code: z16.string().nullable(),
+container_id: z16.string(),
+outputs: z16.array(
+z16.discriminatedUnion("type", [
+z16.object({ type: z16.literal("logs"), logs: z16.string() }),
+z16.object({ type: z16.literal("image"), url: z16.string() })
+])
+).nullable()
+}),
+z16.object({
+type: z16.literal("image_generation_call"),
+id: z16.string(),
+result: z16.string()
+}),
+z16.object({
+type: z16.literal("web_search_call"),
+id: z16.string(),
+status: z16.string(),
+action: z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("search"),
+query: z16.string().nullish()
+}),
+z16.object({
+type: z16.literal("open_page"),
+url: z16.string()
+}),
+z16.object({
+type: z16.literal("find"),
+url: z16.string(),
+pattern: z16.string()
+})
+]).nullish()
+}),
+z16.object({
+type: z16.literal("file_search_call"),
+id: z16.string(),
+queries: z16.array(z16.string()),
+results: z16.array(
+z16.object({
+attributes: z16.record(z16.string(), z16.unknown()),
+file_id: z16.string(),
+filename: z16.string(),
+score: z16.number(),
+text: z16.string()
+})
+).nullish()
+}),
+z16.object({
+type: z16.literal("local_shell_call"),
+id: z16.string(),
+call_id: z16.string(),
+action: z16.object({
+type: z16.literal("exec"),
+command: z16.array(z16.string()),
+timeout_ms: z16.number().optional(),
+user: z16.string().optional(),
+working_directory: z16.string().optional(),
+env: z16.record(z16.string(), z16.string()).optional()
+})
+}),
+z16.object({
+type: z16.literal("computer_call"),
+id: z16.string(),
+status: z16.literal("completed")
+})
+])
+}),
+z16.object({
+type: z16.literal("response.function_call_arguments.delta"),
+item_id: z16.string(),
+output_index: z16.number(),
+delta: z16.string()
+}),
+z16.object({
+type: z16.literal("response.image_generation_call.partial_image"),
+item_id: z16.string(),
+output_index: z16.number(),
+partial_image_b64: z16.string()
+}),
+z16.object({
+type: z16.literal("response.code_interpreter_call_code.delta"),
+item_id: z16.string(),
+output_index: z16.number(),
+delta: z16.string()
+}),
+z16.object({
+type: z16.literal("response.code_interpreter_call_code.done"),
+item_id: z16.string(),
+output_index: z16.number(),
+code: z16.string()
+}),
+z16.object({
+type: z16.literal("response.output_text.annotation.added"),
+annotation: z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("url_citation"),
+url: z16.string(),
+title: z16.string()
+}),
+z16.object({
+type: z16.literal("file_citation"),
+file_id: z16.string(),
+filename: z16.string().nullish(),
+index: z16.number().nullish(),
+start_index: z16.number().nullish(),
+end_index: z16.number().nullish(),
+quote: z16.string().nullish()
+})
+])
+}),
+z16.object({
+type: z16.literal("response.reasoning_summary_part.added"),
+item_id: z16.string(),
+summary_index: z16.number()
+}),
+z16.object({
+type: z16.literal("response.reasoning_summary_text.delta"),
+item_id: z16.string(),
+summary_index: z16.number(),
+delta: z16.string()
+}),
+z16.object({
+type: z16.literal("error"),
+code: z16.string(),
+message: z16.string(),
+param: z16.string().nullish(),
+sequence_number: z16.number()
+}),
+z16.object({ type: z16.string() }).loose().transform((value) => ({
+type: "unknown_chunk",
+message: value.type
+}))
+// fallback for unknown chunks
+])
+)
+);
+var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
+() => (0, import_provider_utils21.zodSchema)(
+z16.object({
+id: z16.string(),
+created_at: z16.number(),
+error: z16.object({
+code: z16.string(),
+message: z16.string()
+}).nullish(),
+model: z16.string(),
+output: z16.array(
+z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("message"),
+role: z16.literal("assistant"),
+id: z16.string(),
+content: z16.array(
+z16.object({
+type: z16.literal("output_text"),
+text: z16.string(),
+logprobs: z16.array(
+z16.object({
+token: z16.string(),
+logprob: z16.number(),
+top_logprobs: z16.array(
+z16.object({
+token: z16.string(),
+logprob: z16.number()
+})
+)
+})
+).nullish(),
+annotations: z16.array(
+z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("url_citation"),
+start_index: z16.number(),
+end_index: z16.number(),
+url: z16.string(),
+title: z16.string()
+}),
+z16.object({
+type: z16.literal("file_citation"),
+file_id: z16.string(),
+filename: z16.string().nullish(),
+index: z16.number().nullish(),
+start_index: z16.number().nullish(),
+end_index: z16.number().nullish(),
+quote: z16.string().nullish()
+}),
+z16.object({
+type: z16.literal("container_file_citation")
+})
+])
+)
+})
+)
+}),
+z16.object({
+type: z16.literal("web_search_call"),
+id: z16.string(),
+status: z16.string(),
+action: z16.discriminatedUnion("type", [
+z16.object({
+type: z16.literal("search"),
+query: z16.string().nullish()
+}),
+z16.object({
+type: z16.literal("open_page"),
+url: z16.string()
+}),
+z16.object({
+type: z16.literal("find"),
+url: z16.string(),
+pattern: z16.string()
+})
+]).nullish()
+}),
+z16.object({
+type: z16.literal("file_search_call"),
+id: z16.string(),
+queries: z16.array(z16.string()),
+results: z16.array(
+z16.object({
+attributes: z16.record(z16.string(), z16.unknown()),
+file_id: z16.string(),
+filename: z16.string(),
+score: z16.number(),
+text: z16.string()
+})
+).nullish()
+}),
+z16.object({
+type: z16.literal("code_interpreter_call"),
+id: z16.string(),
+code: z16.string().nullable(),
+container_id: z16.string(),
+outputs: z16.array(
+z16.discriminatedUnion("type", [
+z16.object({ type: z16.literal("logs"), logs: z16.string() }),
+z16.object({ type: z16.literal("image"), url: z16.string() })
+])
+).nullable()
+}),
+z16.object({
+type: z16.literal("image_generation_call"),
+id: z16.string(),
+result: z16.string()
+}),
+z16.object({
+type: z16.literal("local_shell_call"),
+id: z16.string(),
+call_id: z16.string(),
+action: z16.object({
+type: z16.literal("exec"),
+command: z16.array(z16.string()),
+timeout_ms: z16.number().optional(),
+user: z16.string().optional(),
+working_directory: z16.string().optional(),
+env: z16.record(z16.string(), z16.string()).optional()
+})
+}),
+z16.object({
+type: z16.literal("function_call"),
+call_id: z16.string(),
+name: z16.string(),
+arguments: z16.string(),
+id: z16.string()
+}),
+z16.object({
+type: z16.literal("computer_call"),
+id: z16.string(),
+status: z16.string().optional()
+}),
+z16.object({
+type: z16.literal("reasoning"),
+id: z16.string(),
+encrypted_content: z16.string().nullish(),
+summary: z16.array(
+z16.object({
+type: z16.literal("summary_text"),
+text: z16.string()
+})
+)
+})
+])
+),
+service_tier: z16.string().nullish(),
+incomplete_details: z16.object({ reason: z16.string() }).nullish(),
+usage: z16.object({
+input_tokens: z16.number(),
+input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
+output_tokens: z16.number(),
+output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
+})
+})
+)
+);
+
+// src/responses/openai-responses-options.ts
+var import_provider_utils22 = require("@ai-sdk/provider-utils");
+var z17 = __toESM(require("zod/v4"));
+var TOP_LOGPROBS_MAX = 20;
+var openaiResponsesReasoningModelIds = [
+"o1",
+"o1-2024-12-17",
+"o3-mini",
+"o3-mini-2025-01-31",
+"o3",
+"o3-2025-04-16",
+"o4-mini",
+"o4-mini-2025-04-16",
+"codex-mini-latest",
+"computer-use-preview",
+"gpt-5",
+"gpt-5-2025-08-07",
+"gpt-5-codex",
+"gpt-5-mini",
+"gpt-5-mini-2025-08-07",
+"gpt-5-nano",
+"gpt-5-nano-2025-08-07",
+"gpt-5-pro",
+"gpt-5-pro-2025-10-06"
+];
+var openaiResponsesModelIds = [
+"gpt-4.1",
+"gpt-4.1-2025-04-14",
+"gpt-4.1-mini",
+"gpt-4.1-mini-2025-04-14",
+"gpt-4.1-nano",
+"gpt-4.1-nano-2025-04-14",
+"gpt-4o",
+"gpt-4o-2024-05-13",
+"gpt-4o-2024-08-06",
+"gpt-4o-2024-11-20",
+"gpt-4o-audio-preview",
+"gpt-4o-audio-preview-2024-10-01",
+"gpt-4o-audio-preview-2024-12-17",
+"gpt-4o-search-preview",
+"gpt-4o-search-preview-2025-03-11",
+"gpt-4o-mini-search-preview",
+"gpt-4o-mini-search-preview-2025-03-11",
+"gpt-4o-mini",
+"gpt-4o-mini-2024-07-18",
+"gpt-4-turbo",
+"gpt-4-turbo-2024-04-09",
+"gpt-4-turbo-preview",
+"gpt-4-0125-preview",
+"gpt-4-1106-preview",
+"gpt-4",
+"gpt-4-0613",
+"gpt-4.5-preview",
+"gpt-4.5-preview-2025-02-27",
+"gpt-3.5-turbo-0125",
+"gpt-3.5-turbo",
+"gpt-3.5-turbo-1106",
+"chatgpt-4o-latest",
+"gpt-5-chat-latest",
+...openaiResponsesReasoningModelIds
+];
+var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
+() => (0, import_provider_utils22.zodSchema)(
+z17.object({
+include: z17.array(
+z17.enum([
+"reasoning.encrypted_content",
+"file_search_call.results",
+"message.output_text.logprobs"
+])
+).nullish(),
+instructions: z17.string().nullish(),
+/**
+* Return the log probabilities of the tokens.
+*
+* Setting to true will return the log probabilities of the tokens that
+* were generated.
+*
+* Setting to a number will return the log probabilities of the top n
+* tokens that were generated.
+*
+* @see https://platform.openai.com/docs/api-reference/responses/create
+* @see https://cookbook.openai.com/examples/using_logprobs
+*/
+logprobs: z17.union([z17.boolean(), z17.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
+/**
+* The maximum number of total calls to built-in tools that can be processed in a response.
+* This maximum number applies across all built-in tool calls, not per individual tool.
+* Any further attempts to call a tool by the model will be ignored.
+*/
+maxToolCalls: z17.number().nullish(),
+metadata: z17.any().nullish(),
+parallelToolCalls: z17.boolean().nullish(),
+previousResponseId: z17.string().nullish(),
+promptCacheKey: z17.string().nullish(),
+reasoningEffort: z17.string().nullish(),
+reasoningSummary: z17.string().nullish(),
+safetyIdentifier: z17.string().nullish(),
+serviceTier: z17.enum(["auto", "flex", "priority", "default"]).nullish(),
+store: z17.boolean().nullish(),
+strictJsonSchema: z17.boolean().nullish(),
+textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
+user: z17.string().nullish()
+})
+)
+);
+
 // src/responses/openai-responses-prepare-tools.ts
 var import_provider7 = require("@ai-sdk/provider");
-
+var import_provider_utils23 = require("@ai-sdk/provider-utils");
+async function prepareResponsesTools({
 tools,
 toolChoice,
 strictJsonSchema
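The new `openaiResponsesProviderOptionsSchema` above now accepts `"default"` as a `serviceTier` value and caps numeric `logprobs` at `TOP_LOGPROBS_MAX` (20). Below is a usage sketch of how these options are typically passed through, assuming the usual AI SDK call surface (`generateText` from the `ai` package and the `openai.responses()` model factory); the model id and prompt are placeholders:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('gpt-5-mini'),
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      serviceTier: 'default', // newly accepted enum value in this schema
      textVerbosity: 'low',
      logprobs: 5, // boolean, or a number from 1 to 20 (TOP_LOGPROBS_MAX)
    },
  },
});
```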
@@ -2278,7 +2942,10 @@ function prepareResponsesTools({
|
|
|
2278
2942
|
case "provider-defined": {
|
|
2279
2943
|
switch (tool.id) {
|
|
2280
2944
|
case "openai.file_search": {
|
|
2281
|
-
const args =
|
|
2945
|
+
const args = await (0, import_provider_utils23.validateTypes)({
|
|
2946
|
+
value: tool.args,
|
|
2947
|
+
schema: fileSearchArgsSchema
|
|
2948
|
+
});
|
|
2282
2949
|
openaiTools2.push({
|
|
2283
2950
|
type: "file_search",
|
|
2284
2951
|
vector_store_ids: args.vectorStoreIds,
|
|
@@ -2298,7 +2965,10 @@ function prepareResponsesTools({
|
|
|
2298
2965
|
break;
|
|
2299
2966
|
}
|
|
2300
2967
|
case "openai.web_search_preview": {
|
|
2301
|
-
const args =
|
|
2968
|
+
const args = await (0, import_provider_utils23.validateTypes)({
|
|
2969
|
+
value: tool.args,
|
|
2970
|
+
schema: webSearchPreviewArgsSchema
|
|
2971
|
+
});
|
|
2302
2972
|
openaiTools2.push({
|
|
2303
2973
|
type: "web_search_preview",
|
|
2304
2974
|
search_context_size: args.searchContextSize,
|
|
@@ -2307,7 +2977,10 @@ function prepareResponsesTools({
|
|
|
2307
2977
|
break;
|
|
2308
2978
|
}
|
|
2309
2979
|
case "openai.web_search": {
|
|
2310
|
-
const args =
|
|
2980
|
+
const args = await (0, import_provider_utils23.validateTypes)({
|
|
2981
|
+
value: tool.args,
|
|
2982
|
+
schema: webSearchArgsSchema
|
|
2983
|
+
});
|
|
2311
2984
|
openaiTools2.push({
|
|
2312
2985
|
type: "web_search",
|
|
2313
2986
|
filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
|
|
@@ -2317,7 +2990,10 @@ function prepareResponsesTools({
|
|
|
2317
2990
|
break;
|
|
2318
2991
|
}
|
|
2319
2992
|
case "openai.code_interpreter": {
|
|
2320
|
-
const args =
|
|
2993
|
+
const args = await (0, import_provider_utils23.validateTypes)({
|
|
2994
|
+
value: tool.args,
|
|
2995
|
+
schema: codeInterpreterArgsSchema
|
|
2996
|
+
});
|
|
2321
2997
|
openaiTools2.push({
|
|
2322
2998
|
type: "code_interpreter",
|
|
2323
2999
|
container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
|
|
@@ -2325,7 +3001,10 @@ function prepareResponsesTools({
|
|
|
2325
3001
|
break;
|
|
2326
3002
|
}
|
|
2327
3003
|
case "openai.image_generation": {
|
|
2328
|
-
const args =
|
|
3004
|
+
const args = await (0, import_provider_utils23.validateTypes)({
|
|
3005
|
+
value: tool.args,
|
|
3006
|
+
schema: imageGenerationArgsSchema
|
|
3007
|
+
});
|
|
2329
3008
|
openaiTools2.push({
|
|
2330
3009
|
type: "image_generation",
|
|
2331
3010
|
background: args.background,
|
|
@@ -2376,83 +3055,6 @@ function prepareResponsesTools({
|
|
|
2376
3055
|
}
|
|
2377
3056
|
|
|
2378
3057
|
// src/responses/openai-responses-language-model.ts
|
|
2379
|
-
var webSearchCallItem = import_v416.z.object({
|
|
2380
|
-
type: import_v416.z.literal("web_search_call"),
|
|
2381
|
-
id: import_v416.z.string(),
|
|
2382
|
-
status: import_v416.z.string(),
|
|
2383
|
-
action: import_v416.z.discriminatedUnion("type", [
|
|
2384
|
-
import_v416.z.object({
|
|
2385
|
-
type: import_v416.z.literal("search"),
|
|
2386
|
-
query: import_v416.z.string().nullish()
|
|
2387
|
-
}),
|
|
2388
|
-
import_v416.z.object({
|
|
2389
|
-
type: import_v416.z.literal("open_page"),
|
|
2390
|
-
url: import_v416.z.string()
|
|
2391
|
-
}),
|
|
2392
|
-
import_v416.z.object({
|
|
2393
|
-
type: import_v416.z.literal("find"),
|
|
2394
|
-
url: import_v416.z.string(),
|
|
2395
|
-
pattern: import_v416.z.string()
|
|
2396
|
-
})
|
|
2397
|
-
]).nullish()
|
|
2398
|
-
});
|
|
2399
|
-
var fileSearchCallItem = import_v416.z.object({
|
|
2400
|
-
type: import_v416.z.literal("file_search_call"),
|
|
2401
|
-
id: import_v416.z.string(),
|
|
2402
|
-
queries: import_v416.z.array(import_v416.z.string()),
|
|
2403
|
-
results: import_v416.z.array(
|
|
2404
|
-
import_v416.z.object({
|
|
2405
|
-
attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
|
|
2406
|
-
file_id: import_v416.z.string(),
|
|
2407
|
-
filename: import_v416.z.string(),
|
|
2408
|
-
score: import_v416.z.number(),
|
|
2409
|
-
text: import_v416.z.string()
|
|
2410
|
-
})
|
|
2411
|
-
).nullish()
|
|
2412
|
-
});
|
|
2413
|
-
var codeInterpreterCallItem = import_v416.z.object({
|
|
2414
|
-
type: import_v416.z.literal("code_interpreter_call"),
|
|
2415
|
-
id: import_v416.z.string(),
|
|
2416
|
-
code: import_v416.z.string().nullable(),
|
|
2417
|
-
container_id: import_v416.z.string(),
|
|
2418
|
-
outputs: import_v416.z.array(
|
|
2419
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2420
|
-
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
2421
|
-
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
2422
|
-
])
|
|
2423
|
-
).nullable()
|
|
2424
|
-
});
|
|
2425
|
-
var localShellCallItem = import_v416.z.object({
|
|
2426
|
-
type: import_v416.z.literal("local_shell_call"),
|
|
2427
|
-
id: import_v416.z.string(),
|
|
2428
|
-
call_id: import_v416.z.string(),
|
|
2429
|
-
action: import_v416.z.object({
|
|
2430
|
-
type: import_v416.z.literal("exec"),
|
|
2431
|
-
command: import_v416.z.array(import_v416.z.string()),
|
|
2432
|
-
timeout_ms: import_v416.z.number().optional(),
|
|
2433
|
-
user: import_v416.z.string().optional(),
|
|
2434
|
-
working_directory: import_v416.z.string().optional(),
|
|
2435
|
-
env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
|
|
2436
|
-
})
|
|
2437
|
-
});
|
|
2438
|
-
var imageGenerationCallItem = import_v416.z.object({
|
|
2439
|
-
type: import_v416.z.literal("image_generation_call"),
|
|
2440
|
-
id: import_v416.z.string(),
|
|
2441
|
-
result: import_v416.z.string()
|
|
2442
|
-
});
|
|
2443
|
-
var TOP_LOGPROBS_MAX = 20;
|
|
2444
|
-
var LOGPROBS_SCHEMA = import_v416.z.array(
|
|
2445
|
-
import_v416.z.object({
|
|
2446
|
-
token: import_v416.z.string(),
|
|
2447
|
-
logprob: import_v416.z.number(),
|
|
2448
|
-
top_logprobs: import_v416.z.array(
|
|
2449
|
-
import_v416.z.object({
|
|
2450
|
-
token: import_v416.z.string(),
|
|
2451
|
-
logprob: import_v416.z.number()
|
|
2452
|
-
})
|
|
2453
|
-
)
|
|
2454
|
-
})
|
|
2455
|
-
);
|
|
2456
3058
|
var OpenAIResponsesLanguageModel = class {
|
|
2457
3059
|
constructor(modelId, config) {
|
|
2458
3060
|
this.specificationVersion = "v2";
|
|
@@ -2505,7 +3107,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2505
3107
|
if (stopSequences != null) {
|
|
2506
3108
|
warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
|
|
2507
3109
|
}
|
|
2508
|
-
const openaiOptions = await (0,
|
|
3110
|
+
const openaiOptions = await (0, import_provider_utils24.parseProviderOptions)({
|
|
2509
3111
|
provider: "openai",
|
|
2510
3112
|
providerOptions,
|
|
2511
3113
|
schema: openaiResponsesProviderOptionsSchema
|
|
@@ -2644,7 +3246,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2644
3246
|
tools: openaiTools2,
|
|
2645
3247
|
toolChoice: openaiToolChoice,
|
|
2646
3248
|
toolWarnings
|
|
2647
|
-
} = prepareResponsesTools({
|
|
3249
|
+
} = await prepareResponsesTools({
|
|
2648
3250
|
tools,
|
|
2649
3251
|
toolChoice,
|
|
2650
3252
|
strictJsonSchema
|
|
@@ -2674,91 +3276,13 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2674
3276
|
responseHeaders,
|
|
2675
3277
|
value: response,
|
|
2676
3278
|
rawValue: rawResponse
|
|
2677
|
-
} = await (0,
|
|
3279
|
+
} = await (0, import_provider_utils24.postJsonToApi)({
|
|
2678
3280
|
url,
|
|
2679
|
-
headers: (0,
|
|
3281
|
+
headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
|
|
2680
3282
|
body,
|
|
2681
3283
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
2682
|
-
successfulResponseHandler: (0,
|
|
2683
|
-
|
|
2684
|
-
id: import_v416.z.string(),
|
|
2685
|
-
created_at: import_v416.z.number(),
|
|
2686
|
-
error: import_v416.z.object({
|
|
2687
|
-
code: import_v416.z.string(),
|
|
2688
|
-
message: import_v416.z.string()
|
|
2689
|
-
}).nullish(),
|
|
2690
|
-
model: import_v416.z.string(),
|
|
2691
|
-
output: import_v416.z.array(
|
|
2692
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2693
|
-
import_v416.z.object({
|
|
2694
|
-
type: import_v416.z.literal("message"),
|
|
2695
|
-
role: import_v416.z.literal("assistant"),
|
|
2696
|
-
id: import_v416.z.string(),
|
|
2697
|
-
content: import_v416.z.array(
|
|
2698
|
-
import_v416.z.object({
|
|
2699
|
-
type: import_v416.z.literal("output_text"),
|
|
2700
|
-
text: import_v416.z.string(),
|
|
2701
|
-
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2702
|
-
annotations: import_v416.z.array(
|
|
2703
|
-
import_v416.z.discriminatedUnion("type", [
|
|
2704
|
-
import_v416.z.object({
|
|
2705
|
-
type: import_v416.z.literal("url_citation"),
|
|
2706
|
-
start_index: import_v416.z.number(),
|
|
2707
|
-
end_index: import_v416.z.number(),
|
|
2708
|
-
url: import_v416.z.string(),
|
|
2709
|
-
title: import_v416.z.string()
|
|
2710
|
-
}),
|
|
2711
|
-
import_v416.z.object({
|
|
2712
|
-
type: import_v416.z.literal("file_citation"),
|
|
2713
|
-
file_id: import_v416.z.string(),
|
|
2714
|
-
filename: import_v416.z.string().nullish(),
|
|
2715
|
-
index: import_v416.z.number().nullish(),
|
|
2716
|
-
start_index: import_v416.z.number().nullish(),
|
|
2717
|
-
end_index: import_v416.z.number().nullish(),
|
|
2718
|
-
quote: import_v416.z.string().nullish()
|
|
2719
|
-
}),
|
|
2720
|
-
import_v416.z.object({
|
|
2721
|
-
type: import_v416.z.literal("container_file_citation")
|
|
2722
|
-
})
|
|
2723
|
-
])
|
|
2724
|
-
)
|
|
2725
|
-
})
|
|
2726
|
-
)
|
|
2727
|
-
}),
|
|
2728
|
-
webSearchCallItem,
|
|
2729
|
-
fileSearchCallItem,
|
|
2730
|
-
codeInterpreterCallItem,
|
|
2731
|
-
imageGenerationCallItem,
|
|
2732
|
-
localShellCallItem,
|
|
2733
|
-
import_v416.z.object({
|
|
2734
|
-
type: import_v416.z.literal("function_call"),
|
|
2735
|
-
call_id: import_v416.z.string(),
|
|
2736
|
-
name: import_v416.z.string(),
|
|
2737
|
-
arguments: import_v416.z.string(),
|
|
2738
|
-
id: import_v416.z.string()
|
|
2739
|
-
}),
|
|
2740
|
-
import_v416.z.object({
|
|
2741
|
-
type: import_v416.z.literal("computer_call"),
|
|
2742
|
-
id: import_v416.z.string(),
|
|
2743
|
-
status: import_v416.z.string().optional()
|
|
2744
|
-
}),
|
|
2745
|
-
import_v416.z.object({
|
|
2746
|
-
type: import_v416.z.literal("reasoning"),
|
|
2747
|
-
id: import_v416.z.string(),
|
|
2748
|
-
encrypted_content: import_v416.z.string().nullish(),
|
|
2749
|
-
summary: import_v416.z.array(
|
|
2750
|
-
import_v416.z.object({
|
|
2751
|
-
type: import_v416.z.literal("summary_text"),
|
|
2752
|
-
text: import_v416.z.string()
|
|
2753
|
-
})
|
|
2754
|
-
)
|
|
2755
|
-
})
|
|
2756
|
-
])
|
|
2757
|
-
),
|
|
2758
|
-
service_tier: import_v416.z.string().nullish(),
|
|
2759
|
-
incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
|
|
2760
|
-
usage: usageSchema2
|
|
2761
|
-
})
|
|
3284
|
+
successfulResponseHandler: (0, import_provider_utils24.createJsonResponseHandler)(
|
|
3285
|
+
openaiResponsesResponseSchema
|
|
2762
3286
|
),
|
|
2763
3287
|
abortSignal: options.abortSignal,
|
|
2764
3288
|
fetch: this.config.fetch
|
|
@@ -2821,7 +3345,9 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2821
3345
|
type: "tool-call",
|
|
2822
3346
|
toolCallId: part.call_id,
|
|
2823
3347
|
toolName: "local_shell",
|
|
2824
|
-
input: JSON.stringify({
|
|
3348
|
+
input: JSON.stringify({
|
|
3349
|
+
action: part.action
|
|
3350
|
+
}),
|
|
2825
3351
|
providerMetadata: {
|
|
2826
3352
|
openai: {
|
|
2827
3353
|
itemId: part.id
|
|
@@ -2849,7 +3375,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2849
3375
|
content.push({
|
|
2850
3376
|
type: "source",
|
|
2851
3377
|
sourceType: "url",
|
|
2852
|
-
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0,
|
|
3378
|
+
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils24.generateId)(),
|
|
2853
3379
|
url: annotation.url,
|
|
2854
3380
|
title: annotation.title
|
|
2855
3381
|
});
|
|
@@ -2857,7 +3383,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2857
3383
|
content.push({
|
|
2858
3384
|
type: "source",
|
|
2859
3385
|
sourceType: "document",
|
|
2860
|
-
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0,
|
|
3386
|
+
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils24.generateId)(),
|
|
2861
3387
|
mediaType: "text/plain",
|
|
2862
3388
|
title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
|
|
2863
3389
|
filename: (_l = annotation.filename) != null ? _l : annotation.file_id
|
|
@@ -3009,18 +3535,18 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3009
3535
|
warnings,
|
|
3010
3536
|
webSearchToolName
|
|
3011
3537
|
} = await this.getArgs(options);
|
|
3012
|
-
const { responseHeaders, value: response } = await (0,
|
|
3538
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils24.postJsonToApi)({
|
|
3013
3539
|
url: this.config.url({
|
|
3014
3540
|
path: "/responses",
|
|
3015
3541
|
modelId: this.modelId
|
|
3016
3542
|
}),
|
|
3017
|
-
headers: (0,
|
|
3543
|
+
headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
|
|
3018
3544
|
body: {
|
|
3019
3545
|
...body,
|
|
3020
3546
|
stream: true
|
|
3021
3547
|
},
|
|
3022
3548
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
3023
|
-
successfulResponseHandler: (0,
|
|
3549
|
+
successfulResponseHandler: (0, import_provider_utils24.createEventSourceResponseHandler)(
|
|
3024
3550
|
openaiResponsesChunkSchema
|
|
3025
3551
|
),
|
|
3026
3552
|
abortSignal: options.abortSignal,
|
|
@@ -3397,7 +3923,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3397
3923
|
controller.enqueue({
|
|
3398
3924
|
type: "source",
|
|
3399
3925
|
sourceType: "url",
|
|
3400
|
-
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0,
|
|
3926
|
+
id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils24.generateId)(),
|
|
3401
3927
|
url: value.annotation.url,
|
|
3402
3928
|
title: value.annotation.title
|
|
3403
3929
|
});
|
|
@@ -3405,7 +3931,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3405
3931
|
controller.enqueue({
|
|
3406
3932
|
type: "source",
|
|
3407
3933
|
sourceType: "document",
|
|
3408
|
-
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0,
|
|
3934
|
+
id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils24.generateId)(),
|
|
3409
3935
|
mediaType: "text/plain",
|
|
3410
3936
|
title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
|
|
3411
3937
|
filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
|
|
@@ -3441,196 +3967,6 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3441
3967
|
};
|
|
3442
3968
|
}
|
|
3443
3969
|
};
|
|
3444
|
-
var usageSchema2 = import_v416.z.object({
|
|
3445
|
-
input_tokens: import_v416.z.number(),
|
|
3446
|
-
input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
|
|
3447
|
-
output_tokens: import_v416.z.number(),
|
|
3448
|
-
output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
|
|
3449
|
-
});
|
|
3450
|
-
var textDeltaChunkSchema = import_v416.z.object({
|
|
3451
|
-
type: import_v416.z.literal("response.output_text.delta"),
|
|
3452
|
-
item_id: import_v416.z.string(),
|
|
3453
|
-
delta: import_v416.z.string(),
|
|
3454
|
-
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
3455
|
-
});
|
|
3456
|
-
var errorChunkSchema = import_v416.z.object({
|
|
3457
|
-
type: import_v416.z.literal("error"),
|
|
3458
|
-
code: import_v416.z.string(),
|
|
3459
|
-
message: import_v416.z.string(),
|
|
3460
|
-
param: import_v416.z.string().nullish(),
|
|
3461
|
-
sequence_number: import_v416.z.number()
|
|
3462
|
-
});
|
|
3463
|
-
var responseFinishedChunkSchema = import_v416.z.object({
|
|
3464
|
-
type: import_v416.z.enum(["response.completed", "response.incomplete"]),
|
|
3465
|
-
response: import_v416.z.object({
|
|
3466
|
-
incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
|
|
3467
|
-
usage: usageSchema2,
|
|
3468
|
-
service_tier: import_v416.z.string().nullish()
|
|
3469
|
-
})
|
|
3470
|
-
});
|
|
3471
|
-
var responseCreatedChunkSchema = import_v416.z.object({
|
|
3472
|
-
type: import_v416.z.literal("response.created"),
|
|
3473
|
-
response: import_v416.z.object({
|
|
3474
|
-
id: import_v416.z.string(),
|
|
3475
|
-
created_at: import_v416.z.number(),
|
|
3476
|
-
model: import_v416.z.string(),
|
|
3477
|
-
service_tier: import_v416.z.string().nullish()
|
|
3478
|
-
})
|
|
3479
|
-
});
|
|
3480
|
-
var responseOutputItemAddedSchema = import_v416.z.object({
|
|
3481
|
-
type: import_v416.z.literal("response.output_item.added"),
|
|
3482
|
-
output_index: import_v416.z.number(),
|
|
3483
|
-
item: import_v416.z.discriminatedUnion("type", [
|
|
3484
|
-
import_v416.z.object({
|
|
3485
|
-
type: import_v416.z.literal("message"),
|
|
3486
|
-
id: import_v416.z.string()
|
|
3487
|
-
}),
|
|
3488
|
-
import_v416.z.object({
|
|
3489
|
-
type: import_v416.z.literal("reasoning"),
|
|
3490
|
-
id: import_v416.z.string(),
|
|
3491
|
-
encrypted_content: import_v416.z.string().nullish()
|
|
3492
|
-
}),
|
|
3493
|
-
import_v416.z.object({
|
|
3494
|
-
type: import_v416.z.literal("function_call"),
|
|
3495
|
-
id: import_v416.z.string(),
|
|
3496
|
-
call_id: import_v416.z.string(),
|
|
3497
|
-
name: import_v416.z.string(),
|
|
3498
|
-
arguments: import_v416.z.string()
|
|
3499
|
-
}),
|
|
3500
|
-
import_v416.z.object({
|
|
3501
|
-
type: import_v416.z.literal("web_search_call"),
|
|
3502
|
-
id: import_v416.z.string(),
|
|
3503
|
-
status: import_v416.z.string(),
|
|
3504
|
-
action: import_v416.z.object({
|
|
3505
|
-
type: import_v416.z.literal("search"),
|
|
3506
|
-
query: import_v416.z.string().optional()
|
|
3507
|
-
}).nullish()
|
|
3508
|
-
}),
|
|
3509
|
-
import_v416.z.object({
|
|
3510
|
-
type: import_v416.z.literal("computer_call"),
|
|
3511
|
-
id: import_v416.z.string(),
|
|
3512
|
-
status: import_v416.z.string()
|
|
3513
|
-
}),
|
|
3514
|
-
import_v416.z.object({
|
|
3515
|
-
type: import_v416.z.literal("file_search_call"),
|
|
3516
|
-
id: import_v416.z.string()
|
|
3517
|
-
}),
|
|
3518
|
-
import_v416.z.object({
|
|
3519
|
-
type: import_v416.z.literal("image_generation_call"),
|
|
3520
|
-
id: import_v416.z.string()
|
|
3521
|
-
}),
|
|
3522
|
-
import_v416.z.object({
|
|
3523
|
-
type: import_v416.z.literal("code_interpreter_call"),
|
|
3524
|
-
id: import_v416.z.string(),
|
|
3525
|
-
container_id: import_v416.z.string(),
|
|
3526
|
-
code: import_v416.z.string().nullable(),
|
|
3527
|
-
outputs: import_v416.z.array(
|
|
3528
|
-
import_v416.z.discriminatedUnion("type", [
|
|
3529
|
-
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
3530
|
-
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
3531
|
-
])
|
|
3532
|
-
).nullable(),
|
|
3533
|
-
status: import_v416.z.string()
|
|
3534
|
-
})
|
|
3535
|
-
])
|
|
3536
|
-
});
|
|
3537
|
-
var responseOutputItemDoneSchema = import_v416.z.object({
|
|
3538
|
-
type: import_v416.z.literal("response.output_item.done"),
|
|
3539
|
-
output_index: import_v416.z.number(),
|
|
3540
|
-
item: import_v416.z.discriminatedUnion("type", [
|
|
3541
|
-
import_v416.z.object({
|
|
3542
|
-
type: import_v416.z.literal("message"),
|
|
3543
|
-
id: import_v416.z.string()
|
|
3544
|
-
}),
|
|
3545
|
-
import_v416.z.object({
|
|
3546
|
-
type: import_v416.z.literal("reasoning"),
|
|
3547
|
-
id: import_v416.z.string(),
|
|
3548
|
-
encrypted_content: import_v416.z.string().nullish()
|
|
3549
|
-
}),
|
|
3550
|
-
import_v416.z.object({
|
|
3551
|
-
type: import_v416.z.literal("function_call"),
|
|
3552
|
-
id: import_v416.z.string(),
|
|
3553
|
-
call_id: import_v416.z.string(),
|
|
3554
|
-
name: import_v416.z.string(),
|
|
3555
|
-
arguments: import_v416.z.string(),
|
|
3556
|
-
status: import_v416.z.literal("completed")
|
|
3557
|
-
}),
|
|
3558
|
-
codeInterpreterCallItem,
|
|
3559
|
-
imageGenerationCallItem,
|
|
3560
|
-
webSearchCallItem,
|
|
3561
|
-
fileSearchCallItem,
|
|
3562
|
-
localShellCallItem,
|
|
3563
|
-
import_v416.z.object({
|
|
3564
|
-
type: import_v416.z.literal("computer_call"),
|
|
3565
|
-
id: import_v416.z.string(),
|
|
3566
|
-
status: import_v416.z.literal("completed")
|
|
3567
|
-
})
|
|
3568
|
-
])
|
|
3569
|
-
});
|
|
3570
|
-
var responseFunctionCallArgumentsDeltaSchema = import_v416.z.object({
|
|
3571
|
-
type: import_v416.z.literal("response.function_call_arguments.delta"),
|
|
3572
|
-
item_id: import_v416.z.string(),
|
|
3573
|
-
output_index: import_v416.z.number(),
|
|
3574
|
-
delta: import_v416.z.string()
|
|
3575
|
-
});
|
|
3576
|
-
var responseCodeInterpreterCallCodeDeltaSchema = import_v416.z.object({
|
|
3577
|
-
type: import_v416.z.literal("response.code_interpreter_call_code.delta"),
|
|
3578
|
-
item_id: import_v416.z.string(),
|
|
3579
|
-
output_index: import_v416.z.number(),
|
|
3580
|
-
delta: import_v416.z.string()
|
|
3581
|
-
});
|
|
3582
|
-
var responseCodeInterpreterCallCodeDoneSchema = import_v416.z.object({
|
|
3583
|
-
type: import_v416.z.literal("response.code_interpreter_call_code.done"),
|
|
3584
|
-
item_id: import_v416.z.string(),
|
|
3585
|
-
output_index: import_v416.z.number(),
|
|
3586
|
-
code: import_v416.z.string()
|
|
3587
|
-
});
|
|
3588
|
-
var responseAnnotationAddedSchema = import_v416.z.object({
|
|
3589
|
-
type: import_v416.z.literal("response.output_text.annotation.added"),
|
|
3590
|
-
annotation: import_v416.z.discriminatedUnion("type", [
|
|
3591
|
-
import_v416.z.object({
|
|
3592
|
-
type: import_v416.z.literal("url_citation"),
|
|
3593
|
-
url: import_v416.z.string(),
|
|
3594
|
-
title: import_v416.z.string()
|
|
3595
|
-
}),
|
|
3596
|
-
import_v416.z.object({
|
|
3597
|
-
type: import_v416.z.literal("file_citation"),
|
|
3598
|
-
file_id: import_v416.z.string(),
|
|
3599
|
-
filename: import_v416.z.string().nullish(),
|
|
3600
|
-
index: import_v416.z.number().nullish(),
|
|
3601
|
-
start_index: import_v416.z.number().nullish(),
|
|
3602
|
-
end_index: import_v416.z.number().nullish(),
|
|
3603
|
-
quote: import_v416.z.string().nullish()
|
|
3604
|
-
})
|
|
3605
|
-
])
|
|
3606
|
-
});
|
|
3607
|
-
var responseReasoningSummaryPartAddedSchema = import_v416.z.object({
|
|
3608
|
-
type: import_v416.z.literal("response.reasoning_summary_part.added"),
|
|
3609
|
-
item_id: import_v416.z.string(),
|
|
3610
|
-
summary_index: import_v416.z.number()
|
|
3611
|
-
});
|
|
3612
|
-
var responseReasoningSummaryTextDeltaSchema = import_v416.z.object({
|
|
3613
|
-
type: import_v416.z.literal("response.reasoning_summary_text.delta"),
|
|
3614
|
-
item_id: import_v416.z.string(),
|
|
3615
|
-
summary_index: import_v416.z.number(),
|
|
3616
|
-
delta: import_v416.z.string()
|
|
3617
|
-
});
|
|
3618
|
-
var openaiResponsesChunkSchema = import_v416.z.union([
|
|
3619
|
-
textDeltaChunkSchema,
|
|
3620
|
-
responseFinishedChunkSchema,
|
|
3621
|
-
responseCreatedChunkSchema,
|
|
3622
|
-
responseOutputItemAddedSchema,
|
|
3623
|
-
responseOutputItemDoneSchema,
|
|
3624
|
-
responseFunctionCallArgumentsDeltaSchema,
|
|
3625
|
-
responseCodeInterpreterCallCodeDeltaSchema,
|
|
3626
|
-
responseCodeInterpreterCallCodeDoneSchema,
|
|
3627
|
-
responseAnnotationAddedSchema,
|
|
3628
|
-
responseReasoningSummaryPartAddedSchema,
|
|
3629
|
-
responseReasoningSummaryTextDeltaSchema,
|
|
3630
|
-
errorChunkSchema,
|
|
3631
|
-
import_v416.z.object({ type: import_v416.z.string() }).loose()
|
|
3632
|
-
// fallback for unknown chunks
|
|
3633
|
-
]);
|
|
3634
3970
|
function isTextDeltaChunk(chunk) {
|
|
3635
3971
|
return chunk.type === "response.output_text.delta";
|
|
3636
3972
|
}
|
|
@@ -3707,55 +4043,23 @@ function getResponsesModelConfig(modelId) {
|
|
|
3707
4043
|
isReasoningModel: false
|
|
3708
4044
|
};
|
|
3709
4045
|
}
|
|
3710
|
-
var openaiResponsesProviderOptionsSchema = import_v416.z.object({
|
|
3711
|
-
include: import_v416.z.array(
|
|
3712
|
-
import_v416.z.enum([
|
|
3713
|
-
"reasoning.encrypted_content",
|
|
3714
|
-
"file_search_call.results",
|
|
3715
|
-
"message.output_text.logprobs"
|
|
3716
|
-
])
|
|
3717
|
-
).nullish(),
|
|
3718
|
-
instructions: import_v416.z.string().nullish(),
|
|
3719
|
-
/**
|
|
3720
|
-
* Return the log probabilities of the tokens.
|
|
3721
|
-
*
|
|
3722
|
-
* Setting to true will return the log probabilities of the tokens that
|
|
3723
|
-
* were generated.
|
|
3724
|
-
*
|
|
3725
|
-
* Setting to a number will return the log probabilities of the top n
|
|
3726
|
-
* tokens that were generated.
|
|
3727
|
-
*
|
|
3728
|
-
* @see https://platform.openai.com/docs/api-reference/responses/create
|
|
3729
|
-
* @see https://cookbook.openai.com/examples/using_logprobs
|
|
3730
|
-
*/
|
|
3731
|
-
logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
|
|
3732
|
-
/**
|
|
3733
|
-
* The maximum number of total calls to built-in tools that can be processed in a response.
|
|
3734
|
-
* This maximum number applies across all built-in tool calls, not per individual tool.
|
|
3735
|
-
* Any further attempts to call a tool by the model will be ignored.
|
|
3736
|
-
*/
|
|
3737
|
-
maxToolCalls: import_v416.z.number().nullish(),
|
|
3738
|
-
metadata: import_v416.z.any().nullish(),
|
|
3739
|
-
parallelToolCalls: import_v416.z.boolean().nullish(),
|
|
3740
|
-
previousResponseId: import_v416.z.string().nullish(),
|
|
3741
|
-
promptCacheKey: import_v416.z.string().nullish(),
|
|
3742
|
-
reasoningEffort: import_v416.z.string().nullish(),
|
|
3743
|
-
reasoningSummary: import_v416.z.string().nullish(),
|
|
3744
|
-
safetyIdentifier: import_v416.z.string().nullish(),
|
|
3745
|
-
serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
|
|
3746
|
-
store: import_v416.z.boolean().nullish(),
|
|
3747
|
-
strictJsonSchema: import_v416.z.boolean().nullish(),
|
|
3748
|
-
textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
|
|
3749
|
-
user: import_v416.z.string().nullish()
|
|
3750
|
-
});
|
|
3751
4046
|
|
|
3752
4047
|
// src/speech/openai-speech-model.ts
|
|
3753
|
-
var
|
|
3754
|
-
|
|
3755
|
-
|
|
3756
|
-
|
|
3757
|
-
|
|
3758
|
-
|
|
4048
|
+
var import_provider_utils26 = require("@ai-sdk/provider-utils");
|
|
4049
|
+
|
|
4050
|
+
// src/speech/openai-speech-options.ts
|
|
4051
|
+
var import_provider_utils25 = require("@ai-sdk/provider-utils");
|
|
4052
|
+
var z18 = __toESM(require("zod/v4"));
|
|
4053
|
+
var openaiSpeechProviderOptionsSchema = (0, import_provider_utils25.lazyValidator)(
|
|
4054
|
+
() => (0, import_provider_utils25.zodSchema)(
|
|
4055
|
+
z18.object({
|
|
4056
|
+
instructions: z18.string().nullish(),
|
|
4057
|
+
speed: z18.number().min(0.25).max(4).default(1).nullish()
|
|
4058
|
+
})
|
|
4059
|
+
)
|
|
4060
|
+
);
|
|
4061
|
+
|
|
4062
|
+
// src/speech/openai-speech-model.ts
|
|
3759
4063
|
var OpenAISpeechModel = class {
|
|
3760
4064
|
constructor(modelId, config) {
|
|
3761
4065
|
this.modelId = modelId;
|
|
@@ -3775,10 +4079,10 @@ var OpenAISpeechModel = class {
|
|
|
3775
4079
|
providerOptions
|
|
3776
4080
|
}) {
|
|
3777
4081
|
const warnings = [];
|
|
3778
|
-
const openAIOptions = await (0,
|
|
4082
|
+
const openAIOptions = await (0, import_provider_utils26.parseProviderOptions)({
|
|
3779
4083
|
provider: "openai",
|
|
3780
4084
|
providerOptions,
|
|
3781
|
-
schema:
|
|
4085
|
+
schema: openaiSpeechProviderOptionsSchema
|
|
3782
4086
|
});
|
|
3783
4087
|
const requestBody = {
|
|
3784
4088
|
model: this.modelId,
|
|
@@ -3828,15 +4132,15 @@ var OpenAISpeechModel = class {
|
|
|
3828
4132
|
value: audio,
|
|
3829
4133
|
responseHeaders,
|
|
3830
4134
|
rawValue: rawResponse
|
|
3831
|
-
} = await (0,
|
|
4135
|
+
} = await (0, import_provider_utils26.postJsonToApi)({
|
|
3832
4136
|
url: this.config.url({
|
|
3833
4137
|
path: "/audio/speech",
|
|
3834
4138
|
modelId: this.modelId
|
|
3835
4139
|
}),
|
|
3836
|
-
headers: (0,
|
|
4140
|
+
headers: (0, import_provider_utils26.combineHeaders)(this.config.headers(), options.headers),
|
|
3837
4141
|
body: requestBody,
|
|
3838
4142
|
failedResponseHandler: openaiFailedResponseHandler,
|
|
3839
|
-
successfulResponseHandler: (0,
|
|
4143
|
+
successfulResponseHandler: (0, import_provider_utils26.createBinaryResponseHandler)(),
|
|
3840
4144
|
abortSignal: options.abortSignal,
|
|
3841
4145
|
fetch: this.config.fetch
|
|
3842
4146
|
});
|
|
@@ -3857,35 +4161,73 @@ var OpenAISpeechModel = class {
 };
 
 // src/transcription/openai-transcription-model.ts
-var
-
+var import_provider_utils29 = require("@ai-sdk/provider-utils");
+
+// src/transcription/openai-transcription-api.ts
+var import_provider_utils27 = require("@ai-sdk/provider-utils");
+var z19 = __toESM(require("zod/v4"));
+var openaiTranscriptionResponseSchema = (0, import_provider_utils27.lazyValidator)(
+  () => (0, import_provider_utils27.zodSchema)(
+    z19.object({
+      text: z19.string(),
+      language: z19.string().nullish(),
+      duration: z19.number().nullish(),
+      words: z19.array(
+        z19.object({
+          word: z19.string(),
+          start: z19.number(),
+          end: z19.number()
+        })
+      ).nullish(),
+      segments: z19.array(
+        z19.object({
+          id: z19.number(),
+          seek: z19.number(),
+          start: z19.number(),
+          end: z19.number(),
+          text: z19.string(),
+          tokens: z19.array(z19.number()),
+          temperature: z19.number(),
+          avg_logprob: z19.number(),
+          compression_ratio: z19.number(),
+          no_speech_prob: z19.number()
+        })
+      ).nullish()
+    })
+  )
+);
 
 // src/transcription/openai-transcription-options.ts
-var
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var import_provider_utils28 = require("@ai-sdk/provider-utils");
+var z20 = __toESM(require("zod/v4"));
+var openAITranscriptionProviderOptions = (0, import_provider_utils28.lazyValidator)(
+  () => (0, import_provider_utils28.zodSchema)(
+    z20.object({
+      /**
+       * Additional information to include in the transcription response.
+       */
+      include: z20.array(z20.string()).optional(),
+      /**
+       * The language of the input audio in ISO-639-1 format.
+       */
+      language: z20.string().optional(),
+      /**
+       * An optional text to guide the model's style or continue a previous audio segment.
+       */
+      prompt: z20.string().optional(),
+      /**
+       * The sampling temperature, between 0 and 1.
+       * @default 0
+       */
+      temperature: z20.number().min(0).max(1).default(0).optional(),
+      /**
+       * The timestamp granularities to populate for this transcription.
+       * @default ['segment']
+       */
+      timestampGranularities: z20.array(z20.enum(["word", "segment"])).default(["segment"]).optional()
+    })
+  )
+);
 
 // src/transcription/openai-transcription-model.ts
 var languageMap = {
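The options schema added above is what `providerOptions.openai` is validated against when transcribing. A hedged sketch of passing those options through the AI SDK's experimental transcription API; the file name, model id, and option values are illustrative:

```ts
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("./meeting.mp3"),
  providerOptions: {
    openai: {
      // Fields mirror openAITranscriptionProviderOptions in the diff above.
      language: "en",
      temperature: 0,
      timestampGranularities: ["word", "segment"],
    },
  },
});

console.log(result.text);
```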
@@ -3962,15 +4304,15 @@ var OpenAITranscriptionModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = await (0,
+    const openAIOptions = await (0, import_provider_utils29.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
-    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils29.convertBase64ToUint8Array)(audio)]);
     formData.append("model", this.modelId);
-    const fileExtension = (0,
+    const fileExtension = (0, import_provider_utils29.mediaTypeToExtension)(mediaType);
     formData.append(
       "file",
       new File([blob], "audio", { type: mediaType }),
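The `+` lines above normalize the incoming audio (raw `Uint8Array` or a base64 string) into a `Blob` before appending it to the multipart body. A standalone sketch of the same normalization using only web platform APIs in place of the `convertBase64ToUint8Array` and `mediaTypeToExtension` helpers from `@ai-sdk/provider-utils`; the helper name and example media type are hypothetical:

```ts
// Hypothetical helper mirroring the FormData construction in the bundle above.
function buildTranscriptionFormData(
  audio: Uint8Array | string, // raw bytes or a base64-encoded string
  mediaType: string, // e.g. "audio/mpeg"
  modelId: string, // e.g. "whisper-1"
): FormData {
  // Decode base64 input to bytes; Uint8Array input is used as-is.
  const bytes =
    typeof audio === "string"
      ? Uint8Array.from(atob(audio), (c) => c.charCodeAt(0))
      : audio;

  const formData = new FormData();
  formData.append("model", modelId);
  formData.append("file", new File([bytes], "audio", { type: mediaType }));
  return formData;
}
```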
@@ -4015,15 +4357,15 @@ var OpenAITranscriptionModel = class {
       value: response,
       responseHeaders,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils29.postFormDataToApi)({
       url: this.config.url({
         path: "/audio/transcriptions",
         modelId: this.modelId
       }),
-      headers: (0,
+      headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
       formData,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
         openaiTranscriptionResponseSchema
       ),
       abortSignal: options.abortSignal,
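The handler registered above validates the `/audio/transcriptions` JSON against the schema defined earlier in this diff. A small sketch of the response shape that schema accepts, written against `zod/v4` directly; the sample values are made up and only a subset of the fields is shown:

```ts
import { z } from "zod/v4";

// Subset of openaiTranscriptionResponseSchema from this diff.
const transcriptionResponse = z.object({
  text: z.string(),
  language: z.string().nullish(),
  duration: z.number().nullish(),
  words: z
    .array(z.object({ word: z.string(), start: z.number(), end: z.number() }))
    .nullish(),
});

const sample = {
  text: "hello world",
  language: "english",
  duration: 1.25,
  words: [{ word: "hello", start: 0, end: 0.6 }],
};

console.log(transcriptionResponse.parse(sample).text); // "hello world"
```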
@@ -4053,49 +4395,23 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = import_v419.z.object({
-  text: import_v419.z.string(),
-  language: import_v419.z.string().nullish(),
-  duration: import_v419.z.number().nullish(),
-  words: import_v419.z.array(
-    import_v419.z.object({
-      word: import_v419.z.string(),
-      start: import_v419.z.number(),
-      end: import_v419.z.number()
-    })
-  ).nullish(),
-  segments: import_v419.z.array(
-    import_v419.z.object({
-      id: import_v419.z.number(),
-      seek: import_v419.z.number(),
-      start: import_v419.z.number(),
-      end: import_v419.z.number(),
-      text: import_v419.z.string(),
-      tokens: import_v419.z.array(import_v419.z.number()),
-      temperature: import_v419.z.number(),
-      avg_logprob: import_v419.z.number(),
-      compression_ratio: import_v419.z.number(),
-      no_speech_prob: import_v419.z.number()
-    })
-  ).nullish()
-});
 
 // src/version.ts
-var VERSION = true ? "2.0.
+var VERSION = true ? "2.0.46" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
   var _a, _b;
-  const baseURL = (_a = (0,
-    (0,
+  const baseURL = (_a = (0, import_provider_utils30.withoutTrailingSlash)(
+    (0, import_provider_utils30.loadOptionalSetting)({
       settingValue: options.baseURL,
       environmentVariableName: "OPENAI_BASE_URL"
     })
   )) != null ? _a : "https://api.openai.com/v1";
   const providerName = (_b = options.name) != null ? _b : "openai";
-  const getHeaders = () => (0,
+  const getHeaders = () => (0, import_provider_utils30.withUserAgentSuffix)(
     {
-      Authorization: `Bearer ${(0,
+      Authorization: `Bearer ${(0, import_provider_utils30.loadApiKey)({
         apiKey: options.apiKey,
         environmentVariableName: "OPENAI_API_KEY",
         description: "OpenAI"
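The provider factory above resolves `baseURL` from `OPENAI_BASE_URL` (falling back to `https://api.openai.com/v1`, with any trailing slash stripped) and the API key from `OPENAI_API_KEY`. A brief configuration sketch; the proxy URL and model id are placeholders:

```ts
import { createOpenAI } from "@ai-sdk/openai";

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY, // optional: loadApiKey falls back to OPENAI_API_KEY
  baseURL: "https://my-openai-proxy.example.com/v1", // optional: trailing slash is stripped
  name: "openai", // optional provider name, defaults to "openai"
});

const model = openai.chat("gpt-4o-mini");
```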