@ai-sdk/openai 2.0.46 → 2.0.48
This diff reflects the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and shows the changes between the two versions as they appear in the public registry.
- package/CHANGELOG.md +13 -0
- package/dist/index.js +673 -665
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +40 -22
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +672 -664
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +39 -21
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
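The dominant change across both dist bundles is mechanical: each schema module now pulls zod through a namespaced `require("zod/v4")` binding (`import_v4`, `import_v42`, and so on) in place of the previous zod binding (truncated in this diff view), while the surrounding `lazyValidator`/`zodSchema` wrappers from `@ai-sdk/provider-utils` are unchanged. A minimal TypeScript sketch of that pattern, assuming ESM source (the package only ships the compiled output shown below); the schema shape is copied from the embedding hunk in this diff:

// zod is now imported from the "zod/v4" subpath.
import { z } from "zod/v4";
import { lazyValidator, zodSchema } from "@ai-sdk/provider-utils";

// Shape copied from the embedding response hunk; the variable name
// matches the compiled `openaiTextEmbeddingResponseSchema`.
const openaiTextEmbeddingResponseSchema = lazyValidator(() =>
  zodSchema(
    z.object({
      data: z.array(z.object({ embedding: z.array(z.number()) })),
      usage: z.object({ prompt_tokens: z.number() }).nullish(),
    }),
  ),
);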
package/dist/index.js
CHANGED

@@ -1,9 +1,7 @@
 "use strict";
-var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
-var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
-var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
-  // If the importer is in node compatibility mode or this is not an ESM
-  // file that has been converted to a CommonJS file using a Babel-
-  // compatible transform (i.e. "__esModule" has not been set), then set
-  // "default" to the CommonJS "module.exports" for node compatibility.
-  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
-  mod
-));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

 // src/index.ts
@@ -44,17 +34,17 @@ var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");

 // src/openai-error.ts
-var
+var import_v4 = require("zod/v4");
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiErrorDataSchema = import_v4.z.object({
+  error: import_v4.z.object({
+    message: import_v4.z.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: import_v4.z.string().nullish(),
+    param: import_v4.z.any().nullish(),
+    code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
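For context on the "handled loosely" comment above: the schema accepts string or numeric error codes and tolerates missing `type`/`param`, so the terser error payloads of OpenAI-compatible providers still parse. A small illustrative check (standalone sketch, using `zod/v4` directly rather than the compiled `import_v4` binding):

import { z } from "zod/v4";

const openaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

// A minimal payload with a numeric code and no "type"/"param" parses fine.
const parsed = openaiErrorDataSchema.parse({
  error: { message: "Rate limit exceeded", code: 429 },
});
console.log(parsed.error.message); // "Rate limit exceeded"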
@@ -272,67 +262,67 @@ function mapOpenAIFinishReason(finishReason) {
|
|
|
272
262
|
|
|
273
263
|
// src/chat/openai-chat-api.ts
|
|
274
264
|
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
275
|
-
var
|
|
265
|
+
var import_v42 = require("zod/v4");
|
|
276
266
|
var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
|
|
277
267
|
() => (0, import_provider_utils3.zodSchema)(
|
|
278
|
-
|
|
279
|
-
id:
|
|
280
|
-
created:
|
|
281
|
-
model:
|
|
282
|
-
choices:
|
|
283
|
-
|
|
284
|
-
message:
|
|
285
|
-
role:
|
|
286
|
-
content:
|
|
287
|
-
tool_calls:
|
|
288
|
-
|
|
289
|
-
id:
|
|
290
|
-
type:
|
|
291
|
-
function:
|
|
292
|
-
name:
|
|
293
|
-
arguments:
|
|
268
|
+
import_v42.z.object({
|
|
269
|
+
id: import_v42.z.string().nullish(),
|
|
270
|
+
created: import_v42.z.number().nullish(),
|
|
271
|
+
model: import_v42.z.string().nullish(),
|
|
272
|
+
choices: import_v42.z.array(
|
|
273
|
+
import_v42.z.object({
|
|
274
|
+
message: import_v42.z.object({
|
|
275
|
+
role: import_v42.z.literal("assistant").nullish(),
|
|
276
|
+
content: import_v42.z.string().nullish(),
|
|
277
|
+
tool_calls: import_v42.z.array(
|
|
278
|
+
import_v42.z.object({
|
|
279
|
+
id: import_v42.z.string().nullish(),
|
|
280
|
+
type: import_v42.z.literal("function"),
|
|
281
|
+
function: import_v42.z.object({
|
|
282
|
+
name: import_v42.z.string(),
|
|
283
|
+
arguments: import_v42.z.string()
|
|
294
284
|
})
|
|
295
285
|
})
|
|
296
286
|
).nullish(),
|
|
297
|
-
annotations:
|
|
298
|
-
|
|
299
|
-
type:
|
|
300
|
-
start_index:
|
|
301
|
-
end_index:
|
|
302
|
-
url:
|
|
303
|
-
title:
|
|
287
|
+
annotations: import_v42.z.array(
|
|
288
|
+
import_v42.z.object({
|
|
289
|
+
type: import_v42.z.literal("url_citation"),
|
|
290
|
+
start_index: import_v42.z.number(),
|
|
291
|
+
end_index: import_v42.z.number(),
|
|
292
|
+
url: import_v42.z.string(),
|
|
293
|
+
title: import_v42.z.string()
|
|
304
294
|
})
|
|
305
295
|
).nullish()
|
|
306
296
|
}),
|
|
307
|
-
index:
|
|
308
|
-
logprobs:
|
|
309
|
-
content:
|
|
310
|
-
|
|
311
|
-
token:
|
|
312
|
-
logprob:
|
|
313
|
-
top_logprobs:
|
|
314
|
-
|
|
315
|
-
token:
|
|
316
|
-
logprob:
|
|
297
|
+
index: import_v42.z.number(),
|
|
298
|
+
logprobs: import_v42.z.object({
|
|
299
|
+
content: import_v42.z.array(
|
|
300
|
+
import_v42.z.object({
|
|
301
|
+
token: import_v42.z.string(),
|
|
302
|
+
logprob: import_v42.z.number(),
|
|
303
|
+
top_logprobs: import_v42.z.array(
|
|
304
|
+
import_v42.z.object({
|
|
305
|
+
token: import_v42.z.string(),
|
|
306
|
+
logprob: import_v42.z.number()
|
|
317
307
|
})
|
|
318
308
|
)
|
|
319
309
|
})
|
|
320
310
|
).nullish()
|
|
321
311
|
}).nullish(),
|
|
322
|
-
finish_reason:
|
|
312
|
+
finish_reason: import_v42.z.string().nullish()
|
|
323
313
|
})
|
|
324
314
|
),
|
|
325
|
-
usage:
|
|
326
|
-
prompt_tokens:
|
|
327
|
-
completion_tokens:
|
|
328
|
-
total_tokens:
|
|
329
|
-
prompt_tokens_details:
|
|
330
|
-
cached_tokens:
|
|
315
|
+
usage: import_v42.z.object({
|
|
316
|
+
prompt_tokens: import_v42.z.number().nullish(),
|
|
317
|
+
completion_tokens: import_v42.z.number().nullish(),
|
|
318
|
+
total_tokens: import_v42.z.number().nullish(),
|
|
319
|
+
prompt_tokens_details: import_v42.z.object({
|
|
320
|
+
cached_tokens: import_v42.z.number().nullish()
|
|
331
321
|
}).nullish(),
|
|
332
|
-
completion_tokens_details:
|
|
333
|
-
reasoning_tokens:
|
|
334
|
-
accepted_prediction_tokens:
|
|
335
|
-
rejected_prediction_tokens:
|
|
322
|
+
completion_tokens_details: import_v42.z.object({
|
|
323
|
+
reasoning_tokens: import_v42.z.number().nullish(),
|
|
324
|
+
accepted_prediction_tokens: import_v42.z.number().nullish(),
|
|
325
|
+
rejected_prediction_tokens: import_v42.z.number().nullish()
|
|
336
326
|
}).nullish()
|
|
337
327
|
}).nullish()
|
|
338
328
|
})
|
|
@@ -340,66 +330,66 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
|
|
|
340
330
|
);
|
|
341
331
|
var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
|
|
342
332
|
() => (0, import_provider_utils3.zodSchema)(
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
id:
|
|
346
|
-
created:
|
|
347
|
-
model:
|
|
348
|
-
choices:
|
|
349
|
-
|
|
350
|
-
delta:
|
|
351
|
-
role:
|
|
352
|
-
content:
|
|
353
|
-
tool_calls:
|
|
354
|
-
|
|
355
|
-
index:
|
|
356
|
-
id:
|
|
357
|
-
type:
|
|
358
|
-
function:
|
|
359
|
-
name:
|
|
360
|
-
arguments:
|
|
333
|
+
import_v42.z.union([
|
|
334
|
+
import_v42.z.object({
|
|
335
|
+
id: import_v42.z.string().nullish(),
|
|
336
|
+
created: import_v42.z.number().nullish(),
|
|
337
|
+
model: import_v42.z.string().nullish(),
|
|
338
|
+
choices: import_v42.z.array(
|
|
339
|
+
import_v42.z.object({
|
|
340
|
+
delta: import_v42.z.object({
|
|
341
|
+
role: import_v42.z.enum(["assistant"]).nullish(),
|
|
342
|
+
content: import_v42.z.string().nullish(),
|
|
343
|
+
tool_calls: import_v42.z.array(
|
|
344
|
+
import_v42.z.object({
|
|
345
|
+
index: import_v42.z.number(),
|
|
346
|
+
id: import_v42.z.string().nullish(),
|
|
347
|
+
type: import_v42.z.literal("function").nullish(),
|
|
348
|
+
function: import_v42.z.object({
|
|
349
|
+
name: import_v42.z.string().nullish(),
|
|
350
|
+
arguments: import_v42.z.string().nullish()
|
|
361
351
|
})
|
|
362
352
|
})
|
|
363
353
|
).nullish(),
|
|
364
|
-
annotations:
|
|
365
|
-
|
|
366
|
-
type:
|
|
367
|
-
start_index:
|
|
368
|
-
end_index:
|
|
369
|
-
url:
|
|
370
|
-
title:
|
|
354
|
+
annotations: import_v42.z.array(
|
|
355
|
+
import_v42.z.object({
|
|
356
|
+
type: import_v42.z.literal("url_citation"),
|
|
357
|
+
start_index: import_v42.z.number(),
|
|
358
|
+
end_index: import_v42.z.number(),
|
|
359
|
+
url: import_v42.z.string(),
|
|
360
|
+
title: import_v42.z.string()
|
|
371
361
|
})
|
|
372
362
|
).nullish()
|
|
373
363
|
}).nullish(),
|
|
374
|
-
logprobs:
|
|
375
|
-
content:
|
|
376
|
-
|
|
377
|
-
token:
|
|
378
|
-
logprob:
|
|
379
|
-
top_logprobs:
|
|
380
|
-
|
|
381
|
-
token:
|
|
382
|
-
logprob:
|
|
364
|
+
logprobs: import_v42.z.object({
|
|
365
|
+
content: import_v42.z.array(
|
|
366
|
+
import_v42.z.object({
|
|
367
|
+
token: import_v42.z.string(),
|
|
368
|
+
logprob: import_v42.z.number(),
|
|
369
|
+
top_logprobs: import_v42.z.array(
|
|
370
|
+
import_v42.z.object({
|
|
371
|
+
token: import_v42.z.string(),
|
|
372
|
+
logprob: import_v42.z.number()
|
|
383
373
|
})
|
|
384
374
|
)
|
|
385
375
|
})
|
|
386
376
|
).nullish()
|
|
387
377
|
}).nullish(),
|
|
388
|
-
finish_reason:
|
|
389
|
-
index:
|
|
378
|
+
finish_reason: import_v42.z.string().nullish(),
|
|
379
|
+
index: import_v42.z.number()
|
|
390
380
|
})
|
|
391
381
|
),
|
|
392
|
-
usage:
|
|
393
|
-
prompt_tokens:
|
|
394
|
-
completion_tokens:
|
|
395
|
-
total_tokens:
|
|
396
|
-
prompt_tokens_details:
|
|
397
|
-
cached_tokens:
|
|
382
|
+
usage: import_v42.z.object({
|
|
383
|
+
prompt_tokens: import_v42.z.number().nullish(),
|
|
384
|
+
completion_tokens: import_v42.z.number().nullish(),
|
|
385
|
+
total_tokens: import_v42.z.number().nullish(),
|
|
386
|
+
prompt_tokens_details: import_v42.z.object({
|
|
387
|
+
cached_tokens: import_v42.z.number().nullish()
|
|
398
388
|
}).nullish(),
|
|
399
|
-
completion_tokens_details:
|
|
400
|
-
reasoning_tokens:
|
|
401
|
-
accepted_prediction_tokens:
|
|
402
|
-
rejected_prediction_tokens:
|
|
389
|
+
completion_tokens_details: import_v42.z.object({
|
|
390
|
+
reasoning_tokens: import_v42.z.number().nullish(),
|
|
391
|
+
accepted_prediction_tokens: import_v42.z.number().nullish(),
|
|
392
|
+
rejected_prediction_tokens: import_v42.z.number().nullish()
|
|
403
393
|
}).nullish()
|
|
404
394
|
}).nullish()
|
|
405
395
|
}),
|
|
@@ -410,17 +400,17 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
|
|
|
410
400
|
|
|
411
401
|
// src/chat/openai-chat-options.ts
|
|
412
402
|
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
413
|
-
var
|
|
403
|
+
var import_v43 = require("zod/v4");
|
|
414
404
|
var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
415
405
|
() => (0, import_provider_utils4.zodSchema)(
|
|
416
|
-
|
|
406
|
+
import_v43.z.object({
|
|
417
407
|
/**
|
|
418
408
|
* Modify the likelihood of specified tokens appearing in the completion.
|
|
419
409
|
*
|
|
420
410
|
* Accepts a JSON object that maps tokens (specified by their token ID in
|
|
421
411
|
* the GPT tokenizer) to an associated bias value from -100 to 100.
|
|
422
412
|
*/
|
|
423
|
-
logitBias:
|
|
413
|
+
logitBias: import_v43.z.record(import_v43.z.coerce.number(), import_v43.z.number()).optional(),
|
|
424
414
|
/**
|
|
425
415
|
* Return the log probabilities of the tokens.
|
|
426
416
|
*
|
|
@@ -430,42 +420,42 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
|
430
420
|
* Setting to a number will return the log probabilities of the top n
|
|
431
421
|
* tokens that were generated.
|
|
432
422
|
*/
|
|
433
|
-
logprobs:
|
|
423
|
+
logprobs: import_v43.z.union([import_v43.z.boolean(), import_v43.z.number()]).optional(),
|
|
434
424
|
/**
|
|
435
425
|
* Whether to enable parallel function calling during tool use. Default to true.
|
|
436
426
|
*/
|
|
437
|
-
parallelToolCalls:
|
|
427
|
+
parallelToolCalls: import_v43.z.boolean().optional(),
|
|
438
428
|
/**
|
|
439
429
|
* A unique identifier representing your end-user, which can help OpenAI to
|
|
440
430
|
* monitor and detect abuse.
|
|
441
431
|
*/
|
|
442
|
-
user:
|
|
432
|
+
user: import_v43.z.string().optional(),
|
|
443
433
|
/**
|
|
444
434
|
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
445
435
|
*/
|
|
446
|
-
reasoningEffort:
|
|
436
|
+
reasoningEffort: import_v43.z.enum(["minimal", "low", "medium", "high"]).optional(),
|
|
447
437
|
/**
|
|
448
438
|
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
449
439
|
*/
|
|
450
|
-
maxCompletionTokens:
|
|
440
|
+
maxCompletionTokens: import_v43.z.number().optional(),
|
|
451
441
|
/**
|
|
452
442
|
* Whether to enable persistence in responses API.
|
|
453
443
|
*/
|
|
454
|
-
store:
|
|
444
|
+
store: import_v43.z.boolean().optional(),
|
|
455
445
|
/**
|
|
456
446
|
* Metadata to associate with the request.
|
|
457
447
|
*/
|
|
458
|
-
metadata:
|
|
448
|
+
metadata: import_v43.z.record(import_v43.z.string().max(64), import_v43.z.string().max(512)).optional(),
|
|
459
449
|
/**
|
|
460
450
|
* Parameters for prediction mode.
|
|
461
451
|
*/
|
|
462
|
-
prediction:
|
|
452
|
+
prediction: import_v43.z.record(import_v43.z.string(), import_v43.z.any()).optional(),
|
|
463
453
|
/**
|
|
464
454
|
* Whether to use structured outputs.
|
|
465
455
|
*
|
|
466
456
|
* @default true
|
|
467
457
|
*/
|
|
468
|
-
structuredOutputs:
|
|
458
|
+
structuredOutputs: import_v43.z.boolean().optional(),
|
|
469
459
|
/**
|
|
470
460
|
* Service tier for the request.
|
|
471
461
|
* - 'auto': Default service tier. The request will be processed with the service tier configured in the
|
|
@@ -476,23 +466,23 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
|
|
|
476
466
|
*
|
|
477
467
|
* @default 'auto'
|
|
478
468
|
*/
|
|
479
|
-
serviceTier:
|
|
469
|
+
serviceTier: import_v43.z.enum(["auto", "flex", "priority", "default"]).optional(),
|
|
480
470
|
/**
|
|
481
471
|
* Whether to use strict JSON schema validation.
|
|
482
472
|
*
|
|
483
473
|
* @default false
|
|
484
474
|
*/
|
|
485
|
-
strictJsonSchema:
|
|
475
|
+
strictJsonSchema: import_v43.z.boolean().optional(),
|
|
486
476
|
/**
|
|
487
477
|
* Controls the verbosity of the model's responses.
|
|
488
478
|
* Lower values will result in more concise responses, while higher values will result in more verbose responses.
|
|
489
479
|
*/
|
|
490
|
-
textVerbosity:
|
|
480
|
+
textVerbosity: import_v43.z.enum(["low", "medium", "high"]).optional(),
|
|
491
481
|
/**
|
|
492
482
|
* A cache key for prompt caching. Allows manual control over prompt caching behavior.
|
|
493
483
|
* Useful for improving cache hit rates and working around automatic caching issues.
|
|
494
484
|
*/
|
|
495
|
-
promptCacheKey:
|
|
485
|
+
promptCacheKey: import_v43.z.string().optional(),
|
|
496
486
|
/**
|
|
497
487
|
* A stable identifier used to help detect users of your application
|
|
498
488
|
* that may be violating OpenAI's usage policies. The IDs should be a
|
|
@@ -500,7 +490,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
       * username or email address, in order to avoid sending us any identifying
       * information.
       */
-      safetyIdentifier:
+      safetyIdentifier: import_v43.z.string().optional()
     })
   )
 );
@@ -1217,56 +1207,56 @@ function mapOpenAIFinishReason2(finishReason) {
|
|
|
1217
1207
|
}
|
|
1218
1208
|
|
|
1219
1209
|
// src/completion/openai-completion-api.ts
|
|
1220
|
-
var
|
|
1210
|
+
var import_v44 = require("zod/v4");
|
|
1221
1211
|
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1222
1212
|
var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1223
1213
|
() => (0, import_provider_utils6.zodSchema)(
|
|
1224
|
-
|
|
1225
|
-
id:
|
|
1226
|
-
created:
|
|
1227
|
-
model:
|
|
1228
|
-
choices:
|
|
1229
|
-
|
|
1230
|
-
text:
|
|
1231
|
-
finish_reason:
|
|
1232
|
-
logprobs:
|
|
1233
|
-
tokens:
|
|
1234
|
-
token_logprobs:
|
|
1235
|
-
top_logprobs:
|
|
1214
|
+
import_v44.z.object({
|
|
1215
|
+
id: import_v44.z.string().nullish(),
|
|
1216
|
+
created: import_v44.z.number().nullish(),
|
|
1217
|
+
model: import_v44.z.string().nullish(),
|
|
1218
|
+
choices: import_v44.z.array(
|
|
1219
|
+
import_v44.z.object({
|
|
1220
|
+
text: import_v44.z.string(),
|
|
1221
|
+
finish_reason: import_v44.z.string(),
|
|
1222
|
+
logprobs: import_v44.z.object({
|
|
1223
|
+
tokens: import_v44.z.array(import_v44.z.string()),
|
|
1224
|
+
token_logprobs: import_v44.z.array(import_v44.z.number()),
|
|
1225
|
+
top_logprobs: import_v44.z.array(import_v44.z.record(import_v44.z.string(), import_v44.z.number())).nullish()
|
|
1236
1226
|
}).nullish()
|
|
1237
1227
|
})
|
|
1238
1228
|
),
|
|
1239
|
-
usage:
|
|
1240
|
-
prompt_tokens:
|
|
1241
|
-
completion_tokens:
|
|
1242
|
-
total_tokens:
|
|
1229
|
+
usage: import_v44.z.object({
|
|
1230
|
+
prompt_tokens: import_v44.z.number(),
|
|
1231
|
+
completion_tokens: import_v44.z.number(),
|
|
1232
|
+
total_tokens: import_v44.z.number()
|
|
1243
1233
|
}).nullish()
|
|
1244
1234
|
})
|
|
1245
1235
|
)
|
|
1246
1236
|
);
|
|
1247
1237
|
var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
|
|
1248
1238
|
() => (0, import_provider_utils6.zodSchema)(
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
id:
|
|
1252
|
-
created:
|
|
1253
|
-
model:
|
|
1254
|
-
choices:
|
|
1255
|
-
|
|
1256
|
-
text:
|
|
1257
|
-
finish_reason:
|
|
1258
|
-
index:
|
|
1259
|
-
logprobs:
|
|
1260
|
-
tokens:
|
|
1261
|
-
token_logprobs:
|
|
1262
|
-
top_logprobs:
|
|
1239
|
+
import_v44.z.union([
|
|
1240
|
+
import_v44.z.object({
|
|
1241
|
+
id: import_v44.z.string().nullish(),
|
|
1242
|
+
created: import_v44.z.number().nullish(),
|
|
1243
|
+
model: import_v44.z.string().nullish(),
|
|
1244
|
+
choices: import_v44.z.array(
|
|
1245
|
+
import_v44.z.object({
|
|
1246
|
+
text: import_v44.z.string(),
|
|
1247
|
+
finish_reason: import_v44.z.string().nullish(),
|
|
1248
|
+
index: import_v44.z.number(),
|
|
1249
|
+
logprobs: import_v44.z.object({
|
|
1250
|
+
tokens: import_v44.z.array(import_v44.z.string()),
|
|
1251
|
+
token_logprobs: import_v44.z.array(import_v44.z.number()),
|
|
1252
|
+
top_logprobs: import_v44.z.array(import_v44.z.record(import_v44.z.string(), import_v44.z.number())).nullish()
|
|
1263
1253
|
}).nullish()
|
|
1264
1254
|
})
|
|
1265
1255
|
),
|
|
1266
|
-
usage:
|
|
1267
|
-
prompt_tokens:
|
|
1268
|
-
completion_tokens:
|
|
1269
|
-
total_tokens:
|
|
1256
|
+
usage: import_v44.z.object({
|
|
1257
|
+
prompt_tokens: import_v44.z.number(),
|
|
1258
|
+
completion_tokens: import_v44.z.number(),
|
|
1259
|
+
total_tokens: import_v44.z.number()
|
|
1270
1260
|
}).nullish()
|
|
1271
1261
|
}),
|
|
1272
1262
|
openaiErrorDataSchema
|
|
@@ -1276,14 +1266,14 @@ var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
|
|
|
1276
1266
|
|
|
1277
1267
|
// src/completion/openai-completion-options.ts
|
|
1278
1268
|
var import_provider_utils7 = require("@ai-sdk/provider-utils");
|
|
1279
|
-
var
|
|
1269
|
+
var import_v45 = require("zod/v4");
|
|
1280
1270
|
var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
|
|
1281
1271
|
() => (0, import_provider_utils7.zodSchema)(
|
|
1282
|
-
|
|
1272
|
+
import_v45.z.object({
|
|
1283
1273
|
/**
|
|
1284
1274
|
Echo back the prompt in addition to the completion.
|
|
1285
1275
|
*/
|
|
1286
|
-
echo:
|
|
1276
|
+
echo: import_v45.z.boolean().optional(),
|
|
1287
1277
|
/**
|
|
1288
1278
|
Modify the likelihood of specified tokens appearing in the completion.
|
|
1289
1279
|
|
|
@@ -1298,16 +1288,16 @@ var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
|
|
|
1298
1288
|
As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
1299
1289
|
token from being generated.
|
|
1300
1290
|
*/
|
|
1301
|
-
logitBias:
|
|
1291
|
+
logitBias: import_v45.z.record(import_v45.z.string(), import_v45.z.number()).optional(),
|
|
1302
1292
|
/**
|
|
1303
1293
|
The suffix that comes after a completion of inserted text.
|
|
1304
1294
|
*/
|
|
1305
|
-
suffix:
|
|
1295
|
+
suffix: import_v45.z.string().optional(),
|
|
1306
1296
|
/**
|
|
1307
1297
|
A unique identifier representing your end-user, which can help OpenAI to
|
|
1308
1298
|
monitor and detect abuse. Learn more.
|
|
1309
1299
|
*/
|
|
1310
|
-
user:
|
|
1300
|
+
user: import_v45.z.string().optional(),
|
|
1311
1301
|
/**
|
|
1312
1302
|
Return the log probabilities of the tokens. Including logprobs will increase
|
|
1313
1303
|
the response size and can slow down response times. However, it can
|
|
@@ -1317,7 +1307,7 @@ var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
       Setting to a number will return the log probabilities of the top n
       tokens that were generated.
       */
-      logprobs:
+      logprobs: import_v45.z.union([import_v45.z.boolean(), import_v45.z.number()]).optional()
     })
   )
 );
@@ -1558,32 +1548,32 @@ var import_provider_utils11 = require("@ai-sdk/provider-utils");
|
|
|
1558
1548
|
|
|
1559
1549
|
// src/embedding/openai-embedding-options.ts
|
|
1560
1550
|
var import_provider_utils9 = require("@ai-sdk/provider-utils");
|
|
1561
|
-
var
|
|
1551
|
+
var import_v46 = require("zod/v4");
|
|
1562
1552
|
var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
|
|
1563
1553
|
() => (0, import_provider_utils9.zodSchema)(
|
|
1564
|
-
|
|
1554
|
+
import_v46.z.object({
|
|
1565
1555
|
/**
|
|
1566
1556
|
The number of dimensions the resulting output embeddings should have.
|
|
1567
1557
|
Only supported in text-embedding-3 and later models.
|
|
1568
1558
|
*/
|
|
1569
|
-
dimensions:
|
|
1559
|
+
dimensions: import_v46.z.number().optional(),
|
|
1570
1560
|
/**
|
|
1571
1561
|
A unique identifier representing your end-user, which can help OpenAI to
|
|
1572
1562
|
monitor and detect abuse. Learn more.
|
|
1573
1563
|
*/
|
|
1574
|
-
user:
|
|
1564
|
+
user: import_v46.z.string().optional()
|
|
1575
1565
|
})
|
|
1576
1566
|
)
|
|
1577
1567
|
);
|
|
1578
1568
|
|
|
1579
1569
|
// src/embedding/openai-embedding-api.ts
|
|
1580
1570
|
var import_provider_utils10 = require("@ai-sdk/provider-utils");
|
|
1581
|
-
var
|
|
1571
|
+
var import_v47 = require("zod/v4");
|
|
1582
1572
|
var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
|
|
1583
1573
|
() => (0, import_provider_utils10.zodSchema)(
|
|
1584
|
-
|
|
1585
|
-
data:
|
|
1586
|
-
usage:
|
|
1574
|
+
import_v47.z.object({
|
|
1575
|
+
data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
|
|
1576
|
+
usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
|
|
1587
1577
|
})
|
|
1588
1578
|
)
|
|
1589
1579
|
);
|
|
@@ -1657,14 +1647,14 @@ var import_provider_utils13 = require("@ai-sdk/provider-utils");
|
|
|
1657
1647
|
|
|
1658
1648
|
// src/image/openai-image-api.ts
|
|
1659
1649
|
var import_provider_utils12 = require("@ai-sdk/provider-utils");
|
|
1660
|
-
var
|
|
1650
|
+
var import_v48 = require("zod/v4");
|
|
1661
1651
|
var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
|
|
1662
1652
|
() => (0, import_provider_utils12.zodSchema)(
|
|
1663
|
-
|
|
1664
|
-
data:
|
|
1665
|
-
|
|
1666
|
-
b64_json:
|
|
1667
|
-
revised_prompt:
|
|
1653
|
+
import_v48.z.object({
|
|
1654
|
+
data: import_v48.z.array(
|
|
1655
|
+
import_v48.z.object({
|
|
1656
|
+
b64_json: import_v48.z.string(),
|
|
1657
|
+
revised_prompt: import_v48.z.string().optional()
|
|
1668
1658
|
})
|
|
1669
1659
|
)
|
|
1670
1660
|
})
|
|
@@ -1764,22 +1754,22 @@ var OpenAIImageModel = class {
|
|
|
1764
1754
|
|
|
1765
1755
|
// src/tool/code-interpreter.ts
|
|
1766
1756
|
var import_provider_utils14 = require("@ai-sdk/provider-utils");
|
|
1767
|
-
var
|
|
1757
|
+
var import_v49 = require("zod/v4");
|
|
1768
1758
|
var codeInterpreterInputSchema = (0, import_provider_utils14.lazySchema)(
|
|
1769
1759
|
() => (0, import_provider_utils14.zodSchema)(
|
|
1770
|
-
|
|
1771
|
-
code:
|
|
1772
|
-
containerId:
|
|
1760
|
+
import_v49.z.object({
|
|
1761
|
+
code: import_v49.z.string().nullish(),
|
|
1762
|
+
containerId: import_v49.z.string()
|
|
1773
1763
|
})
|
|
1774
1764
|
)
|
|
1775
1765
|
);
|
|
1776
1766
|
var codeInterpreterOutputSchema = (0, import_provider_utils14.lazySchema)(
|
|
1777
1767
|
() => (0, import_provider_utils14.zodSchema)(
|
|
1778
|
-
|
|
1779
|
-
outputs:
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1768
|
+
import_v49.z.object({
|
|
1769
|
+
outputs: import_v49.z.array(
|
|
1770
|
+
import_v49.z.discriminatedUnion("type", [
|
|
1771
|
+
import_v49.z.object({ type: import_v49.z.literal("logs"), logs: import_v49.z.string() }),
|
|
1772
|
+
import_v49.z.object({ type: import_v49.z.literal("image"), url: import_v49.z.string() })
|
|
1783
1773
|
])
|
|
1784
1774
|
).nullish()
|
|
1785
1775
|
})
|
|
@@ -1787,11 +1777,11 @@ var codeInterpreterOutputSchema = (0, import_provider_utils14.lazySchema)(
|
|
|
1787
1777
|
);
|
|
1788
1778
|
var codeInterpreterArgsSchema = (0, import_provider_utils14.lazySchema)(
|
|
1789
1779
|
() => (0, import_provider_utils14.zodSchema)(
|
|
1790
|
-
|
|
1791
|
-
container:
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
fileIds:
|
|
1780
|
+
import_v49.z.object({
|
|
1781
|
+
container: import_v49.z.union([
|
|
1782
|
+
import_v49.z.string(),
|
|
1783
|
+
import_v49.z.object({
|
|
1784
|
+
fileIds: import_v49.z.array(import_v49.z.string()).optional()
|
|
1795
1785
|
})
|
|
1796
1786
|
]).optional()
|
|
1797
1787
|
})
|
|
@@ -1809,42 +1799,42 @@ var codeInterpreter = (args = {}) => {
|
|
|
1809
1799
|
|
|
1810
1800
|
// src/tool/file-search.ts
|
|
1811
1801
|
var import_provider_utils15 = require("@ai-sdk/provider-utils");
|
|
1812
|
-
var
|
|
1813
|
-
var comparisonFilterSchema =
|
|
1814
|
-
key:
|
|
1815
|
-
type:
|
|
1816
|
-
value:
|
|
1802
|
+
var import_v410 = require("zod/v4");
|
|
1803
|
+
var comparisonFilterSchema = import_v410.z.object({
|
|
1804
|
+
key: import_v410.z.string(),
|
|
1805
|
+
type: import_v410.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
|
|
1806
|
+
value: import_v410.z.union([import_v410.z.string(), import_v410.z.number(), import_v410.z.boolean()])
|
|
1817
1807
|
});
|
|
1818
|
-
var compoundFilterSchema =
|
|
1819
|
-
type:
|
|
1820
|
-
filters:
|
|
1821
|
-
|
|
1808
|
+
var compoundFilterSchema = import_v410.z.object({
|
|
1809
|
+
type: import_v410.z.enum(["and", "or"]),
|
|
1810
|
+
filters: import_v410.z.array(
|
|
1811
|
+
import_v410.z.union([comparisonFilterSchema, import_v410.z.lazy(() => compoundFilterSchema)])
|
|
1822
1812
|
)
|
|
1823
1813
|
});
|
|
1824
1814
|
var fileSearchArgsSchema = (0, import_provider_utils15.lazySchema)(
|
|
1825
1815
|
() => (0, import_provider_utils15.zodSchema)(
|
|
1826
|
-
|
|
1827
|
-
vectorStoreIds:
|
|
1828
|
-
maxNumResults:
|
|
1829
|
-
ranking:
|
|
1830
|
-
ranker:
|
|
1831
|
-
scoreThreshold:
|
|
1816
|
+
import_v410.z.object({
|
|
1817
|
+
vectorStoreIds: import_v410.z.array(import_v410.z.string()),
|
|
1818
|
+
maxNumResults: import_v410.z.number().optional(),
|
|
1819
|
+
ranking: import_v410.z.object({
|
|
1820
|
+
ranker: import_v410.z.string().optional(),
|
|
1821
|
+
scoreThreshold: import_v410.z.number().optional()
|
|
1832
1822
|
}).optional(),
|
|
1833
|
-
filters:
|
|
1823
|
+
filters: import_v410.z.union([comparisonFilterSchema, compoundFilterSchema]).optional()
|
|
1834
1824
|
})
|
|
1835
1825
|
)
|
|
1836
1826
|
);
|
|
1837
1827
|
var fileSearchOutputSchema = (0, import_provider_utils15.lazySchema)(
|
|
1838
1828
|
() => (0, import_provider_utils15.zodSchema)(
|
|
1839
|
-
|
|
1840
|
-
queries:
|
|
1841
|
-
results:
|
|
1842
|
-
|
|
1843
|
-
attributes:
|
|
1844
|
-
fileId:
|
|
1845
|
-
filename:
|
|
1846
|
-
score:
|
|
1847
|
-
text:
|
|
1829
|
+
import_v410.z.object({
|
|
1830
|
+
queries: import_v410.z.array(import_v410.z.string()),
|
|
1831
|
+
results: import_v410.z.array(
|
|
1832
|
+
import_v410.z.object({
|
|
1833
|
+
attributes: import_v410.z.record(import_v410.z.string(), import_v410.z.unknown()),
|
|
1834
|
+
fileId: import_v410.z.string(),
|
|
1835
|
+
filename: import_v410.z.string(),
|
|
1836
|
+
score: import_v410.z.number(),
|
|
1837
|
+
text: import_v410.z.string()
|
|
1848
1838
|
})
|
|
1849
1839
|
).nullable()
|
|
1850
1840
|
})
|
|
@@ -1853,35 +1843,35 @@ var fileSearchOutputSchema = (0, import_provider_utils15.lazySchema)(
|
|
|
1853
1843
|
var fileSearch = (0, import_provider_utils15.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1854
1844
|
id: "openai.file_search",
|
|
1855
1845
|
name: "file_search",
|
|
1856
|
-
inputSchema:
|
|
1846
|
+
inputSchema: import_v410.z.object({}),
|
|
1857
1847
|
outputSchema: fileSearchOutputSchema
|
|
1858
1848
|
});
|
|
1859
1849
|
|
|
1860
1850
|
// src/tool/image-generation.ts
|
|
1861
1851
|
var import_provider_utils16 = require("@ai-sdk/provider-utils");
|
|
1862
|
-
var
|
|
1852
|
+
var import_v411 = require("zod/v4");
|
|
1863
1853
|
var imageGenerationArgsSchema = (0, import_provider_utils16.lazySchema)(
|
|
1864
1854
|
() => (0, import_provider_utils16.zodSchema)(
|
|
1865
|
-
|
|
1866
|
-
background:
|
|
1867
|
-
inputFidelity:
|
|
1868
|
-
inputImageMask:
|
|
1869
|
-
fileId:
|
|
1870
|
-
imageUrl:
|
|
1855
|
+
import_v411.z.object({
|
|
1856
|
+
background: import_v411.z.enum(["auto", "opaque", "transparent"]).optional(),
|
|
1857
|
+
inputFidelity: import_v411.z.enum(["low", "high"]).optional(),
|
|
1858
|
+
inputImageMask: import_v411.z.object({
|
|
1859
|
+
fileId: import_v411.z.string().optional(),
|
|
1860
|
+
imageUrl: import_v411.z.string().optional()
|
|
1871
1861
|
}).optional(),
|
|
1872
|
-
model:
|
|
1873
|
-
moderation:
|
|
1874
|
-
outputCompression:
|
|
1875
|
-
outputFormat:
|
|
1876
|
-
partialImages:
|
|
1877
|
-
quality:
|
|
1878
|
-
size:
|
|
1862
|
+
model: import_v411.z.string().optional(),
|
|
1863
|
+
moderation: import_v411.z.enum(["auto"]).optional(),
|
|
1864
|
+
outputCompression: import_v411.z.number().int().min(0).max(100).optional(),
|
|
1865
|
+
outputFormat: import_v411.z.enum(["png", "jpeg", "webp"]).optional(),
|
|
1866
|
+
partialImages: import_v411.z.number().int().min(0).max(3).optional(),
|
|
1867
|
+
quality: import_v411.z.enum(["auto", "low", "medium", "high"]).optional(),
|
|
1868
|
+
size: import_v411.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
|
|
1879
1869
|
}).strict()
|
|
1880
1870
|
)
|
|
1881
1871
|
);
|
|
1882
|
-
var imageGenerationInputSchema = (0, import_provider_utils16.lazySchema)(() => (0, import_provider_utils16.zodSchema)(
|
|
1872
|
+
var imageGenerationInputSchema = (0, import_provider_utils16.lazySchema)(() => (0, import_provider_utils16.zodSchema)(import_v411.z.object({})));
|
|
1883
1873
|
var imageGenerationOutputSchema = (0, import_provider_utils16.lazySchema)(
|
|
1884
|
-
() => (0, import_provider_utils16.zodSchema)(
|
|
1874
|
+
() => (0, import_provider_utils16.zodSchema)(import_v411.z.object({ result: import_v411.z.string() }))
|
|
1885
1875
|
);
|
|
1886
1876
|
var imageGenerationToolFactory = (0, import_provider_utils16.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1887
1877
|
id: "openai.image_generation",
|
|
@@ -1895,23 +1885,23 @@ var imageGeneration = (args = {}) => {
|
|
|
1895
1885
|
|
|
1896
1886
|
// src/tool/local-shell.ts
|
|
1897
1887
|
var import_provider_utils17 = require("@ai-sdk/provider-utils");
|
|
1898
|
-
var
|
|
1888
|
+
var import_v412 = require("zod/v4");
|
|
1899
1889
|
var localShellInputSchema = (0, import_provider_utils17.lazySchema)(
|
|
1900
1890
|
() => (0, import_provider_utils17.zodSchema)(
|
|
1901
|
-
|
|
1902
|
-
action:
|
|
1903
|
-
type:
|
|
1904
|
-
command:
|
|
1905
|
-
timeoutMs:
|
|
1906
|
-
user:
|
|
1907
|
-
workingDirectory:
|
|
1908
|
-
env:
|
|
1891
|
+
import_v412.z.object({
|
|
1892
|
+
action: import_v412.z.object({
|
|
1893
|
+
type: import_v412.z.literal("exec"),
|
|
1894
|
+
command: import_v412.z.array(import_v412.z.string()),
|
|
1895
|
+
timeoutMs: import_v412.z.number().optional(),
|
|
1896
|
+
user: import_v412.z.string().optional(),
|
|
1897
|
+
workingDirectory: import_v412.z.string().optional(),
|
|
1898
|
+
env: import_v412.z.record(import_v412.z.string(), import_v412.z.string()).optional()
|
|
1909
1899
|
})
|
|
1910
1900
|
})
|
|
1911
1901
|
)
|
|
1912
1902
|
);
|
|
1913
1903
|
var localShellOutputSchema = (0, import_provider_utils17.lazySchema)(
|
|
1914
|
-
() => (0, import_provider_utils17.zodSchema)(
|
|
1904
|
+
() => (0, import_provider_utils17.zodSchema)(import_v412.z.object({ output: import_v412.z.string() }))
|
|
1915
1905
|
);
|
|
1916
1906
|
var localShell = (0, import_provider_utils17.createProviderDefinedToolFactoryWithOutputSchema)({
|
|
1917
1907
|
id: "openai.local_shell",
|
|
@@ -1922,40 +1912,40 @@ var localShell = (0, import_provider_utils17.createProviderDefinedToolFactoryWit
|
|
|
1922
1912
|
|
|
1923
1913
|
// src/tool/web-search.ts
|
|
1924
1914
|
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
|
1925
|
-
var
|
|
1915
|
+
var import_v413 = require("zod/v4");
|
|
1926
1916
|
var webSearchArgsSchema = (0, import_provider_utils18.lazySchema)(
|
|
1927
1917
|
() => (0, import_provider_utils18.zodSchema)(
|
|
1928
|
-
|
|
1929
|
-
filters:
|
|
1930
|
-
allowedDomains:
|
|
1918
|
+
import_v413.z.object({
|
|
1919
|
+
filters: import_v413.z.object({
|
|
1920
|
+
allowedDomains: import_v413.z.array(import_v413.z.string()).optional()
|
|
1931
1921
|
}).optional(),
|
|
1932
|
-
searchContextSize:
|
|
1933
|
-
userLocation:
|
|
1934
|
-
type:
|
|
1935
|
-
country:
|
|
1936
|
-
city:
|
|
1937
|
-
region:
|
|
1938
|
-
timezone:
|
|
1922
|
+
searchContextSize: import_v413.z.enum(["low", "medium", "high"]).optional(),
|
|
1923
|
+
userLocation: import_v413.z.object({
|
|
1924
|
+
type: import_v413.z.literal("approximate"),
|
|
1925
|
+
country: import_v413.z.string().optional(),
|
|
1926
|
+
city: import_v413.z.string().optional(),
|
|
1927
|
+
region: import_v413.z.string().optional(),
|
|
1928
|
+
timezone: import_v413.z.string().optional()
|
|
1939
1929
|
}).optional()
|
|
1940
1930
|
})
|
|
1941
1931
|
)
|
|
1942
1932
|
);
|
|
1943
1933
|
var webSearchInputSchema = (0, import_provider_utils18.lazySchema)(
|
|
1944
1934
|
() => (0, import_provider_utils18.zodSchema)(
|
|
1945
|
-
|
|
1946
|
-
action:
|
|
1947
|
-
|
|
1948
|
-
type:
|
|
1949
|
-
query:
|
|
1935
|
+
import_v413.z.object({
|
|
1936
|
+
action: import_v413.z.discriminatedUnion("type", [
|
|
1937
|
+
import_v413.z.object({
|
|
1938
|
+
type: import_v413.z.literal("search"),
|
|
1939
|
+
query: import_v413.z.string().nullish()
|
|
1950
1940
|
}),
|
|
1951
|
-
|
|
1952
|
-
type:
|
|
1953
|
-
url:
|
|
1941
|
+
import_v413.z.object({
|
|
1942
|
+
type: import_v413.z.literal("open_page"),
|
|
1943
|
+
url: import_v413.z.string()
|
|
1954
1944
|
}),
|
|
1955
|
-
|
|
1956
|
-
type:
|
|
1957
|
-
url:
|
|
1958
|
-
pattern:
|
|
1945
|
+
import_v413.z.object({
|
|
1946
|
+
type: import_v413.z.literal("find"),
|
|
1947
|
+
url: import_v413.z.string(),
|
|
1948
|
+
pattern: import_v413.z.string()
|
|
1959
1949
|
})
|
|
1960
1950
|
]).nullish()
|
|
1961
1951
|
})
|
|
@@ -1972,61 +1962,61 @@ var webSearch = (args = {}) => {
|
|
|
1972
1962
|
|
|
1973
1963
|
// src/tool/web-search-preview.ts
|
|
1974
1964
|
var import_provider_utils19 = require("@ai-sdk/provider-utils");
|
|
1975
|
-
var
|
|
1965
|
+
var import_v414 = require("zod/v4");
|
|
1976
1966
|
var webSearchPreviewArgsSchema = (0, import_provider_utils19.lazySchema)(
|
|
1977
1967
|
() => (0, import_provider_utils19.zodSchema)(
|
|
1978
|
-
|
|
1968
|
+
import_v414.z.object({
|
|
1979
1969
|
/**
|
|
1980
1970
|
* Search context size to use for the web search.
|
|
1981
1971
|
* - high: Most comprehensive context, highest cost, slower response
|
|
1982
1972
|
* - medium: Balanced context, cost, and latency (default)
|
|
1983
1973
|
* - low: Least context, lowest cost, fastest response
|
|
1984
1974
|
*/
|
|
1985
|
-
searchContextSize:
|
|
1975
|
+
searchContextSize: import_v414.z.enum(["low", "medium", "high"]).optional(),
|
|
1986
1976
|
/**
|
|
1987
1977
|
* User location information to provide geographically relevant search results.
|
|
1988
1978
|
*/
|
|
1989
|
-
userLocation:
|
|
1979
|
+
userLocation: import_v414.z.object({
|
|
1990
1980
|
/**
|
|
1991
1981
|
* Type of location (always 'approximate')
|
|
1992
1982
|
*/
|
|
1993
|
-
type:
|
|
1983
|
+
type: import_v414.z.literal("approximate"),
|
|
1994
1984
|
/**
|
|
1995
1985
|
* Two-letter ISO country code (e.g., 'US', 'GB')
|
|
1996
1986
|
*/
|
|
1997
|
-
country:
|
|
1987
|
+
country: import_v414.z.string().optional(),
|
|
1998
1988
|
/**
|
|
1999
1989
|
* City name (free text, e.g., 'Minneapolis')
|
|
2000
1990
|
*/
|
|
2001
|
-
city:
|
|
1991
|
+
city: import_v414.z.string().optional(),
|
|
2002
1992
|
/**
|
|
2003
1993
|
* Region name (free text, e.g., 'Minnesota')
|
|
2004
1994
|
*/
|
|
2005
|
-
region:
|
|
1995
|
+
region: import_v414.z.string().optional(),
|
|
2006
1996
|
/**
|
|
2007
1997
|
* IANA timezone (e.g., 'America/Chicago')
|
|
2008
1998
|
*/
|
|
2009
|
-
timezone:
|
|
1999
|
+
timezone: import_v414.z.string().optional()
|
|
2010
2000
|
}).optional()
|
|
2011
2001
|
})
|
|
2012
2002
|
)
|
|
2013
2003
|
);
|
|
2014
2004
|
var webSearchPreviewInputSchema = (0, import_provider_utils19.lazySchema)(
|
|
2015
2005
|
() => (0, import_provider_utils19.zodSchema)(
|
|
2016
|
-
|
|
2017
|
-
action:
|
|
2018
|
-
|
|
2019
|
-
type:
|
|
2020
|
-
query:
|
|
2006
|
+
import_v414.z.object({
|
|
2007
|
+
action: import_v414.z.discriminatedUnion("type", [
|
|
2008
|
+
import_v414.z.object({
|
|
2009
|
+
type: import_v414.z.literal("search"),
|
|
2010
|
+
query: import_v414.z.string().nullish()
|
|
2021
2011
|
}),
|
|
2022
|
-
|
|
2023
|
-
type:
|
|
2024
|
-
url:
|
|
2012
|
+
import_v414.z.object({
|
|
2013
|
+
type: import_v414.z.literal("open_page"),
|
|
2014
|
+
url: import_v414.z.string()
|
|
2025
2015
|
}),
|
|
2026
|
-
|
|
2027
|
-
type:
|
|
2028
|
-
url:
|
|
2029
|
-
pattern:
|
|
2016
|
+
import_v414.z.object({
|
|
2017
|
+
type: import_v414.z.literal("find"),
|
|
2018
|
+
url: import_v414.z.string(),
|
|
2019
|
+
pattern: import_v414.z.string()
|
|
2030
2020
|
})
|
|
2031
2021
|
]).nullish()
|
|
2032
2022
|
})
|
|
@@ -2118,7 +2108,7 @@ var import_provider_utils24 = require("@ai-sdk/provider-utils");
 // src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils20 = require("@ai-sdk/provider-utils");
-var
+var import_v415 = require("zod/v4");
 function isFileId(data, prefixes) {
   if (!prefixes) return false;
   return prefixes.some((prefix) => data.startsWith(prefix));
@@ -2340,11 +2330,29 @@ async function convertToOpenAIResponsesInput({
             case "error-text":
               contentValue = output.value;
               break;
-            case "content":
             case "json":
             case "error-json":
               contentValue = JSON.stringify(output.value);
               break;
+            case "content":
+              contentValue = output.value.map((item) => {
+                switch (item.type) {
+                  case "text": {
+                    return { type: "input_text", text: item.text };
+                  }
+                  case "media": {
+                    return item.mediaType.startsWith("image/") ? {
+                      type: "input_image",
+                      image_url: `data:${item.mediaType};base64,${item.data}`
+                    } : {
+                      type: "input_file",
+                      filename: "data",
+                      file_data: `data:${item.mediaType};base64,${item.data}`
+                    };
+                  }
+                }
+              });
+              break;
           }
           input.push({
             type: "function_call_output",
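This hunk is the one behavioral change in the file rather than an import rename: tool outputs of type `content` previously fell through to the `JSON.stringify` branch and are now mapped to Responses API input parts. A standalone TypeScript sketch of that mapping; the part type names `input_text`, `input_image`, and `input_file` come from the added lines above, while the helper name and the `ToolContentPart` type here are illustrative:

type ToolContentPart =
  | { type: "text"; text: string }
  | { type: "media"; mediaType: string; data: string }; // base64-encoded payload

function toResponsesFunctionOutput(parts: ToolContentPart[]) {
  return parts.map((item) =>
    item.type === "text"
      ? { type: "input_text", text: item.text }
      // Images become input_image parts; any other media type is sent as an
      // input_file carrying a data URL.
      : item.mediaType.startsWith("image/")
        ? { type: "input_image", image_url: `data:${item.mediaType};base64,${item.data}` }
        : { type: "input_file", filename: "data", file_data: `data:${item.mediaType};base64,${item.data}` }
  );
}

// Example: a tool that returns one text part and one PNG screenshot.
const output = toResponsesFunctionOutput([
  { type: "text", text: "done" },
  { type: "media", mediaType: "image/png", data: "iVBORw0KGgo..." },
]);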
@@ -2362,9 +2370,9 @@ async function convertToOpenAIResponsesInput({
   }
   return { input, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema =
-  itemId:
-  reasoningEncryptedContent:
+var openaiResponsesReasoningProviderOptionsSchema = import_v415.z.object({
+  itemId: import_v415.z.string().nullish(),
+  reasoningEncryptedContent: import_v415.z.string().nullish()
 });

 // src/responses/map-openai-responses-finish-reason.ts
@@ -2387,260 +2395,260 @@ function mapOpenAIResponseFinishReason({
|
|
|
2387
2395
|
|
|
2388
2396
|
// src/responses/openai-responses-api.ts
|
|
2389
2397
|
var import_provider_utils21 = require("@ai-sdk/provider-utils");
|
|
2390
|
-
var
|
|
2398
|
+
var import_v416 = require("zod/v4");
|
|
2391
2399
|
var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
|
|
2392
2400
|
() => (0, import_provider_utils21.zodSchema)(
|
|
2393
|
-
|
|
2394
|
-
|
|
2395
|
-
type:
|
|
2396
|
-
item_id:
|
|
2397
|
-
delta:
|
|
2398
|
-
logprobs:
|
|
2399
|
-
|
|
2400
|
-
token:
|
|
2401
|
-
logprob:
|
|
2402
|
-
top_logprobs:
|
|
2403
|
-
|
|
2404
|
-
token:
|
|
2405
|
-
logprob:
|
|
2401
|
+
import_v416.z.union([
|
|
2402
|
+
import_v416.z.object({
|
|
2403
|
+
type: import_v416.z.literal("response.output_text.delta"),
|
|
2404
|
+
item_id: import_v416.z.string(),
|
|
2405
|
+
delta: import_v416.z.string(),
|
|
2406
|
+
logprobs: import_v416.z.array(
|
|
2407
|
+
import_v416.z.object({
|
|
2408
|
+
token: import_v416.z.string(),
|
|
2409
|
+
logprob: import_v416.z.number(),
|
|
2410
|
+
top_logprobs: import_v416.z.array(
|
|
2411
|
+
import_v416.z.object({
|
|
2412
|
+
token: import_v416.z.string(),
|
|
2413
|
+
logprob: import_v416.z.number()
|
|
2406
2414
|
})
|
|
2407
2415
|
)
|
|
2408
2416
|
})
|
|
2409
2417
|
).nullish()
|
|
2410
2418
|
}),
|
|
2411
|
-
|
|
2412
|
-
type:
|
|
2413
|
-
response:
|
|
2414
|
-
incomplete_details:
|
|
2415
|
-
usage:
|
|
2416
|
-
input_tokens:
|
|
2417
|
-
input_tokens_details:
|
|
2418
|
-
output_tokens:
|
|
2419
|
-
output_tokens_details:
|
|
2419
|
+
import_v416.z.object({
|
|
2420
|
+
type: import_v416.z.enum(["response.completed", "response.incomplete"]),
|
|
2421
|
+
response: import_v416.z.object({
|
|
2422
|
+
incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
|
|
2423
|
+
usage: import_v416.z.object({
|
|
2424
|
+
input_tokens: import_v416.z.number(),
|
|
2425
|
+
input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
|
|
2426
|
+
output_tokens: import_v416.z.number(),
|
|
2427
|
+
output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
|
|
2420
2428
|
}),
|
|
2421
|
-
service_tier:
|
|
2429
|
+
service_tier: import_v416.z.string().nullish()
|
|
2422
2430
|
})
|
|
2423
2431
|
}),
|
|
2424
|
-
|
|
2425
|
-
type:
|
|
2426
|
-
response:
|
|
2427
|
-
id:
|
|
2428
|
-
created_at:
|
|
2429
|
-
model:
|
|
2430
|
-
service_tier:
|
|
2432
|
+
import_v416.z.object({
|
|
2433
|
+
type: import_v416.z.literal("response.created"),
|
|
2434
|
+
response: import_v416.z.object({
|
|
2435
|
+
id: import_v416.z.string(),
|
|
2436
|
+
created_at: import_v416.z.number(),
|
|
2437
|
+
model: import_v416.z.string(),
|
|
2438
|
+
service_tier: import_v416.z.string().nullish()
|
|
2431
2439
|
})
|
|
2432
2440
|
}),
|
|
2433
|
-
|
|
2434
|
-
type:
|
|
2435
|
-
output_index:
|
|
2436
|
-
item:
|
|
2437
|
-
|
|
2438
|
-
type:
|
|
2439
|
-
id:
|
|
2441
|
+
import_v416.z.object({
|
|
2442
|
+
type: import_v416.z.literal("response.output_item.added"),
|
|
2443
|
+
output_index: import_v416.z.number(),
|
|
2444
|
+
item: import_v416.z.discriminatedUnion("type", [
|
|
2445
|
+
import_v416.z.object({
|
|
2446
|
+
type: import_v416.z.literal("message"),
|
|
2447
|
+
id: import_v416.z.string()
|
|
2440
2448
|
}),
|
|
2441
|
-
|
|
2442
|
-
type:
|
|
2443
|
-
id:
|
|
2444
|
-
encrypted_content:
|
|
2449
|
+
import_v416.z.object({
|
|
2450
|
+
type: import_v416.z.literal("reasoning"),
|
|
2451
|
+
id: import_v416.z.string(),
|
|
2452
|
+
encrypted_content: import_v416.z.string().nullish()
|
|
2445
2453
|
}),
|
|
2446
|
-
|
|
2447
|
-
type:
|
|
2448
|
-
id:
|
|
2449
|
-
call_id:
|
|
2450
|
-
name:
|
|
2451
|
-
arguments:
|
|
2454
|
+
import_v416.z.object({
|
|
2455
|
+
type: import_v416.z.literal("function_call"),
|
|
2456
|
+
id: import_v416.z.string(),
|
|
2457
|
+
call_id: import_v416.z.string(),
|
|
2458
|
+
name: import_v416.z.string(),
|
|
2459
|
+
arguments: import_v416.z.string()
|
|
2452
2460
|
}),
|
|
2453
|
-
|
|
2454
|
-
type:
|
|
2455
|
-
id:
|
|
2456
|
-
status:
|
|
2457
|
-
action:
|
|
2458
|
-
type:
|
|
2459
|
-
query:
|
|
2461
|
+
import_v416.z.object({
|
|
2462
|
+
type: import_v416.z.literal("web_search_call"),
|
|
2463
|
+
id: import_v416.z.string(),
|
|
2464
|
+
status: import_v416.z.string(),
|
|
2465
|
+
action: import_v416.z.object({
|
|
2466
|
+
type: import_v416.z.literal("search"),
|
|
2467
|
+
query: import_v416.z.string().optional()
|
|
2460
2468
|
}).nullish()
|
|
2461
2469
|
}),
|
|
2462
|
-
|
|
2463
|
-
type:
|
|
2464
|
-
id:
|
|
2465
|
-
status:
|
|
2470
|
+
import_v416.z.object({
|
|
2471
|
+
type: import_v416.z.literal("computer_call"),
|
|
2472
|
+
id: import_v416.z.string(),
|
|
2473
|
+
status: import_v416.z.string()
|
|
2466
2474
|
}),
|
|
2467
|
-
|
|
2468
|
-
type:
|
|
2469
|
-
id:
|
|
2475
|
+
import_v416.z.object({
|
|
2476
|
+
type: import_v416.z.literal("file_search_call"),
|
|
2477
|
+
id: import_v416.z.string()
|
|
2470
2478
|
}),
|
|
2471
|
-
|
|
2472
|
-
type:
|
|
2473
|
-
id:
|
|
2479
|
+
import_v416.z.object({
|
|
2480
|
+
type: import_v416.z.literal("image_generation_call"),
|
|
2481
|
+
id: import_v416.z.string()
|
|
2474
2482
|
}),
|
|
2475
|
-
|
|
2476
|
-
type:
|
|
2477
|
-
id:
|
|
2478
|
-
container_id:
|
|
2479
|
-
code:
|
|
2480
|
-
outputs:
|
|
2481
|
-
|
|
2482
|
-
|
|
2483
|
-
|
|
2483
|
+
import_v416.z.object({
|
|
2484
|
+
type: import_v416.z.literal("code_interpreter_call"),
|
|
2485
|
+
id: import_v416.z.string(),
|
|
2486
|
+
container_id: import_v416.z.string(),
|
|
2487
|
+
code: import_v416.z.string().nullable(),
|
|
2488
|
+
outputs: import_v416.z.array(
|
|
2489
|
+
import_v416.z.discriminatedUnion("type", [
|
|
2490
|
+
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
2491
|
+
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
2484
2492
|
])
|
|
2485
2493
|
).nullable(),
|
|
2486
|
-
status:
|
|
2494
|
+
status: import_v416.z.string()
|
|
2487
2495
|
})
|
|
2488
2496
|
])
|
|
2489
2497
|
}),
|
|
2490
|
-
|
|
2491
|
-
type:
|
|
2492
|
-
output_index:
|
|
2493
|
-
item:
|
|
2494
|
-
|
|
2495
|
-
type:
|
|
2496
|
-
id:
|
|
2498
|
+
import_v416.z.object({
|
|
2499
|
+
type: import_v416.z.literal("response.output_item.done"),
|
|
2500
|
+
output_index: import_v416.z.number(),
|
|
2501
|
+
item: import_v416.z.discriminatedUnion("type", [
|
|
2502
|
+
import_v416.z.object({
|
|
2503
|
+
type: import_v416.z.literal("message"),
|
|
2504
|
+
id: import_v416.z.string()
|
|
2497
2505
|
}),
|
|
2498
|
-
|
|
2499
|
-
type:
|
|
2500
|
-
id:
|
|
2501
|
-
encrypted_content:
|
|
2506
|
+
import_v416.z.object({
|
|
2507
|
+
type: import_v416.z.literal("reasoning"),
|
|
2508
|
+
id: import_v416.z.string(),
|
|
2509
|
+
encrypted_content: import_v416.z.string().nullish()
|
|
2502
2510
|
}),
|
|
2503
|
-
|
|
2504
|
-
type:
|
|
2505
|
-
id:
|
|
2506
|
-
call_id:
|
|
2507
|
-
name:
|
|
2508
|
-
arguments:
|
|
2509
|
-
status:
|
|
2511
|
+
import_v416.z.object({
|
|
2512
|
+
type: import_v416.z.literal("function_call"),
|
|
2513
|
+
id: import_v416.z.string(),
|
|
2514
|
+
call_id: import_v416.z.string(),
|
|
2515
|
+
name: import_v416.z.string(),
|
|
2516
|
+
arguments: import_v416.z.string(),
|
|
2517
|
+
status: import_v416.z.literal("completed")
|
|
2510
2518
|
}),
|
|
2511
|
-
|
|
2512
|
-
type:
|
|
2513
|
-
id:
|
|
2514
|
-
code:
|
|
2515
|
-
container_id:
|
|
2516
|
-
outputs:
|
|
2517
|
-
|
|
2518
|
-
|
|
2519
|
-
|
|
2519
|
+
import_v416.z.object({
|
|
2520
|
+
type: import_v416.z.literal("code_interpreter_call"),
|
|
2521
|
+
id: import_v416.z.string(),
|
|
2522
|
+
code: import_v416.z.string().nullable(),
|
|
2523
|
+
container_id: import_v416.z.string(),
|
|
2524
|
+
outputs: import_v416.z.array(
|
|
2525
|
+
import_v416.z.discriminatedUnion("type", [
|
|
2526
|
+
import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
|
|
2527
|
+
import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
|
|
2520
2528
|
])
|
|
2521
2529
|
).nullable()
|
|
2522
2530
|
}),
|
|
2523
|
-
|
|
2524
|
-
type:
|
|
2525
|
-
id:
|
|
2526
|
-
result:
|
|
2531
|
+
import_v416.z.object({
|
|
2532
|
+
type: import_v416.z.literal("image_generation_call"),
|
|
2533
|
+
id: import_v416.z.string(),
|
|
2534
|
+
result: import_v416.z.string()
|
|
2527
2535
|
}),
|
|
2528
|
-
|
|
2529
|
-
type:
|
|
2530
|
-
id:
|
|
2531
|
-
status:
|
|
2532
|
-
action:
|
|
2533
|
-
|
|
2534
|
-
type:
|
|
2535
|
-
query:
|
|
2536
|
+
import_v416.z.object({
|
|
2537
|
+
type: import_v416.z.literal("web_search_call"),
|
|
2538
|
+
id: import_v416.z.string(),
|
|
2539
|
+
status: import_v416.z.string(),
|
|
2540
|
+
action: import_v416.z.discriminatedUnion("type", [
|
|
2541
|
+
import_v416.z.object({
|
|
2542
|
+
type: import_v416.z.literal("search"),
|
|
2543
|
+
query: import_v416.z.string().nullish()
|
|
2536
2544
|
}),
|
|
2537
|
-
|
|
2538
|
-
type:
|
|
2539
|
-
url:
|
|
2545
|
+
import_v416.z.object({
|
|
2546
|
+
type: import_v416.z.literal("open_page"),
|
|
2547
|
+
url: import_v416.z.string()
|
|
2540
2548
|
}),
|
|
2541
|
-
|
|
2542
|
-
type:
|
|
2543
|
-
url:
|
|
2544
|
-
pattern:
|
|
2549
|
+
import_v416.z.object({
|
|
2550
|
+
type: import_v416.z.literal("find"),
|
|
2551
|
+
url: import_v416.z.string(),
|
|
2552
|
+
pattern: import_v416.z.string()
|
|
2545
2553
|
})
|
|
2546
2554
|
]).nullish()
|
|
2547
2555
|
}),
|
|
2548
|
-
|
|
2549
|
-
type:
|
|
2550
|
-
id:
|
|
2551
|
-
queries:
|
|
2552
|
-
results:
|
|
2553
|
-
|
|
2554
|
-
attributes:
|
|
2555
|
-
file_id:
|
|
2556
|
-
filename:
|
|
2557
|
-
score:
|
|
2558
|
-
text:
|
|
2556
|
+
import_v416.z.object({
|
|
2557
|
+
type: import_v416.z.literal("file_search_call"),
|
|
2558
|
+
id: import_v416.z.string(),
|
|
2559
|
+
queries: import_v416.z.array(import_v416.z.string()),
|
|
2560
|
+
results: import_v416.z.array(
|
|
2561
|
+
import_v416.z.object({
|
|
2562
|
+
attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
|
|
2563
|
+
file_id: import_v416.z.string(),
|
|
2564
|
+
filename: import_v416.z.string(),
|
|
2565
|
+
score: import_v416.z.number(),
|
|
2566
|
+
text: import_v416.z.string()
|
|
2559
2567
|
})
|
|
2560
2568
|
).nullish()
|
|
2561
2569
|
}),
|
|
2562
|
-
|
|
2563
|
-
type:
|
|
2564
|
-
id:
|
|
2565
|
-
call_id:
|
|
2566
|
-
action:
|
|
2567
|
-
type:
|
|
2568
|
-
command:
|
|
2569
|
-
timeout_ms:
|
|
2570
|
-
user:
|
|
2571
|
-
working_directory:
|
|
2572
|
-
env:
|
|
2570
|
+
import_v416.z.object({
|
|
2571
|
+
type: import_v416.z.literal("local_shell_call"),
|
|
2572
|
+
id: import_v416.z.string(),
|
|
2573
|
+
call_id: import_v416.z.string(),
|
|
2574
|
+
action: import_v416.z.object({
|
|
2575
|
+
type: import_v416.z.literal("exec"),
|
|
2576
|
+
command: import_v416.z.array(import_v416.z.string()),
|
|
2577
|
+
timeout_ms: import_v416.z.number().optional(),
|
|
2578
|
+
user: import_v416.z.string().optional(),
|
|
2579
|
+
working_directory: import_v416.z.string().optional(),
|
|
2580
|
+
env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
|
|
2573
2581
|
})
|
|
2574
2582
|
}),
|
|
2575
|
-
|
|
2576
|
-
type:
|
|
2577
|
-
id:
|
|
2578
|
-
status:
|
|
2583
|
+
import_v416.z.object({
|
|
2584
|
+
type: import_v416.z.literal("computer_call"),
|
|
2585
|
+
id: import_v416.z.string(),
|
|
2586
|
+
status: import_v416.z.literal("completed")
|
|
2579
2587
|
})
|
|
2580
2588
|
])
|
|
2581
2589
|
}),
|
|
2582
|
-
|
|
2583
|
-
type:
|
|
2584
|
-
item_id:
|
|
2585
|
-
-        output_index:
-        delta:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.function_call_arguments.delta"),
+        item_id: import_v416.z.string(),
+        output_index: import_v416.z.number(),
+        delta: import_v416.z.string()
       }),
-
-        type:
-        item_id:
-        output_index:
-        partial_image_b64:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.image_generation_call.partial_image"),
+        item_id: import_v416.z.string(),
+        output_index: import_v416.z.number(),
+        partial_image_b64: import_v416.z.string()
       }),
-
-        type:
-        item_id:
-        output_index:
-        delta:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.code_interpreter_call_code.delta"),
+        item_id: import_v416.z.string(),
+        output_index: import_v416.z.number(),
+        delta: import_v416.z.string()
       }),
-
-        type:
-        item_id:
-        output_index:
-        code:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.code_interpreter_call_code.done"),
+        item_id: import_v416.z.string(),
+        output_index: import_v416.z.number(),
+        code: import_v416.z.string()
       }),
-
-        type:
-        annotation:
-
-          type:
-          url:
-          title:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.output_text.annotation.added"),
+        annotation: import_v416.z.discriminatedUnion("type", [
+          import_v416.z.object({
+            type: import_v416.z.literal("url_citation"),
+            url: import_v416.z.string(),
+            title: import_v416.z.string()
           }),
-
-          type:
-          file_id:
-          filename:
-          index:
-          start_index:
-          end_index:
-          quote:
+          import_v416.z.object({
+            type: import_v416.z.literal("file_citation"),
+            file_id: import_v416.z.string(),
+            filename: import_v416.z.string().nullish(),
+            index: import_v416.z.number().nullish(),
+            start_index: import_v416.z.number().nullish(),
+            end_index: import_v416.z.number().nullish(),
+            quote: import_v416.z.string().nullish()
          })
        ])
      }),
-
-        type:
-        item_id:
-        summary_index:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.reasoning_summary_part.added"),
+        item_id: import_v416.z.string(),
+        summary_index: import_v416.z.number()
       }),
-
-        type:
-        item_id:
-        summary_index:
-        delta:
+      import_v416.z.object({
+        type: import_v416.z.literal("response.reasoning_summary_text.delta"),
+        item_id: import_v416.z.string(),
+        summary_index: import_v416.z.number(),
+        delta: import_v416.z.string()
       }),
-
-        type:
-        code:
-        message:
-        param:
-        sequence_number:
+      import_v416.z.object({
+        type: import_v416.z.literal("error"),
+        code: import_v416.z.string(),
+        message: import_v416.z.string(),
+        param: import_v416.z.string().nullish(),
+        sequence_number: import_v416.z.number()
       }),
-
+      import_v416.z.object({ type: import_v416.z.string() }).loose().transform((value) => ({
         type: "unknown_chunk",
         message: value.type
       }))
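Note: the rewritten chunk schema above routes every zod reference through the bundled `zod/v4` namespace import (`import_v416`) and keeps a catch-all variant for stream events the SDK does not recognize. The sketch below is not part of the package; it restates that catch-all variant against plain `zod/v4` to show how unknown events are normalized.

```ts
// Sketch only: mirrors the final union member of openaiResponsesChunkSchema.
import { z } from "zod/v4";

const unknownChunk = z
  .object({ type: z.string() })
  .loose() // pass unknown keys through instead of stripping them
  .transform((value) => ({ type: "unknown_chunk" as const, message: value.type }));

// An event type the SDK does not know yet still parses:
console.log(unknownChunk.parse({ type: "response.some_future_event", data: 42 }));
// -> { type: "unknown_chunk", message: "response.some_future_event" }
```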
@@ -2650,158 +2658,158 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
 );
 var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
   () => (0, import_provider_utils21.zodSchema)(
-
-      id:
-      created_at:
-      error:
-        code:
-        message:
+    import_v416.z.object({
+      id: import_v416.z.string(),
+      created_at: import_v416.z.number(),
+      error: import_v416.z.object({
+        code: import_v416.z.string(),
+        message: import_v416.z.string()
       }).nullish(),
-      model:
-      output:
-
-
-          type:
-          role:
-          id:
-          content:
-
-            type:
-            text:
-            logprobs:
-
-              token:
-              logprob:
-              top_logprobs:
-
-                token:
-                logprob:
+      model: import_v416.z.string(),
+      output: import_v416.z.array(
+        import_v416.z.discriminatedUnion("type", [
+          import_v416.z.object({
+            type: import_v416.z.literal("message"),
+            role: import_v416.z.literal("assistant"),
+            id: import_v416.z.string(),
+            content: import_v416.z.array(
+              import_v416.z.object({
+                type: import_v416.z.literal("output_text"),
+                text: import_v416.z.string(),
+                logprobs: import_v416.z.array(
+                  import_v416.z.object({
+                    token: import_v416.z.string(),
+                    logprob: import_v416.z.number(),
+                    top_logprobs: import_v416.z.array(
+                      import_v416.z.object({
+                        token: import_v416.z.string(),
+                        logprob: import_v416.z.number()
                       })
                     )
                   })
                 ).nullish(),
-                annotations:
-
-
-                    type:
-                    start_index:
-                    end_index:
-                    url:
-                    title:
+                annotations: import_v416.z.array(
+                  import_v416.z.discriminatedUnion("type", [
+                    import_v416.z.object({
+                      type: import_v416.z.literal("url_citation"),
+                      start_index: import_v416.z.number(),
+                      end_index: import_v416.z.number(),
+                      url: import_v416.z.string(),
+                      title: import_v416.z.string()
                     }),
-
-                    type:
-                    file_id:
-                    filename:
-                    index:
-                    start_index:
-                    end_index:
-                    quote:
+                    import_v416.z.object({
+                      type: import_v416.z.literal("file_citation"),
+                      file_id: import_v416.z.string(),
+                      filename: import_v416.z.string().nullish(),
+                      index: import_v416.z.number().nullish(),
+                      start_index: import_v416.z.number().nullish(),
+                      end_index: import_v416.z.number().nullish(),
+                      quote: import_v416.z.string().nullish()
                     }),
-
-                    type:
+                    import_v416.z.object({
+                      type: import_v416.z.literal("container_file_citation")
                     })
                   ])
                 )
               })
             )
           }),
-
-          type:
-          id:
-          status:
-          action:
-
-            type:
-            query:
+          import_v416.z.object({
+            type: import_v416.z.literal("web_search_call"),
+            id: import_v416.z.string(),
+            status: import_v416.z.string(),
+            action: import_v416.z.discriminatedUnion("type", [
+              import_v416.z.object({
+                type: import_v416.z.literal("search"),
+                query: import_v416.z.string().nullish()
              }),
-
-            type:
-            url:
+              import_v416.z.object({
+                type: import_v416.z.literal("open_page"),
+                url: import_v416.z.string()
              }),
-
-            type:
-            url:
-            pattern:
+              import_v416.z.object({
+                type: import_v416.z.literal("find"),
+                url: import_v416.z.string(),
+                pattern: import_v416.z.string()
              })
            ]).nullish()
          }),
-
-          type:
-          id:
-          queries:
-          results:
-
-            attributes:
-            file_id:
-            filename:
-            score:
-            text:
+          import_v416.z.object({
+            type: import_v416.z.literal("file_search_call"),
+            id: import_v416.z.string(),
+            queries: import_v416.z.array(import_v416.z.string()),
+            results: import_v416.z.array(
+              import_v416.z.object({
+                attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
+                file_id: import_v416.z.string(),
+                filename: import_v416.z.string(),
+                score: import_v416.z.number(),
+                text: import_v416.z.string()
              })
            ).nullish()
          }),
-
-          type:
-          id:
-          code:
-          container_id:
-          outputs:
-
-
-
+          import_v416.z.object({
+            type: import_v416.z.literal("code_interpreter_call"),
+            id: import_v416.z.string(),
+            code: import_v416.z.string().nullable(),
+            container_id: import_v416.z.string(),
+            outputs: import_v416.z.array(
+              import_v416.z.discriminatedUnion("type", [
+                import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
+                import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
              ])
            ).nullable()
          }),
-
-          type:
-          id:
-          result:
+          import_v416.z.object({
+            type: import_v416.z.literal("image_generation_call"),
+            id: import_v416.z.string(),
+            result: import_v416.z.string()
          }),
-
-          type:
-          id:
-          call_id:
-          action:
-            type:
-            command:
-            timeout_ms:
-            user:
-            working_directory:
-            env:
+          import_v416.z.object({
+            type: import_v416.z.literal("local_shell_call"),
+            id: import_v416.z.string(),
+            call_id: import_v416.z.string(),
+            action: import_v416.z.object({
+              type: import_v416.z.literal("exec"),
+              command: import_v416.z.array(import_v416.z.string()),
+              timeout_ms: import_v416.z.number().optional(),
+              user: import_v416.z.string().optional(),
+              working_directory: import_v416.z.string().optional(),
+              env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
            })
          }),
-
-          type:
-          call_id:
-          name:
-          arguments:
-          id:
+          import_v416.z.object({
+            type: import_v416.z.literal("function_call"),
+            call_id: import_v416.z.string(),
+            name: import_v416.z.string(),
+            arguments: import_v416.z.string(),
+            id: import_v416.z.string()
          }),
-
-          type:
-          id:
-          status:
+          import_v416.z.object({
+            type: import_v416.z.literal("computer_call"),
+            id: import_v416.z.string(),
+            status: import_v416.z.string().optional()
          }),
-
-          type:
-          id:
-          encrypted_content:
-          summary:
-
-            type:
-            text:
+          import_v416.z.object({
+            type: import_v416.z.literal("reasoning"),
+            id: import_v416.z.string(),
+            encrypted_content: import_v416.z.string().nullish(),
+            summary: import_v416.z.array(
+              import_v416.z.object({
+                type: import_v416.z.literal("summary_text"),
+                text: import_v416.z.string()
              })
            )
          })
        ])
      ),
-      service_tier:
-      incomplete_details:
-      usage:
-        input_tokens:
-        input_tokens_details:
-        output_tokens:
-        output_tokens_details:
+      service_tier: import_v416.z.string().nullish(),
+      incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
+      usage: import_v416.z.object({
+        input_tokens: import_v416.z.number(),
+        input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
+        output_tokens: import_v416.z.number(),
+        output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
      })
    })
  )
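Note: `openaiResponsesResponseSchema` above describes the non-streaming payload the provider expects back from the Responses API. The object below is an illustrative minimal value that would satisfy the reconstructed schema; all concrete values are invented.

```ts
// Sketch only: a minimal payload accepted by the response schema above.
const exampleResponse = {
  id: "resp_123",
  created_at: 1730000000,
  model: "o1",
  output: [
    {
      type: "message",
      role: "assistant",
      id: "msg_123",
      // logprobs is nullish and can be omitted; annotations is required.
      content: [{ type: "output_text", text: "Hello!", annotations: [] }],
    },
  ],
  // the *_details sub-objects are nullish and can be omitted
  usage: { input_tokens: 10, output_tokens: 5 },
};
```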
@@ -2809,7 +2817,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
 
 // src/responses/openai-responses-options.ts
 var import_provider_utils22 = require("@ai-sdk/provider-utils");
-var
+var import_v417 = require("zod/v4");
 var TOP_LOGPROBS_MAX = 20;
 var openaiResponsesReasoningModelIds = [
   "o1",
@@ -2870,15 +2878,15 @@ var openaiResponsesModelIds = [
 ];
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
   () => (0, import_provider_utils22.zodSchema)(
-
-      include:
-
+    import_v417.z.object({
+      include: import_v417.z.array(
+        import_v417.z.enum([
           "reasoning.encrypted_content",
           "file_search_call.results",
           "message.output_text.logprobs"
         ])
       ).nullish(),
-      instructions:
+      instructions: import_v417.z.string().nullish(),
       /**
        * Return the log probabilities of the tokens.
        *
@@ -2891,25 +2899,25 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
        * @see https://platform.openai.com/docs/api-reference/responses/create
        * @see https://cookbook.openai.com/examples/using_logprobs
        */
-      logprobs:
+      logprobs: import_v417.z.union([import_v417.z.boolean(), import_v417.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
       /**
        * The maximum number of total calls to built-in tools that can be processed in a response.
        * This maximum number applies across all built-in tool calls, not per individual tool.
        * Any further attempts to call a tool by the model will be ignored.
        */
-      maxToolCalls:
-      metadata:
-      parallelToolCalls:
-      previousResponseId:
-      promptCacheKey:
-      reasoningEffort:
-      reasoningSummary:
-      safetyIdentifier:
-      serviceTier:
-      store:
-      strictJsonSchema:
-      textVerbosity:
-      user:
+      maxToolCalls: import_v417.z.number().nullish(),
+      metadata: import_v417.z.any().nullish(),
+      parallelToolCalls: import_v417.z.boolean().nullish(),
+      previousResponseId: import_v417.z.string().nullish(),
+      promptCacheKey: import_v417.z.string().nullish(),
+      reasoningEffort: import_v417.z.string().nullish(),
+      reasoningSummary: import_v417.z.string().nullish(),
+      safetyIdentifier: import_v417.z.string().nullish(),
+      serviceTier: import_v417.z.enum(["auto", "flex", "priority", "default"]).nullish(),
+      store: import_v417.z.boolean().nullish(),
+      strictJsonSchema: import_v417.z.boolean().nullish(),
+      textVerbosity: import_v417.z.enum(["low", "medium", "high"]).nullish(),
+      user: import_v417.z.string().nullish()
     })
   )
 );
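Note: the keys in `openaiResponsesProviderOptionsSchema` are what callers can pass under `providerOptions.openai` when using a Responses model. The sketch below assumes the usual AI SDK v5 call surface (`generateText` from the `ai` package and a configured `openai` provider); only the option names and allowed values come from the schema above, the prompt and `reasoningSummary` value are illustrative.

```ts
// Sketch only: passing Responses provider options through providerOptions.openai.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai.responses("o1"),
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      reasoningSummary: "auto", // plain string per the schema; value is illustrative
      serviceTier: "flex",      // one of "auto" | "flex" | "priority" | "default"
      textVerbosity: "low",     // one of "low" | "medium" | "high"
      store: false,
    },
  },
});

console.log(text);
```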
@@ -4049,12 +4057,12 @@ var import_provider_utils26 = require("@ai-sdk/provider-utils");
 
 // src/speech/openai-speech-options.ts
 var import_provider_utils25 = require("@ai-sdk/provider-utils");
-var
+var import_v418 = require("zod/v4");
 var openaiSpeechProviderOptionsSchema = (0, import_provider_utils25.lazyValidator)(
   () => (0, import_provider_utils25.zodSchema)(
-
-      instructions:
-      speed:
+    import_v418.z.object({
+      instructions: import_v418.z.string().nullish(),
+      speed: import_v418.z.number().min(0.25).max(4).default(1).nullish()
     })
   )
 );
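Note: the speech provider options carry an `instructions` string and a `speed` clamped to 0.25–4 with a default of 1. The sketch below is not from the package; it restates those bounds with plain `zod/v4` to make the validation behavior explicit.

```ts
// Sketch only: mirrors openaiSpeechProviderOptionsSchema's speed constraint.
import { z } from "zod/v4";

const speechOptions = z.object({
  instructions: z.string().nullish(),
  speed: z.number().min(0.25).max(4).default(1).nullish(),
});

speechOptions.parse({ speed: 1.5 });   // ok
// speechOptions.parse({ speed: 10 }); // would throw: above the 4.0 maximum
```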
@@ -4165,32 +4173,32 @@ var import_provider_utils29 = require("@ai-sdk/provider-utils");
 
 // src/transcription/openai-transcription-api.ts
 var import_provider_utils27 = require("@ai-sdk/provider-utils");
-var
+var import_v419 = require("zod/v4");
 var openaiTranscriptionResponseSchema = (0, import_provider_utils27.lazyValidator)(
   () => (0, import_provider_utils27.zodSchema)(
-
-      text:
-      language:
-      duration:
-      words:
-
-          word:
-          start:
-          end:
+    import_v419.z.object({
+      text: import_v419.z.string(),
+      language: import_v419.z.string().nullish(),
+      duration: import_v419.z.number().nullish(),
+      words: import_v419.z.array(
+        import_v419.z.object({
+          word: import_v419.z.string(),
+          start: import_v419.z.number(),
+          end: import_v419.z.number()
         })
       ).nullish(),
-      segments:
-
-          id:
-          seek:
-          start:
-          end:
-          text:
-          tokens:
-          temperature:
-          avg_logprob:
-          compression_ratio:
-          no_speech_prob:
+      segments: import_v419.z.array(
+        import_v419.z.object({
+          id: import_v419.z.number(),
+          seek: import_v419.z.number(),
+          start: import_v419.z.number(),
+          end: import_v419.z.number(),
+          text: import_v419.z.string(),
+          tokens: import_v419.z.array(import_v419.z.number()),
+          temperature: import_v419.z.number(),
+          avg_logprob: import_v419.z.number(),
+          compression_ratio: import_v419.z.number(),
+          no_speech_prob: import_v419.z.number()
         })
       ).nullish()
     })
@@ -4199,32 +4207,32 @@ var openaiTranscriptionResponseSchema = (0, import_provider_utils27.lazyValidato
 
 // src/transcription/openai-transcription-options.ts
 var import_provider_utils28 = require("@ai-sdk/provider-utils");
-var
+var import_v420 = require("zod/v4");
 var openAITranscriptionProviderOptions = (0, import_provider_utils28.lazyValidator)(
   () => (0, import_provider_utils28.zodSchema)(
-
+    import_v420.z.object({
       /**
        * Additional information to include in the transcription response.
        */
-      include:
+      include: import_v420.z.array(import_v420.z.string()).optional(),
       /**
        * The language of the input audio in ISO-639-1 format.
        */
-      language:
+      language: import_v420.z.string().optional(),
       /**
        * An optional text to guide the model's style or continue a previous audio segment.
        */
-      prompt:
+      prompt: import_v420.z.string().optional(),
       /**
        * The sampling temperature, between 0 and 1.
        * @default 0
       */
-      temperature:
+      temperature: import_v420.z.number().min(0).max(1).default(0).optional(),
       /**
        * The timestamp granularities to populate for this transcription.
        * @default ['segment']
       */
-      timestampGranularities:
+      timestampGranularities: import_v420.z.array(import_v420.z.enum(["word", "segment"])).default(["segment"]).optional()
     })
   )
 );
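Note: the transcription provider options map onto OpenAI's transcription request parameters. The sketch below assumes the AI SDK's experimental transcription entry point (`experimental_transcribe` from the `ai` package) and a local audio file; only the option names, ranges, and defaults come from the schema above, while the model id and file path are illustrative.

```ts
// Sketch only: passing transcription provider options through providerOptions.openai.
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("./meeting.mp3"),
  providerOptions: {
    openai: {
      language: "en",                   // ISO-639-1, per the option docs above
      temperature: 0,                   // 0..1, defaults to 0
      timestampGranularities: ["word"], // "word" and/or "segment"
    },
  },
});

console.log(result.text);
```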
@@ -4397,7 +4405,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.46" : "0.0.0-test";
+var VERSION = true ? "2.0.48" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {