@ai-sdk/openai 2.1.0-beta.9 → 3.0.0-beta.18

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in the public registry.
package/dist/index.js CHANGED
@@ -1,7 +1,9 @@
1
1
  "use strict";
2
+ var __create = Object.create;
2
3
  var __defProp = Object.defineProperty;
3
4
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
5
  var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
5
7
  var __hasOwnProp = Object.prototype.hasOwnProperty;
6
8
  var __export = (target, all) => {
7
9
  for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
15
17
  }
16
18
  return to;
17
19
  };
20
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
+ // If the importer is in node compatibility mode or this is not an ESM
22
+ // file that has been converted to a CommonJS file using a Babel-
23
+ // compatible transform (i.e. "__esModule" has not been set), then set
24
+ // "default" to the CommonJS "module.exports" for node compatibility.
25
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
+ mod
27
+ ));
18
28
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
29
 
20
30
  // src/index.ts
@@ -27,25 +37,24 @@ __export(src_exports, {
27
37
  module.exports = __toCommonJS(src_exports);
28
38
 
29
39
  // src/openai-provider.ts
30
- var import_provider_utils17 = require("@ai-sdk/provider-utils");
40
+ var import_provider_utils30 = require("@ai-sdk/provider-utils");
31
41
 
32
42
  // src/chat/openai-chat-language-model.ts
33
43
  var import_provider3 = require("@ai-sdk/provider");
34
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
35
- var import_v43 = require("zod/v4");
44
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
36
45
 
37
46
  // src/openai-error.ts
38
- var import_v4 = require("zod/v4");
47
+ var z = __toESM(require("zod/v4"));
39
48
  var import_provider_utils = require("@ai-sdk/provider-utils");
40
- var openaiErrorDataSchema = import_v4.z.object({
41
- error: import_v4.z.object({
42
- message: import_v4.z.string(),
49
+ var openaiErrorDataSchema = z.object({
50
+ error: z.object({
51
+ message: z.string(),
43
52
  // The additional information below is handled loosely to support
44
53
  // OpenAI-compatible providers that have slightly different error
45
54
  // responses:
46
- type: import_v4.z.string().nullish(),
47
- param: import_v4.z.any().nullish(),
48
- code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
55
+ type: z.string().nullish(),
56
+ param: z.any().nullish(),
57
+ code: z.union([z.string(), z.number()]).nullish()
49
58
  })
50
59
  });
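
For reference, a minimal payload that openaiErrorDataSchema above accepts — the values are illustrative, and per the schema type, param, and code may be absent or null, with code either a string or a number:

const exampleOpenAIErrorData = {
  error: {
    message: 'Rate limit reached for requests.',
    type: 'rate_limit_error',
    param: null,
    code: 'rate_limit_exceeded',
  },
};
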
51
60
  var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -60,6 +69,7 @@ function convertToOpenAIChatMessages({
60
69
  prompt,
61
70
  systemMessageMode = "system"
62
71
  }) {
72
+ var _a;
63
73
  const messages = [];
64
74
  const warnings = [];
65
75
  for (const { role, content } of prompt) {
@@ -98,7 +108,7 @@ function convertToOpenAIChatMessages({
98
108
  messages.push({
99
109
  role: "user",
100
110
  content: content.map((part, index) => {
101
- var _a, _b, _c;
111
+ var _a2, _b, _c;
102
112
  switch (part.type) {
103
113
  case "text": {
104
114
  return { type: "text", text: part.text };
@@ -111,7 +121,7 @@ function convertToOpenAIChatMessages({
111
121
  image_url: {
112
122
  url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`,
113
123
  // OpenAI specific extension: image detail
114
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
124
+ detail: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b.imageDetail
115
125
  }
116
126
  };
117
127
  } else if (part.mediaType.startsWith("audio/")) {
@@ -208,6 +218,9 @@ function convertToOpenAIChatMessages({
208
218
  case "error-text":
209
219
  contentValue = output.value;
210
220
  break;
221
+ case "execution-denied":
222
+ contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
223
+ break;
211
224
  case "content":
212
225
  case "json":
213
226
  case "error-json":
@@ -261,95 +274,238 @@ function mapOpenAIFinishReason(finishReason) {
261
274
  }
262
275
  }
263
276
 
277
+ // src/chat/openai-chat-api.ts
278
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
279
+ var z2 = __toESM(require("zod/v4"));
280
+ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
281
+ () => (0, import_provider_utils3.zodSchema)(
282
+ z2.object({
283
+ id: z2.string().nullish(),
284
+ created: z2.number().nullish(),
285
+ model: z2.string().nullish(),
286
+ choices: z2.array(
287
+ z2.object({
288
+ message: z2.object({
289
+ role: z2.literal("assistant").nullish(),
290
+ content: z2.string().nullish(),
291
+ tool_calls: z2.array(
292
+ z2.object({
293
+ id: z2.string().nullish(),
294
+ type: z2.literal("function"),
295
+ function: z2.object({
296
+ name: z2.string(),
297
+ arguments: z2.string()
298
+ })
299
+ })
300
+ ).nullish(),
301
+ annotations: z2.array(
302
+ z2.object({
303
+ type: z2.literal("url_citation"),
304
+ start_index: z2.number(),
305
+ end_index: z2.number(),
306
+ url: z2.string(),
307
+ title: z2.string()
308
+ })
309
+ ).nullish()
310
+ }),
311
+ index: z2.number(),
312
+ logprobs: z2.object({
313
+ content: z2.array(
314
+ z2.object({
315
+ token: z2.string(),
316
+ logprob: z2.number(),
317
+ top_logprobs: z2.array(
318
+ z2.object({
319
+ token: z2.string(),
320
+ logprob: z2.number()
321
+ })
322
+ )
323
+ })
324
+ ).nullish()
325
+ }).nullish(),
326
+ finish_reason: z2.string().nullish()
327
+ })
328
+ ),
329
+ usage: z2.object({
330
+ prompt_tokens: z2.number().nullish(),
331
+ completion_tokens: z2.number().nullish(),
332
+ total_tokens: z2.number().nullish(),
333
+ prompt_tokens_details: z2.object({
334
+ cached_tokens: z2.number().nullish()
335
+ }).nullish(),
336
+ completion_tokens_details: z2.object({
337
+ reasoning_tokens: z2.number().nullish(),
338
+ accepted_prediction_tokens: z2.number().nullish(),
339
+ rejected_prediction_tokens: z2.number().nullish()
340
+ }).nullish()
341
+ }).nullish()
342
+ })
343
+ )
344
+ );
345
+ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
346
+ () => (0, import_provider_utils3.zodSchema)(
347
+ z2.union([
348
+ z2.object({
349
+ id: z2.string().nullish(),
350
+ created: z2.number().nullish(),
351
+ model: z2.string().nullish(),
352
+ choices: z2.array(
353
+ z2.object({
354
+ delta: z2.object({
355
+ role: z2.enum(["assistant"]).nullish(),
356
+ content: z2.string().nullish(),
357
+ tool_calls: z2.array(
358
+ z2.object({
359
+ index: z2.number(),
360
+ id: z2.string().nullish(),
361
+ type: z2.literal("function").nullish(),
362
+ function: z2.object({
363
+ name: z2.string().nullish(),
364
+ arguments: z2.string().nullish()
365
+ })
366
+ })
367
+ ).nullish(),
368
+ annotations: z2.array(
369
+ z2.object({
370
+ type: z2.literal("url_citation"),
371
+ start_index: z2.number(),
372
+ end_index: z2.number(),
373
+ url: z2.string(),
374
+ title: z2.string()
375
+ })
376
+ ).nullish()
377
+ }).nullish(),
378
+ logprobs: z2.object({
379
+ content: z2.array(
380
+ z2.object({
381
+ token: z2.string(),
382
+ logprob: z2.number(),
383
+ top_logprobs: z2.array(
384
+ z2.object({
385
+ token: z2.string(),
386
+ logprob: z2.number()
387
+ })
388
+ )
389
+ })
390
+ ).nullish()
391
+ }).nullish(),
392
+ finish_reason: z2.string().nullish(),
393
+ index: z2.number()
394
+ })
395
+ ),
396
+ usage: z2.object({
397
+ prompt_tokens: z2.number().nullish(),
398
+ completion_tokens: z2.number().nullish(),
399
+ total_tokens: z2.number().nullish(),
400
+ prompt_tokens_details: z2.object({
401
+ cached_tokens: z2.number().nullish()
402
+ }).nullish(),
403
+ completion_tokens_details: z2.object({
404
+ reasoning_tokens: z2.number().nullish(),
405
+ accepted_prediction_tokens: z2.number().nullish(),
406
+ rejected_prediction_tokens: z2.number().nullish()
407
+ }).nullish()
408
+ }).nullish()
409
+ }),
410
+ openaiErrorDataSchema
411
+ ])
412
+ )
413
+ );
414
+
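
The chat response and chunk schemas above are now wrapped in lazyValidator(() => zodSchema(...)) from @ai-sdk/provider-utils instead of being constructed at require time. As a rough sketch of the idea (an assumption for illustration, not the actual provider-utils implementation), such a wrapper builds the zod schema on first use and caches it, so importing the provider no longer pays the schema-construction cost up front:

const lazyValidatorSketch = <T>(create: () => T) => {
  let cached: T | undefined;
  // construct the validator once, on first access, then reuse it
  return () => (cached ??= create());
};
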
264
415
  // src/chat/openai-chat-options.ts
265
- var import_v42 = require("zod/v4");
266
- var openaiChatLanguageModelOptions = import_v42.z.object({
267
- /**
268
- * Modify the likelihood of specified tokens appearing in the completion.
269
- *
270
- * Accepts a JSON object that maps tokens (specified by their token ID in
271
- * the GPT tokenizer) to an associated bias value from -100 to 100.
272
- */
273
- logitBias: import_v42.z.record(import_v42.z.coerce.number(), import_v42.z.number()).optional(),
274
- /**
275
- * Return the log probabilities of the tokens.
276
- *
277
- * Setting to true will return the log probabilities of the tokens that
278
- * were generated.
279
- *
280
- * Setting to a number will return the log probabilities of the top n
281
- * tokens that were generated.
282
- */
283
- logprobs: import_v42.z.union([import_v42.z.boolean(), import_v42.z.number()]).optional(),
284
- /**
285
- * Whether to enable parallel function calling during tool use. Default to true.
286
- */
287
- parallelToolCalls: import_v42.z.boolean().optional(),
288
- /**
289
- * A unique identifier representing your end-user, which can help OpenAI to
290
- * monitor and detect abuse.
291
- */
292
- user: import_v42.z.string().optional(),
293
- /**
294
- * Reasoning effort for reasoning models. Defaults to `medium`.
295
- */
296
- reasoningEffort: import_v42.z.enum(["minimal", "low", "medium", "high"]).optional(),
297
- /**
298
- * Maximum number of completion tokens to generate. Useful for reasoning models.
299
- */
300
- maxCompletionTokens: import_v42.z.number().optional(),
301
- /**
302
- * Whether to enable persistence in responses API.
303
- */
304
- store: import_v42.z.boolean().optional(),
305
- /**
306
- * Metadata to associate with the request.
307
- */
308
- metadata: import_v42.z.record(import_v42.z.string().max(64), import_v42.z.string().max(512)).optional(),
309
- /**
310
- * Parameters for prediction mode.
311
- */
312
- prediction: import_v42.z.record(import_v42.z.string(), import_v42.z.any()).optional(),
313
- /**
314
- * Whether to use structured outputs.
315
- *
316
- * @default true
317
- */
318
- structuredOutputs: import_v42.z.boolean().optional(),
319
- /**
320
- * Service tier for the request.
321
- * - 'auto': Default service tier
322
- * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
323
- * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
324
- *
325
- * @default 'auto'
326
- */
327
- serviceTier: import_v42.z.enum(["auto", "flex", "priority"]).optional(),
328
- /**
329
- * Whether to use strict JSON schema validation.
330
- *
331
- * @default false
332
- */
333
- strictJsonSchema: import_v42.z.boolean().optional(),
334
- /**
335
- * Controls the verbosity of the model's responses.
336
- * Lower values will result in more concise responses, while higher values will result in more verbose responses.
337
- */
338
- textVerbosity: import_v42.z.enum(["low", "medium", "high"]).optional(),
339
- /**
340
- * A cache key for prompt caching. Allows manual control over prompt caching behavior.
341
- * Useful for improving cache hit rates and working around automatic caching issues.
342
- */
343
- promptCacheKey: import_v42.z.string().optional(),
344
- /**
345
- * A stable identifier used to help detect users of your application
346
- * that may be violating OpenAI's usage policies. The IDs should be a
347
- * string that uniquely identifies each user. We recommend hashing their
348
- * username or email address, in order to avoid sending us any identifying
349
- * information.
350
- */
351
- safetyIdentifier: import_v42.z.string().optional()
352
- });
416
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
417
+ var z3 = __toESM(require("zod/v4"));
418
+ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
419
+ () => (0, import_provider_utils4.zodSchema)(
420
+ z3.object({
421
+ /**
422
+ * Modify the likelihood of specified tokens appearing in the completion.
423
+ *
424
+ * Accepts a JSON object that maps tokens (specified by their token ID in
425
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
426
+ */
427
+ logitBias: z3.record(z3.coerce.number(), z3.number()).optional(),
428
+ /**
429
+ * Return the log probabilities of the tokens.
430
+ *
431
+ * Setting to true will return the log probabilities of the tokens that
432
+ * were generated.
433
+ *
434
+ * Setting to a number will return the log probabilities of the top n
435
+ * tokens that were generated.
436
+ */
437
+ logprobs: z3.union([z3.boolean(), z3.number()]).optional(),
438
+ /**
439
+ * Whether to enable parallel function calling during tool use. Default to true.
440
+ */
441
+ parallelToolCalls: z3.boolean().optional(),
442
+ /**
443
+ * A unique identifier representing your end-user, which can help OpenAI to
444
+ * monitor and detect abuse.
445
+ */
446
+ user: z3.string().optional(),
447
+ /**
448
+ * Reasoning effort for reasoning models. Defaults to `medium`.
449
+ */
450
+ reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
451
+ /**
452
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
453
+ */
454
+ maxCompletionTokens: z3.number().optional(),
455
+ /**
456
+ * Whether to enable persistence in responses API.
457
+ */
458
+ store: z3.boolean().optional(),
459
+ /**
460
+ * Metadata to associate with the request.
461
+ */
462
+ metadata: z3.record(z3.string().max(64), z3.string().max(512)).optional(),
463
+ /**
464
+ * Parameters for prediction mode.
465
+ */
466
+ prediction: z3.record(z3.string(), z3.any()).optional(),
467
+ /**
468
+ * Whether to use structured outputs.
469
+ *
470
+ * @default true
471
+ */
472
+ structuredOutputs: z3.boolean().optional(),
473
+ /**
474
+ * Service tier for the request.
475
+ * - 'auto': Default service tier
476
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
477
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
478
+ *
479
+ * @default 'auto'
480
+ */
481
+ serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
482
+ /**
483
+ * Whether to use strict JSON schema validation.
484
+ *
485
+ * @default false
486
+ */
487
+ strictJsonSchema: z3.boolean().optional(),
488
+ /**
489
+ * Controls the verbosity of the model's responses.
490
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
491
+ */
492
+ textVerbosity: z3.enum(["low", "medium", "high"]).optional(),
493
+ /**
494
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
495
+ * Useful for improving cache hit rates and working around automatic caching issues.
496
+ */
497
+ promptCacheKey: z3.string().optional(),
498
+ /**
499
+ * A stable identifier used to help detect users of your application
500
+ * that may be violating OpenAI's usage policies. The IDs should be a
501
+ * string that uniquely identifies each user. We recommend hashing their
502
+ * username or email address, in order to avoid sending us any identifying
503
+ * information.
504
+ */
505
+ safetyIdentifier: z3.string().optional()
506
+ })
507
+ )
508
+ );
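
The chat provider options defined above are passed through providerOptions.openai. A usage sketch with the AI SDK — the model id and option values are illustrative, not taken from this diff:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.chat('gpt-5'),
  prompt: 'Summarize the changelog.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',        // "minimal" | "low" | "medium" | "high"
      serviceTier: 'auto',           // "auto" | "flex" | "priority"
      promptCacheKey: 'changelog-v1',
    },
  },
});
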
353
509
 
354
510
  // src/chat/openai-chat-prepare-tools.ts
355
511
  var import_provider2 = require("@ai-sdk/provider");
@@ -442,7 +598,7 @@ var OpenAIChatLanguageModel = class {
442
598
  }) {
443
599
  var _a, _b, _c, _d;
444
600
  const warnings = [];
445
- const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
601
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
446
602
  provider: "openai",
447
603
  providerOptions,
448
604
  schema: openaiChatLanguageModelOptions
@@ -621,15 +777,15 @@ var OpenAIChatLanguageModel = class {
621
777
  responseHeaders,
622
778
  value: response,
623
779
  rawValue: rawResponse
624
- } = await (0, import_provider_utils3.postJsonToApi)({
780
+ } = await (0, import_provider_utils5.postJsonToApi)({
625
781
  url: this.config.url({
626
782
  path: "/chat/completions",
627
783
  modelId: this.modelId
628
784
  }),
629
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
785
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
630
786
  body,
631
787
  failedResponseHandler: openaiFailedResponseHandler,
632
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
788
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
633
789
  openaiChatResponseSchema
634
790
  ),
635
791
  abortSignal: options.abortSignal,
@@ -644,7 +800,7 @@ var OpenAIChatLanguageModel = class {
644
800
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
645
801
  content.push({
646
802
  type: "tool-call",
647
- toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
803
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
648
804
  toolName: toolCall.function.name,
649
805
  input: toolCall.function.arguments
650
806
  });
@@ -653,7 +809,7 @@ var OpenAIChatLanguageModel = class {
653
809
  content.push({
654
810
  type: "source",
655
811
  sourceType: "url",
656
- id: (0, import_provider_utils3.generateId)(),
812
+ id: (0, import_provider_utils5.generateId)(),
657
813
  url: annotation.url,
658
814
  title: annotation.title
659
815
  });
@@ -699,15 +855,15 @@ var OpenAIChatLanguageModel = class {
699
855
  include_usage: true
700
856
  }
701
857
  };
702
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
858
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
703
859
  url: this.config.url({
704
860
  path: "/chat/completions",
705
861
  modelId: this.modelId
706
862
  }),
707
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
863
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
708
864
  body,
709
865
  failedResponseHandler: openaiFailedResponseHandler,
710
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
866
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
711
867
  openaiChatChunkSchema
712
868
  ),
713
869
  abortSignal: options.abortSignal,
@@ -832,14 +988,14 @@ var OpenAIChatLanguageModel = class {
832
988
  delta: toolCall2.function.arguments
833
989
  });
834
990
  }
835
- if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
991
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
836
992
  controller.enqueue({
837
993
  type: "tool-input-end",
838
994
  id: toolCall2.id
839
995
  });
840
996
  controller.enqueue({
841
997
  type: "tool-call",
842
- toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
998
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
843
999
  toolName: toolCall2.function.name,
844
1000
  input: toolCall2.function.arguments
845
1001
  });
@@ -860,14 +1016,14 @@ var OpenAIChatLanguageModel = class {
860
1016
  id: toolCall.id,
861
1017
  delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
862
1018
  });
863
- if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
1019
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
864
1020
  controller.enqueue({
865
1021
  type: "tool-input-end",
866
1022
  id: toolCall.id
867
1023
  });
868
1024
  controller.enqueue({
869
1025
  type: "tool-call",
870
- toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
1026
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
871
1027
  toolName: toolCall.function.name,
872
1028
  input: toolCall.function.arguments
873
1029
  });
@@ -880,7 +1036,7 @@ var OpenAIChatLanguageModel = class {
880
1036
  controller.enqueue({
881
1037
  type: "source",
882
1038
  sourceType: "url",
883
- id: (0, import_provider_utils3.generateId)(),
1039
+ id: (0, import_provider_utils5.generateId)(),
884
1040
  url: annotation.url,
885
1041
  title: annotation.title
886
1042
  });
@@ -905,121 +1061,6 @@ var OpenAIChatLanguageModel = class {
905
1061
  };
906
1062
  }
907
1063
  };
908
- var openaiTokenUsageSchema = import_v43.z.object({
909
- prompt_tokens: import_v43.z.number().nullish(),
910
- completion_tokens: import_v43.z.number().nullish(),
911
- total_tokens: import_v43.z.number().nullish(),
912
- prompt_tokens_details: import_v43.z.object({
913
- cached_tokens: import_v43.z.number().nullish()
914
- }).nullish(),
915
- completion_tokens_details: import_v43.z.object({
916
- reasoning_tokens: import_v43.z.number().nullish(),
917
- accepted_prediction_tokens: import_v43.z.number().nullish(),
918
- rejected_prediction_tokens: import_v43.z.number().nullish()
919
- }).nullish()
920
- }).nullish();
921
- var openaiChatResponseSchema = import_v43.z.object({
922
- id: import_v43.z.string().nullish(),
923
- created: import_v43.z.number().nullish(),
924
- model: import_v43.z.string().nullish(),
925
- choices: import_v43.z.array(
926
- import_v43.z.object({
927
- message: import_v43.z.object({
928
- role: import_v43.z.literal("assistant").nullish(),
929
- content: import_v43.z.string().nullish(),
930
- tool_calls: import_v43.z.array(
931
- import_v43.z.object({
932
- id: import_v43.z.string().nullish(),
933
- type: import_v43.z.literal("function"),
934
- function: import_v43.z.object({
935
- name: import_v43.z.string(),
936
- arguments: import_v43.z.string()
937
- })
938
- })
939
- ).nullish(),
940
- annotations: import_v43.z.array(
941
- import_v43.z.object({
942
- type: import_v43.z.literal("url_citation"),
943
- start_index: import_v43.z.number(),
944
- end_index: import_v43.z.number(),
945
- url: import_v43.z.string(),
946
- title: import_v43.z.string()
947
- })
948
- ).nullish()
949
- }),
950
- index: import_v43.z.number(),
951
- logprobs: import_v43.z.object({
952
- content: import_v43.z.array(
953
- import_v43.z.object({
954
- token: import_v43.z.string(),
955
- logprob: import_v43.z.number(),
956
- top_logprobs: import_v43.z.array(
957
- import_v43.z.object({
958
- token: import_v43.z.string(),
959
- logprob: import_v43.z.number()
960
- })
961
- )
962
- })
963
- ).nullish()
964
- }).nullish(),
965
- finish_reason: import_v43.z.string().nullish()
966
- })
967
- ),
968
- usage: openaiTokenUsageSchema
969
- });
970
- var openaiChatChunkSchema = import_v43.z.union([
971
- import_v43.z.object({
972
- id: import_v43.z.string().nullish(),
973
- created: import_v43.z.number().nullish(),
974
- model: import_v43.z.string().nullish(),
975
- choices: import_v43.z.array(
976
- import_v43.z.object({
977
- delta: import_v43.z.object({
978
- role: import_v43.z.enum(["assistant"]).nullish(),
979
- content: import_v43.z.string().nullish(),
980
- tool_calls: import_v43.z.array(
981
- import_v43.z.object({
982
- index: import_v43.z.number(),
983
- id: import_v43.z.string().nullish(),
984
- type: import_v43.z.literal("function").nullish(),
985
- function: import_v43.z.object({
986
- name: import_v43.z.string().nullish(),
987
- arguments: import_v43.z.string().nullish()
988
- })
989
- })
990
- ).nullish(),
991
- annotations: import_v43.z.array(
992
- import_v43.z.object({
993
- type: import_v43.z.literal("url_citation"),
994
- start_index: import_v43.z.number(),
995
- end_index: import_v43.z.number(),
996
- url: import_v43.z.string(),
997
- title: import_v43.z.string()
998
- })
999
- ).nullish()
1000
- }).nullish(),
1001
- logprobs: import_v43.z.object({
1002
- content: import_v43.z.array(
1003
- import_v43.z.object({
1004
- token: import_v43.z.string(),
1005
- logprob: import_v43.z.number(),
1006
- top_logprobs: import_v43.z.array(
1007
- import_v43.z.object({
1008
- token: import_v43.z.string(),
1009
- logprob: import_v43.z.number()
1010
- })
1011
- )
1012
- })
1013
- ).nullish()
1014
- }).nullish(),
1015
- finish_reason: import_v43.z.string().nullish(),
1016
- index: import_v43.z.number()
1017
- })
1018
- ),
1019
- usage: openaiTokenUsageSchema
1020
- }),
1021
- openaiErrorDataSchema
1022
- ]);
1023
1064
  function isReasoningModel(modelId) {
1024
1065
  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1025
1066
  }
@@ -1070,8 +1111,7 @@ var reasoningModels = {
1070
1111
  };
1071
1112
 
1072
1113
  // src/completion/openai-completion-language-model.ts
1073
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
1074
- var import_v45 = require("zod/v4");
1114
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1075
1115
 
1076
1116
  // src/completion/convert-to-openai-completion-prompt.ts
1077
1117
  var import_provider4 = require("@ai-sdk/provider");
@@ -1178,48 +1218,111 @@ function mapOpenAIFinishReason2(finishReason) {
1178
1218
  }
1179
1219
  }
1180
1220
 
1221
+ // src/completion/openai-completion-api.ts
1222
+ var z4 = __toESM(require("zod/v4"));
1223
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1224
+ var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
1225
+ () => (0, import_provider_utils6.zodSchema)(
1226
+ z4.object({
1227
+ id: z4.string().nullish(),
1228
+ created: z4.number().nullish(),
1229
+ model: z4.string().nullish(),
1230
+ choices: z4.array(
1231
+ z4.object({
1232
+ text: z4.string(),
1233
+ finish_reason: z4.string(),
1234
+ logprobs: z4.object({
1235
+ tokens: z4.array(z4.string()),
1236
+ token_logprobs: z4.array(z4.number()),
1237
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
1238
+ }).nullish()
1239
+ })
1240
+ ),
1241
+ usage: z4.object({
1242
+ prompt_tokens: z4.number(),
1243
+ completion_tokens: z4.number(),
1244
+ total_tokens: z4.number()
1245
+ }).nullish()
1246
+ })
1247
+ )
1248
+ );
1249
+ var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
1250
+ () => (0, import_provider_utils6.zodSchema)(
1251
+ z4.union([
1252
+ z4.object({
1253
+ id: z4.string().nullish(),
1254
+ created: z4.number().nullish(),
1255
+ model: z4.string().nullish(),
1256
+ choices: z4.array(
1257
+ z4.object({
1258
+ text: z4.string(),
1259
+ finish_reason: z4.string().nullish(),
1260
+ index: z4.number(),
1261
+ logprobs: z4.object({
1262
+ tokens: z4.array(z4.string()),
1263
+ token_logprobs: z4.array(z4.number()),
1264
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullish()
1265
+ }).nullish()
1266
+ })
1267
+ ),
1268
+ usage: z4.object({
1269
+ prompt_tokens: z4.number(),
1270
+ completion_tokens: z4.number(),
1271
+ total_tokens: z4.number()
1272
+ }).nullish()
1273
+ }),
1274
+ openaiErrorDataSchema
1275
+ ])
1276
+ )
1277
+ );
1278
+
1181
1279
  // src/completion/openai-completion-options.ts
1182
- var import_v44 = require("zod/v4");
1183
- var openaiCompletionProviderOptions = import_v44.z.object({
1184
- /**
1185
- Echo back the prompt in addition to the completion.
1186
- */
1187
- echo: import_v44.z.boolean().optional(),
1188
- /**
1189
- Modify the likelihood of specified tokens appearing in the completion.
1190
-
1191
- Accepts a JSON object that maps tokens (specified by their token ID in
1192
- the GPT tokenizer) to an associated bias value from -100 to 100. You
1193
- can use this tokenizer tool to convert text to token IDs. Mathematically,
1194
- the bias is added to the logits generated by the model prior to sampling.
1195
- The exact effect will vary per model, but values between -1 and 1 should
1196
- decrease or increase likelihood of selection; values like -100 or 100
1197
- should result in a ban or exclusive selection of the relevant token.
1198
-
1199
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1200
- token from being generated.
1201
- */
1202
- logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
1203
- /**
1204
- The suffix that comes after a completion of inserted text.
1205
- */
1206
- suffix: import_v44.z.string().optional(),
1207
- /**
1208
- A unique identifier representing your end-user, which can help OpenAI to
1209
- monitor and detect abuse. Learn more.
1210
- */
1211
- user: import_v44.z.string().optional(),
1212
- /**
1213
- Return the log probabilities of the tokens. Including logprobs will increase
1214
- the response size and can slow down response times. However, it can
1215
- be useful to better understand how the model is behaving.
1216
- Setting to true will return the log probabilities of the tokens that
1217
- were generated.
1218
- Setting to a number will return the log probabilities of the top n
1219
- tokens that were generated.
1220
- */
1221
- logprobs: import_v44.z.union([import_v44.z.boolean(), import_v44.z.number()]).optional()
1222
- });
1280
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1281
+ var z5 = __toESM(require("zod/v4"));
1282
+ var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
1283
+ () => (0, import_provider_utils7.zodSchema)(
1284
+ z5.object({
1285
+ /**
1286
+ Echo back the prompt in addition to the completion.
1287
+ */
1288
+ echo: z5.boolean().optional(),
1289
+ /**
1290
+ Modify the likelihood of specified tokens appearing in the completion.
1291
+
1292
+ Accepts a JSON object that maps tokens (specified by their token ID in
1293
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1294
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1295
+ the bias is added to the logits generated by the model prior to sampling.
1296
+ The exact effect will vary per model, but values between -1 and 1 should
1297
+ decrease or increase likelihood of selection; values like -100 or 100
1298
+ should result in a ban or exclusive selection of the relevant token.
1299
+
1300
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1301
+ token from being generated.
1302
+ */
1303
+ logitBias: z5.record(z5.string(), z5.number()).optional(),
1304
+ /**
1305
+ The suffix that comes after a completion of inserted text.
1306
+ */
1307
+ suffix: z5.string().optional(),
1308
+ /**
1309
+ A unique identifier representing your end-user, which can help OpenAI to
1310
+ monitor and detect abuse. Learn more.
1311
+ */
1312
+ user: z5.string().optional(),
1313
+ /**
1314
+ Return the log probabilities of the tokens. Including logprobs will increase
1315
+ the response size and can slow down response times. However, it can
1316
+ be useful to better understand how the model is behaving.
1317
+ Setting to true will return the log probabilities of the tokens that
1318
+ were generated.
1319
+ Setting to a number will return the log probabilities of the top n
1320
+ tokens that were generated.
1321
+ */
1322
+ logprobs: z5.union([z5.boolean(), z5.number()]).optional()
1323
+ })
1324
+ )
1325
+ );
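
A usage sketch for the completion provider options above, reusing the logit-bias example from the doc comment; the model id is illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Write a haiku about package diffs.',
  providerOptions: {
    openai: {
      logitBias: { '50256': -100 }, // prevent <|endoftext|> from being generated
      logprobs: 3,                  // top-3 log probabilities per token
    },
  },
});
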
1223
1326
 
1224
1327
  // src/completion/openai-completion-language-model.ts
1225
1328
  var OpenAICompletionLanguageModel = class {
@@ -1254,12 +1357,12 @@ var OpenAICompletionLanguageModel = class {
1254
1357
  }) {
1255
1358
  const warnings = [];
1256
1359
  const openaiOptions = {
1257
- ...await (0, import_provider_utils4.parseProviderOptions)({
1360
+ ...await (0, import_provider_utils8.parseProviderOptions)({
1258
1361
  provider: "openai",
1259
1362
  providerOptions,
1260
1363
  schema: openaiCompletionProviderOptions
1261
1364
  }),
1262
- ...await (0, import_provider_utils4.parseProviderOptions)({
1365
+ ...await (0, import_provider_utils8.parseProviderOptions)({
1263
1366
  provider: this.providerOptionsName,
1264
1367
  providerOptions,
1265
1368
  schema: openaiCompletionProviderOptions
@@ -1315,15 +1418,15 @@ var OpenAICompletionLanguageModel = class {
1315
1418
  responseHeaders,
1316
1419
  value: response,
1317
1420
  rawValue: rawResponse
1318
- } = await (0, import_provider_utils4.postJsonToApi)({
1421
+ } = await (0, import_provider_utils8.postJsonToApi)({
1319
1422
  url: this.config.url({
1320
1423
  path: "/completions",
1321
1424
  modelId: this.modelId
1322
1425
  }),
1323
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1426
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
1324
1427
  body: args,
1325
1428
  failedResponseHandler: openaiFailedResponseHandler,
1326
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1429
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
1327
1430
  openaiCompletionResponseSchema
1328
1431
  ),
1329
1432
  abortSignal: options.abortSignal,
@@ -1361,15 +1464,15 @@ var OpenAICompletionLanguageModel = class {
1361
1464
  include_usage: true
1362
1465
  }
1363
1466
  };
1364
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1467
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
1365
1468
  url: this.config.url({
1366
1469
  path: "/completions",
1367
1470
  modelId: this.modelId
1368
1471
  }),
1369
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1472
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
1370
1473
  body,
1371
1474
  failedResponseHandler: openaiFailedResponseHandler,
1372
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
1475
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
1373
1476
  openaiCompletionChunkSchema
1374
1477
  ),
1375
1478
  abortSignal: options.abortSignal,
@@ -1450,69 +1553,42 @@ var OpenAICompletionLanguageModel = class {
1450
1553
  };
1451
1554
  }
1452
1555
  };
1453
- var usageSchema = import_v45.z.object({
1454
- prompt_tokens: import_v45.z.number(),
1455
- completion_tokens: import_v45.z.number(),
1456
- total_tokens: import_v45.z.number()
1457
- });
1458
- var openaiCompletionResponseSchema = import_v45.z.object({
1459
- id: import_v45.z.string().nullish(),
1460
- created: import_v45.z.number().nullish(),
1461
- model: import_v45.z.string().nullish(),
1462
- choices: import_v45.z.array(
1463
- import_v45.z.object({
1464
- text: import_v45.z.string(),
1465
- finish_reason: import_v45.z.string(),
1466
- logprobs: import_v45.z.object({
1467
- tokens: import_v45.z.array(import_v45.z.string()),
1468
- token_logprobs: import_v45.z.array(import_v45.z.number()),
1469
- top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
1470
- }).nullish()
1471
- })
1472
- ),
1473
- usage: usageSchema.nullish()
1474
- });
1475
- var openaiCompletionChunkSchema = import_v45.z.union([
1476
- import_v45.z.object({
1477
- id: import_v45.z.string().nullish(),
1478
- created: import_v45.z.number().nullish(),
1479
- model: import_v45.z.string().nullish(),
1480
- choices: import_v45.z.array(
1481
- import_v45.z.object({
1482
- text: import_v45.z.string(),
1483
- finish_reason: import_v45.z.string().nullish(),
1484
- index: import_v45.z.number(),
1485
- logprobs: import_v45.z.object({
1486
- tokens: import_v45.z.array(import_v45.z.string()),
1487
- token_logprobs: import_v45.z.array(import_v45.z.number()),
1488
- top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullish()
1489
- }).nullish()
1490
- })
1491
- ),
1492
- usage: usageSchema.nullish()
1493
- }),
1494
- openaiErrorDataSchema
1495
- ]);
1496
1556
 
1497
1557
  // src/embedding/openai-embedding-model.ts
1498
1558
  var import_provider5 = require("@ai-sdk/provider");
1499
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
1500
- var import_v47 = require("zod/v4");
1559
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
1501
1560
 
1502
1561
  // src/embedding/openai-embedding-options.ts
1503
- var import_v46 = require("zod/v4");
1504
- var openaiEmbeddingProviderOptions = import_v46.z.object({
1505
- /**
1506
- The number of dimensions the resulting output embeddings should have.
1507
- Only supported in text-embedding-3 and later models.
1508
- */
1509
- dimensions: import_v46.z.number().optional(),
1510
- /**
1511
- A unique identifier representing your end-user, which can help OpenAI to
1512
- monitor and detect abuse. Learn more.
1513
- */
1514
- user: import_v46.z.string().optional()
1515
- });
1562
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1563
+ var z6 = __toESM(require("zod/v4"));
1564
+ var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
1565
+ () => (0, import_provider_utils9.zodSchema)(
1566
+ z6.object({
1567
+ /**
1568
+ The number of dimensions the resulting output embeddings should have.
1569
+ Only supported in text-embedding-3 and later models.
1570
+ */
1571
+ dimensions: z6.number().optional(),
1572
+ /**
1573
+ A unique identifier representing your end-user, which can help OpenAI to
1574
+ monitor and detect abuse. Learn more.
1575
+ */
1576
+ user: z6.string().optional()
1577
+ })
1578
+ )
1579
+ );
1580
+
1581
+ // src/embedding/openai-embedding-api.ts
1582
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
1583
+ var z7 = __toESM(require("zod/v4"));
1584
+ var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
1585
+ () => (0, import_provider_utils10.zodSchema)(
1586
+ z7.object({
1587
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
1588
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
1589
+ })
1590
+ )
1591
+ );
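
A usage sketch for the embedding provider options above, assuming embed() forwards providerOptions as in the current AI SDK docs; the model id and dimension count are illustrative:

import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

const { embedding, usage } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    openai: { dimensions: 512 }, // only text-embedding-3 and later support this
  },
});
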
1516
1592
 
1517
1593
  // src/embedding/openai-embedding-model.ts
1518
1594
  var OpenAIEmbeddingModel = class {
@@ -1541,7 +1617,7 @@ var OpenAIEmbeddingModel = class {
1541
1617
  values
1542
1618
  });
1543
1619
  }
1544
- const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
1620
+ const openaiOptions = (_a = await (0, import_provider_utils11.parseProviderOptions)({
1545
1621
  provider: "openai",
1546
1622
  providerOptions,
1547
1623
  schema: openaiEmbeddingProviderOptions
@@ -1550,12 +1626,12 @@ var OpenAIEmbeddingModel = class {
1550
1626
  responseHeaders,
1551
1627
  value: response,
1552
1628
  rawValue
1553
- } = await (0, import_provider_utils5.postJsonToApi)({
1629
+ } = await (0, import_provider_utils11.postJsonToApi)({
1554
1630
  url: this.config.url({
1555
1631
  path: "/embeddings",
1556
1632
  modelId: this.modelId
1557
1633
  }),
1558
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
1634
+ headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), headers),
1559
1635
  body: {
1560
1636
  model: this.modelId,
1561
1637
  input: values,
@@ -1564,7 +1640,7 @@ var OpenAIEmbeddingModel = class {
1564
1640
  user: openaiOptions.user
1565
1641
  },
1566
1642
  failedResponseHandler: openaiFailedResponseHandler,
1567
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
1643
+ successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
1568
1644
  openaiTextEmbeddingResponseSchema
1569
1645
  ),
1570
1646
  abortSignal,
@@ -1577,22 +1653,37 @@ var OpenAIEmbeddingModel = class {
1577
1653
  };
1578
1654
  }
1579
1655
  };
1580
- var openaiTextEmbeddingResponseSchema = import_v47.z.object({
1581
- data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
1582
- usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
1583
- });
1584
1656
 
1585
1657
  // src/image/openai-image-model.ts
1586
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
1587
- var import_v48 = require("zod/v4");
1658
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
1659
+
1660
+ // src/image/openai-image-api.ts
1661
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
1662
+ var z8 = __toESM(require("zod/v4"));
1663
+ var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
1664
+ () => (0, import_provider_utils12.zodSchema)(
1665
+ z8.object({
1666
+ data: z8.array(
1667
+ z8.object({
1668
+ b64_json: z8.string(),
1669
+ revised_prompt: z8.string().optional()
1670
+ })
1671
+ )
1672
+ })
1673
+ )
1674
+ );
1588
1675
 
1589
1676
  // src/image/openai-image-options.ts
1590
1677
  var modelMaxImagesPerCall = {
1591
1678
  "dall-e-3": 1,
1592
1679
  "dall-e-2": 10,
1593
- "gpt-image-1": 10
1680
+ "gpt-image-1": 10,
1681
+ "gpt-image-1-mini": 10
1594
1682
  };
1595
- var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1683
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
1684
+ "gpt-image-1",
1685
+ "gpt-image-1-mini"
1686
+ ]);
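
"gpt-image-1-mini" is newly listed above with a limit of 10 images per call and no response_format parameter. A usage sketch; the prompt and size are illustrative:

import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1-mini'),
  prompt: 'A watercolor fox in the rain',
  size: '1024x1024',
});
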
1596
1687
 
1597
1688
  // src/image/openai-image-model.ts
1598
1689
  var OpenAIImageModel = class {
@@ -1631,12 +1722,12 @@ var OpenAIImageModel = class {
1631
1722
  warnings.push({ type: "unsupported-setting", setting: "seed" });
1632
1723
  }
1633
1724
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1634
- const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
1725
+ const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
1635
1726
  url: this.config.url({
1636
1727
  path: "/images/generations",
1637
1728
  modelId: this.modelId
1638
1729
  }),
1639
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
1730
+ headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
1640
1731
  body: {
1641
1732
  model: this.modelId,
1642
1733
  prompt,
@@ -1646,7 +1737,7 @@ var OpenAIImageModel = class {
1646
1737
  ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1647
1738
  },
1648
1739
  failedResponseHandler: openaiFailedResponseHandler,
1649
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
1740
+ successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
1650
1741
  openaiImageResponseSchema
1651
1742
  ),
1652
1743
  abortSignal,
@@ -1672,36 +1763,43 @@ var OpenAIImageModel = class {
1672
1763
  };
1673
1764
  }
1674
1765
  };
1675
- var openaiImageResponseSchema = import_v48.z.object({
1676
- data: import_v48.z.array(
1677
- import_v48.z.object({ b64_json: import_v48.z.string(), revised_prompt: import_v48.z.string().optional() })
1678
- )
1679
- });
1680
1766
 
1681
1767
  // src/tool/code-interpreter.ts
1682
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
1683
- var import_v49 = require("zod/v4");
1684
- var codeInterpreterInputSchema = import_v49.z.object({
1685
- code: import_v49.z.string().nullish(),
1686
- containerId: import_v49.z.string()
1687
- });
1688
- var codeInterpreterOutputSchema = import_v49.z.object({
1689
- outputs: import_v49.z.array(
1690
- import_v49.z.discriminatedUnion("type", [
1691
- import_v49.z.object({ type: import_v49.z.literal("logs"), logs: import_v49.z.string() }),
1692
- import_v49.z.object({ type: import_v49.z.literal("image"), url: import_v49.z.string() })
1693
- ])
1694
- ).nullish()
1695
- });
1696
- var codeInterpreterArgsSchema = import_v49.z.object({
1697
- container: import_v49.z.union([
1698
- import_v49.z.string(),
1699
- import_v49.z.object({
1700
- fileIds: import_v49.z.array(import_v49.z.string()).optional()
1768
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
1769
+ var z9 = __toESM(require("zod/v4"));
1770
+ var codeInterpreterInputSchema = (0, import_provider_utils14.lazySchema)(
1771
+ () => (0, import_provider_utils14.zodSchema)(
1772
+ z9.object({
1773
+ code: z9.string().nullish(),
1774
+ containerId: z9.string()
1701
1775
  })
1702
- ]).optional()
1703
- });
1704
- var codeInterpreterToolFactory = (0, import_provider_utils7.createProviderDefinedToolFactoryWithOutputSchema)({
1776
+ )
1777
+ );
1778
+ var codeInterpreterOutputSchema = (0, import_provider_utils14.lazySchema)(
1779
+ () => (0, import_provider_utils14.zodSchema)(
1780
+ z9.object({
1781
+ outputs: z9.array(
1782
+ z9.discriminatedUnion("type", [
1783
+ z9.object({ type: z9.literal("logs"), logs: z9.string() }),
1784
+ z9.object({ type: z9.literal("image"), url: z9.string() })
1785
+ ])
1786
+ ).nullish()
1787
+ })
1788
+ )
1789
+ );
1790
+ var codeInterpreterArgsSchema = (0, import_provider_utils14.lazySchema)(
1791
+ () => (0, import_provider_utils14.zodSchema)(
1792
+ z9.object({
1793
+ container: z9.union([
1794
+ z9.string(),
1795
+ z9.object({
1796
+ fileIds: z9.array(z9.string()).optional()
1797
+ })
1798
+ ]).optional()
1799
+ })
1800
+ )
1801
+ );
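
A usage sketch for the code_interpreter tool defined above, assuming it is exposed as openai.tools.codeInterpreter; the model id and prompt are illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'Compute the first 10 Fibonacci numbers and print them.',
  tools: {
    code_interpreter: openai.tools.codeInterpreter({
      container: { fileIds: [] }, // or a container id string, per the args schema
    }),
  },
});
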
1802
+ var codeInterpreterToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactoryWithOutputSchema)({
1705
1803
  id: "openai.code_interpreter",
1706
1804
  name: "code_interpreter",
1707
1805
  inputSchema: codeInterpreterInputSchema,
@@ -1712,71 +1810,85 @@ var codeInterpreter = (args = {}) => {
1712
1810
  };
1713
1811
 
1714
1812
  // src/tool/file-search.ts
1715
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
1716
- var import_v410 = require("zod/v4");
1717
- var comparisonFilterSchema = import_v410.z.object({
1718
- key: import_v410.z.string(),
1719
- type: import_v410.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
1720
- value: import_v410.z.union([import_v410.z.string(), import_v410.z.number(), import_v410.z.boolean()])
1813
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");
1814
+ var z10 = __toESM(require("zod/v4"));
1815
+ var comparisonFilterSchema = z10.object({
1816
+ key: z10.string(),
1817
+ type: z10.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
1818
+ value: z10.union([z10.string(), z10.number(), z10.boolean()])
1721
1819
  });
1722
- var compoundFilterSchema = import_v410.z.object({
1723
- type: import_v410.z.enum(["and", "or"]),
1724
- filters: import_v410.z.array(
1725
- import_v410.z.union([comparisonFilterSchema, import_v410.z.lazy(() => compoundFilterSchema)])
1820
+ var compoundFilterSchema = z10.object({
1821
+ type: z10.enum(["and", "or"]),
1822
+ filters: z10.array(
1823
+ z10.union([comparisonFilterSchema, z10.lazy(() => compoundFilterSchema)])
1726
1824
  )
1727
1825
  });
1728
- var fileSearchArgsSchema = import_v410.z.object({
1729
- vectorStoreIds: import_v410.z.array(import_v410.z.string()),
1730
- maxNumResults: import_v410.z.number().optional(),
1731
- ranking: import_v410.z.object({
1732
- ranker: import_v410.z.string().optional(),
1733
- scoreThreshold: import_v410.z.number().optional()
1734
- }).optional(),
1735
- filters: import_v410.z.union([comparisonFilterSchema, compoundFilterSchema]).optional()
1736
- });
1737
- var fileSearchOutputSchema = import_v410.z.object({
1738
- queries: import_v410.z.array(import_v410.z.string()),
1739
- results: import_v410.z.array(
1740
- import_v410.z.object({
1741
- attributes: import_v410.z.record(import_v410.z.string(), import_v410.z.unknown()),
1742
- fileId: import_v410.z.string(),
1743
- filename: import_v410.z.string(),
1744
- score: import_v410.z.number(),
1745
- text: import_v410.z.string()
1826
+ var fileSearchArgsSchema = (0, import_provider_utils15.lazySchema)(
1827
+ () => (0, import_provider_utils15.zodSchema)(
1828
+ z10.object({
1829
+ vectorStoreIds: z10.array(z10.string()),
1830
+ maxNumResults: z10.number().optional(),
1831
+ ranking: z10.object({
1832
+ ranker: z10.string().optional(),
1833
+ scoreThreshold: z10.number().optional()
1834
+ }).optional(),
1835
+ filters: z10.union([comparisonFilterSchema, compoundFilterSchema]).optional()
1746
1836
  })
1747
- ).nullable()
1748
- });
1749
- var fileSearch = (0, import_provider_utils8.createProviderDefinedToolFactoryWithOutputSchema)({
1837
+ )
1838
+ );
1839
+ var fileSearchOutputSchema = (0, import_provider_utils15.lazySchema)(
1840
+ () => (0, import_provider_utils15.zodSchema)(
1841
+ z10.object({
1842
+ queries: z10.array(z10.string()),
1843
+ results: z10.array(
1844
+ z10.object({
1845
+ attributes: z10.record(z10.string(), z10.unknown()),
1846
+ fileId: z10.string(),
1847
+ filename: z10.string(),
1848
+ score: z10.number(),
1849
+ text: z10.string()
1850
+ })
1851
+ ).nullable()
1852
+ })
1853
+ )
1854
+ );
1855
+ var fileSearch = (0, import_provider_utils15.createProviderDefinedToolFactoryWithOutputSchema)({
1750
1856
  id: "openai.file_search",
1751
1857
  name: "file_search",
1752
- inputSchema: import_v410.z.object({}),
1858
+ inputSchema: z10.object({}),
1753
1859
  outputSchema: fileSearchOutputSchema
1754
1860
  });
1755
1861
 
1756
1862
  // src/tool/image-generation.ts
1757
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
1758
- var import_v411 = require("zod/v4");
1759
- var imageGenerationArgsSchema = import_v411.z.object({
1760
- background: import_v411.z.enum(["auto", "opaque", "transparent"]).optional(),
1761
- inputFidelity: import_v411.z.enum(["low", "high"]).optional(),
1762
- inputImageMask: import_v411.z.object({
1763
- fileId: import_v411.z.string().optional(),
1764
- imageUrl: import_v411.z.string().optional()
1765
- }).optional(),
1766
- model: import_v411.z.string().optional(),
1767
- moderation: import_v411.z.enum(["auto"]).optional(),
1768
- outputCompression: import_v411.z.number().int().min(0).max(100).optional(),
1769
- outputFormat: import_v411.z.enum(["png", "jpeg", "webp"]).optional(),
1770
- quality: import_v411.z.enum(["auto", "low", "medium", "high"]).optional(),
1771
- size: import_v411.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
1772
- }).strict();
1773
- var imageGenerationOutputSchema = import_v411.z.object({
1774
- result: import_v411.z.string()
1775
- });
1776
- var imageGenerationToolFactory = (0, import_provider_utils9.createProviderDefinedToolFactoryWithOutputSchema)({
1863
+ var import_provider_utils16 = require("@ai-sdk/provider-utils");
1864
+ var z11 = __toESM(require("zod/v4"));
1865
+ var imageGenerationArgsSchema = (0, import_provider_utils16.lazySchema)(
1866
+ () => (0, import_provider_utils16.zodSchema)(
1867
+ z11.object({
1868
+ background: z11.enum(["auto", "opaque", "transparent"]).optional(),
1869
+ inputFidelity: z11.enum(["low", "high"]).optional(),
1870
+ inputImageMask: z11.object({
1871
+ fileId: z11.string().optional(),
1872
+ imageUrl: z11.string().optional()
1873
+ }).optional(),
1874
+ model: z11.string().optional(),
1875
+ moderation: z11.enum(["auto"]).optional(),
1876
+ outputCompression: z11.number().int().min(0).max(100).optional(),
1877
+ outputFormat: z11.enum(["png", "jpeg", "webp"]).optional(),
1878
+ partialImages: z11.number().int().min(0).max(3).optional(),
1879
+ quality: z11.enum(["auto", "low", "medium", "high"]).optional(),
1880
+ size: z11.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
1881
+ }).strict()
1882
+ )
1883
+ );
1884
+ var imageGenerationInputSchema = (0, import_provider_utils16.lazySchema)(() => (0, import_provider_utils16.zodSchema)(z11.object({})));
1885
+ var imageGenerationOutputSchema = (0, import_provider_utils16.lazySchema)(
1886
+ () => (0, import_provider_utils16.zodSchema)(z11.object({ result: z11.string() }))
1887
+ );
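
partialImages (an integer from 0 to 3) is new in the image_generation tool args above. A streaming usage sketch, assuming the factory is exposed as openai.tools.imageGeneration; the model id and values are illustrative:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-5'),
  prompt: 'Generate a pixel-art rocket.',
  tools: {
    image_generation: openai.tools.imageGeneration({
      outputFormat: 'webp',
      partialImages: 2, // stream up to two partial renders before the final image
    }),
  },
});

for await (const part of result.fullStream) {
  // partial and final images arrive as tool result parts in the stream
}
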
1888
+ var imageGenerationToolFactory = (0, import_provider_utils16.createProviderDefinedToolFactoryWithOutputSchema)({
1777
1889
  id: "openai.image_generation",
1778
1890
  name: "image_generation",
1779
- inputSchema: import_v411.z.object({}),
1891
+ inputSchema: imageGenerationInputSchema,
1780
1892
  outputSchema: imageGenerationOutputSchema
1781
1893
  });
1782
1894
  var imageGeneration = (args = {}) => {
@@ -1784,22 +1896,26 @@ var imageGeneration = (args = {}) => {
1784
1896
  };
1785
1897
 
1786
1898
  // src/tool/local-shell.ts
1787
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
1788
- var import_v412 = require("zod/v4");
1789
- var localShellInputSchema = import_v412.z.object({
1790
- action: import_v412.z.object({
1791
- type: import_v412.z.literal("exec"),
1792
- command: import_v412.z.array(import_v412.z.string()),
1793
- timeoutMs: import_v412.z.number().optional(),
1794
- user: import_v412.z.string().optional(),
1795
- workingDirectory: import_v412.z.string().optional(),
1796
- env: import_v412.z.record(import_v412.z.string(), import_v412.z.string()).optional()
1797
- })
1798
- });
1799
- var localShellOutputSchema = import_v412.z.object({
1800
- output: import_v412.z.string()
1801
- });
1802
- var localShell = (0, import_provider_utils10.createProviderDefinedToolFactoryWithOutputSchema)({
1899
+ var import_provider_utils17 = require("@ai-sdk/provider-utils");
1900
+ var z12 = __toESM(require("zod/v4"));
1901
+ var localShellInputSchema = (0, import_provider_utils17.lazySchema)(
1902
+ () => (0, import_provider_utils17.zodSchema)(
1903
+ z12.object({
1904
+ action: z12.object({
1905
+ type: z12.literal("exec"),
1906
+ command: z12.array(z12.string()),
1907
+ timeoutMs: z12.number().optional(),
1908
+ user: z12.string().optional(),
1909
+ workingDirectory: z12.string().optional(),
1910
+ env: z12.record(z12.string(), z12.string()).optional()
1911
+ })
1912
+ })
1913
+ )
1914
+ );
1915
+ var localShellOutputSchema = (0, import_provider_utils17.lazySchema)(
1916
+ () => (0, import_provider_utils17.zodSchema)(z12.object({ output: z12.string() }))
1917
+ );
1918
+ var localShell = (0, import_provider_utils17.createProviderDefinedToolFactoryWithOutputSchema)({
1803
1919
  id: "openai.local_shell",
1804
1920
  name: "local_shell",
1805
1921
  inputSchema: localShellInputSchema,
@@ -1807,103 +1923,121 @@ var localShell = (0, import_provider_utils10.createProviderDefinedToolFactoryWit
1807
1923
  });
1808
1924
 
1809
1925
  // src/tool/web-search.ts
1810
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
1811
- var import_v413 = require("zod/v4");
1812
- var webSearchArgsSchema = import_v413.z.object({
1813
- filters: import_v413.z.object({
1814
- allowedDomains: import_v413.z.array(import_v413.z.string()).optional()
1815
- }).optional(),
1816
- searchContextSize: import_v413.z.enum(["low", "medium", "high"]).optional(),
1817
- userLocation: import_v413.z.object({
1818
- type: import_v413.z.literal("approximate"),
1819
- country: import_v413.z.string().optional(),
1820
- city: import_v413.z.string().optional(),
1821
- region: import_v413.z.string().optional(),
1822
- timezone: import_v413.z.string().optional()
1823
- }).optional()
1824
- });
1825
- var webSearchToolFactory = (0, import_provider_utils11.createProviderDefinedToolFactory)({
1926
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");
1927
+ var z13 = __toESM(require("zod/v4"));
1928
+ var webSearchArgsSchema = (0, import_provider_utils18.lazySchema)(
1929
+ () => (0, import_provider_utils18.zodSchema)(
1930
+ z13.object({
1931
+ filters: z13.object({
1932
+ allowedDomains: z13.array(z13.string()).optional()
1933
+ }).optional(),
1934
+ searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
1935
+ userLocation: z13.object({
1936
+ type: z13.literal("approximate"),
1937
+ country: z13.string().optional(),
1938
+ city: z13.string().optional(),
1939
+ region: z13.string().optional(),
1940
+ timezone: z13.string().optional()
1941
+ }).optional()
1942
+ })
1943
+ )
1944
+ );
1945
+ var webSearchInputSchema = (0, import_provider_utils18.lazySchema)(
1946
+ () => (0, import_provider_utils18.zodSchema)(
1947
+ z13.object({
1948
+ action: z13.discriminatedUnion("type", [
1949
+ z13.object({
1950
+ type: z13.literal("search"),
1951
+ query: z13.string().nullish()
1952
+ }),
1953
+ z13.object({
1954
+ type: z13.literal("open_page"),
1955
+ url: z13.string()
1956
+ }),
1957
+ z13.object({
1958
+ type: z13.literal("find"),
1959
+ url: z13.string(),
1960
+ pattern: z13.string()
1961
+ })
1962
+ ]).nullish()
1963
+ })
1964
+ )
1965
+ );
1966
+ var webSearchToolFactory = (0, import_provider_utils18.createProviderDefinedToolFactory)({
1826
1967
  id: "openai.web_search",
1827
1968
  name: "web_search",
1828
- inputSchema: import_v413.z.object({
1829
- action: import_v413.z.discriminatedUnion("type", [
1830
- import_v413.z.object({
1831
- type: import_v413.z.literal("search"),
1832
- query: import_v413.z.string().nullish()
1833
- }),
1834
- import_v413.z.object({
1835
- type: import_v413.z.literal("open_page"),
1836
- url: import_v413.z.string()
1837
- }),
1838
- import_v413.z.object({
1839
- type: import_v413.z.literal("find"),
1840
- url: import_v413.z.string(),
1841
- pattern: import_v413.z.string()
1842
- })
1843
- ]).nullish()
1844
- })
1969
+ inputSchema: webSearchInputSchema
1845
1970
  });
1846
1971
  var webSearch = (args = {}) => {
1847
1972
  return webSearchToolFactory(args);
1848
1973
  };
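An illustrative web_search arguments object accepted by webSearchArgsSchema above (all fields are optional; values are examples only):

const webSearchArgs = {
  searchContextSize: 'medium',                  // 'low' | 'medium' | 'high'
  filters: { allowedDomains: ['example.com'] },
  userLocation: {
    type: 'approximate',
    country: 'US',
    city: 'Minneapolis',
    region: 'Minnesota',
    timezone: 'America/Chicago',
  },
};

These are the same fields that prepareResponsesTools later maps onto the web_search request payload (allowed_domains, search_context_size, user_location).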
1849
1974
 
1850
1975
  // src/tool/web-search-preview.ts
1851
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
1852
- var import_v414 = require("zod/v4");
1853
- var webSearchPreviewArgsSchema = import_v414.z.object({
1854
- /**
1855
- * Search context size to use for the web search.
1856
- * - high: Most comprehensive context, highest cost, slower response
1857
- * - medium: Balanced context, cost, and latency (default)
1858
- * - low: Least context, lowest cost, fastest response
1859
- */
1860
- searchContextSize: import_v414.z.enum(["low", "medium", "high"]).optional(),
1861
- /**
1862
- * User location information to provide geographically relevant search results.
1863
- */
1864
- userLocation: import_v414.z.object({
1865
- /**
1866
- * Type of location (always 'approximate')
1867
- */
1868
- type: import_v414.z.literal("approximate"),
1869
- /**
1870
- * Two-letter ISO country code (e.g., 'US', 'GB')
1871
- */
1872
- country: import_v414.z.string().optional(),
1873
- /**
1874
- * City name (free text, e.g., 'Minneapolis')
1875
- */
1876
- city: import_v414.z.string().optional(),
1877
- /**
1878
- * Region name (free text, e.g., 'Minnesota')
1879
- */
1880
- region: import_v414.z.string().optional(),
1881
- /**
1882
- * IANA timezone (e.g., 'America/Chicago')
1883
- */
1884
- timezone: import_v414.z.string().optional()
1885
- }).optional()
1886
- });
1887
- var webSearchPreview = (0, import_provider_utils12.createProviderDefinedToolFactory)({
1976
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
1977
+ var z14 = __toESM(require("zod/v4"));
1978
+ var webSearchPreviewArgsSchema = (0, import_provider_utils19.lazySchema)(
1979
+ () => (0, import_provider_utils19.zodSchema)(
1980
+ z14.object({
1981
+ /**
1982
+ * Search context size to use for the web search.
1983
+ * - high: Most comprehensive context, highest cost, slower response
1984
+ * - medium: Balanced context, cost, and latency (default)
1985
+ * - low: Least context, lowest cost, fastest response
1986
+ */
1987
+ searchContextSize: z14.enum(["low", "medium", "high"]).optional(),
1988
+ /**
1989
+ * User location information to provide geographically relevant search results.
1990
+ */
1991
+ userLocation: z14.object({
1992
+ /**
1993
+ * Type of location (always 'approximate')
1994
+ */
1995
+ type: z14.literal("approximate"),
1996
+ /**
1997
+ * Two-letter ISO country code (e.g., 'US', 'GB')
1998
+ */
1999
+ country: z14.string().optional(),
2000
+ /**
2001
+ * City name (free text, e.g., 'Minneapolis')
2002
+ */
2003
+ city: z14.string().optional(),
2004
+ /**
2005
+ * Region name (free text, e.g., 'Minnesota')
2006
+ */
2007
+ region: z14.string().optional(),
2008
+ /**
2009
+ * IANA timezone (e.g., 'America/Chicago')
2010
+ */
2011
+ timezone: z14.string().optional()
2012
+ }).optional()
2013
+ })
2014
+ )
2015
+ );
2016
+ var webSearchPreviewInputSchema = (0, import_provider_utils19.lazySchema)(
2017
+ () => (0, import_provider_utils19.zodSchema)(
2018
+ z14.object({
2019
+ action: z14.discriminatedUnion("type", [
2020
+ z14.object({
2021
+ type: z14.literal("search"),
2022
+ query: z14.string().nullish()
2023
+ }),
2024
+ z14.object({
2025
+ type: z14.literal("open_page"),
2026
+ url: z14.string()
2027
+ }),
2028
+ z14.object({
2029
+ type: z14.literal("find"),
2030
+ url: z14.string(),
2031
+ pattern: z14.string()
2032
+ })
2033
+ ]).nullish()
2034
+ })
2035
+ )
2036
+ );
2037
+ var webSearchPreview = (0, import_provider_utils19.createProviderDefinedToolFactory)({
1888
2038
  id: "openai.web_search_preview",
1889
2039
  name: "web_search_preview",
1890
- inputSchema: import_v414.z.object({
1891
- action: import_v414.z.discriminatedUnion("type", [
1892
- import_v414.z.object({
1893
- type: import_v414.z.literal("search"),
1894
- query: import_v414.z.string().nullish()
1895
- }),
1896
- import_v414.z.object({
1897
- type: import_v414.z.literal("open_page"),
1898
- url: import_v414.z.string()
1899
- }),
1900
- import_v414.z.object({
1901
- type: import_v414.z.literal("find"),
1902
- url: import_v414.z.string(),
1903
- pattern: import_v414.z.string()
1904
- })
1905
- ]).nullish()
1906
- })
2040
+ inputSchema: webSearchPreviewInputSchema
1907
2041
  });
1908
2042
 
1909
2043
  // src/openai-tools.ts
@@ -1938,11 +2072,16 @@ var openaiTools = {
1938
2072
  *
1939
2073
  * Must have name `image_generation`.
1940
2074
  *
1941
- * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
1942
- * @param quality - Rendering quality (e.g. low, medium, high)
1943
- * @param format - File output format
1944
- * @param compression - Compression level (0-100%) for JPEG and WebP formats
1945
- * @param background - Transparent or opaque
2075
+ * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
2076
+ * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
2077
+ * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
2078
+ * @param model - The image generation model to use. Default: gpt-image-1.
2079
+ * @param moderation - Moderation level for the generated image. Default: 'auto'.
2080
+ * @param outputCompression - Compression level for the output image (0-100).
2081
+ * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
2082
+ * @param partialImages - Number of partial images to generate in streaming mode (0-3).
2083
+ * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
2084
+ * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
1946
2085
  */
1947
2086
  imageGeneration,
1948
2087
  /**
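An illustrative image_generation arguments object matching the @param docs in the hunk above (property names follow the args mapping in prepareResponsesTools further down; all fields are optional, values are examples only):

const imageGenerationArgs = {
  size: '1024x1024',          // 'auto' | '1024x1024' | '1024x1536' | '1536x1024'
  quality: 'high',            // 'auto' | 'low' | 'medium' | 'high'
  outputFormat: 'webp',       // 'png' | 'jpeg' | 'webp'
  outputCompression: 80,      // 0-100
  partialImages: 2,           // 0-3, streaming mode only
  background: 'transparent',  // 'auto' | 'opaque' | 'transparent'
  inputFidelity: 'low',       // 'low' | 'high'
};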
@@ -1981,13 +2120,12 @@ var openaiTools = {
1981
2120
 
1982
2121
  // src/responses/openai-responses-language-model.ts
1983
2122
  var import_provider8 = require("@ai-sdk/provider");
1984
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
1985
- var import_v416 = require("zod/v4");
2123
+ var import_provider_utils24 = require("@ai-sdk/provider-utils");
1986
2124
 
1987
2125
  // src/responses/convert-to-openai-responses-input.ts
1988
2126
  var import_provider6 = require("@ai-sdk/provider");
1989
- var import_provider_utils13 = require("@ai-sdk/provider-utils");
1990
- var import_v415 = require("zod/v4");
2127
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
2128
+ var z15 = __toESM(require("zod/v4"));
1991
2129
  function isFileId(data, prefixes) {
1992
2130
  if (!prefixes) return false;
1993
2131
  return prefixes.some((prefix) => data.startsWith(prefix));
@@ -1999,7 +2137,7 @@ async function convertToOpenAIResponsesInput({
1999
2137
  store,
2000
2138
  hasLocalShellTool = false
2001
2139
  }) {
2002
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
2140
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
2003
2141
  const input = [];
2004
2142
  const warnings = [];
2005
2143
  for (const { role, content } of prompt) {
@@ -2045,7 +2183,7 @@ async function convertToOpenAIResponsesInput({
2045
2183
  return {
2046
2184
  type: "input_image",
2047
2185
  ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2048
- image_url: `data:${mediaType};base64,${(0, import_provider_utils13.convertToBase64)(part.data)}`
2186
+ image_url: `data:${mediaType};base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
2049
2187
  },
2050
2188
  detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2051
2189
  };
@@ -2060,7 +2198,7 @@ async function convertToOpenAIResponsesInput({
2060
2198
  type: "input_file",
2061
2199
  ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2062
2200
  filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2063
- file_data: `data:application/pdf;base64,${(0, import_provider_utils13.convertToBase64)(part.data)}`
2201
+ file_data: `data:application/pdf;base64,${(0, import_provider_utils20.convertToBase64)(part.data)}`
2064
2202
  }
2065
2203
  };
2066
2204
  } else {
@@ -2093,7 +2231,10 @@ async function convertToOpenAIResponsesInput({
2093
2231
  break;
2094
2232
  }
2095
2233
  if (hasLocalShellTool && part.toolName === "local_shell") {
2096
- const parsedInput = localShellInputSchema.parse(part.input);
2234
+ const parsedInput = await (0, import_provider_utils20.validateTypes)({
2235
+ value: part.input,
2236
+ schema: localShellInputSchema
2237
+ });
2097
2238
  input.push({
2098
2239
  type: "local_shell_call",
2099
2240
  call_id: part.toolCallId,
@@ -2131,7 +2272,7 @@ async function convertToOpenAIResponsesInput({
2131
2272
  break;
2132
2273
  }
2133
2274
  case "reasoning": {
2134
- const providerOptions = await (0, import_provider_utils13.parseProviderOptions)({
2275
+ const providerOptions = await (0, import_provider_utils20.parseProviderOptions)({
2135
2276
  provider: "openai",
2136
2277
  providerOptions: part.providerOptions,
2137
2278
  schema: openaiResponsesReasoningProviderOptionsSchema
@@ -2189,10 +2330,14 @@ async function convertToOpenAIResponsesInput({
2189
2330
  for (const part of content) {
2190
2331
  const output = part.output;
2191
2332
  if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
2333
+ const parsedOutput = await (0, import_provider_utils20.validateTypes)({
2334
+ value: output.value,
2335
+ schema: localShellOutputSchema
2336
+ });
2192
2337
  input.push({
2193
2338
  type: "local_shell_call_output",
2194
2339
  call_id: part.toolCallId,
2195
- output: localShellOutputSchema.parse(output.value).output
2340
+ output: parsedOutput.output
2196
2341
  });
2197
2342
  break;
2198
2343
  }
@@ -2202,6 +2347,9 @@ async function convertToOpenAIResponsesInput({
2202
2347
  case "error-text":
2203
2348
  contentValue = output.value;
2204
2349
  break;
2350
+ case "execution-denied":
2351
+ contentValue = (_j = output.reason) != null ? _j : "Tool execution denied.";
2352
+ break;
2205
2353
  case "content":
2206
2354
  case "json":
2207
2355
  case "error-json":
@@ -2224,9 +2372,9 @@ async function convertToOpenAIResponsesInput({
2224
2372
  }
2225
2373
  return { input, warnings };
2226
2374
  }
2227
- var openaiResponsesReasoningProviderOptionsSchema = import_v415.z.object({
2228
- itemId: import_v415.z.string().nullish(),
2229
- reasoningEncryptedContent: import_v415.z.string().nullish()
2375
+ var openaiResponsesReasoningProviderOptionsSchema = z15.object({
2376
+ itemId: z15.string().nullish(),
2377
+ reasoningEncryptedContent: z15.string().nullish()
2230
2378
  });
2231
2379
 
2232
2380
  // src/responses/map-openai-responses-finish-reason.ts
@@ -2247,9 +2395,539 @@ function mapOpenAIResponseFinishReason({
2247
2395
  }
2248
2396
  }
2249
2397
 
2398
+ // src/responses/openai-responses-api.ts
2399
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
2400
+ var z16 = __toESM(require("zod/v4"));
2401
+ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
2402
+ () => (0, import_provider_utils21.zodSchema)(
2403
+ z16.union([
2404
+ z16.object({
2405
+ type: z16.literal("response.output_text.delta"),
2406
+ item_id: z16.string(),
2407
+ delta: z16.string(),
2408
+ logprobs: z16.array(
2409
+ z16.object({
2410
+ token: z16.string(),
2411
+ logprob: z16.number(),
2412
+ top_logprobs: z16.array(
2413
+ z16.object({
2414
+ token: z16.string(),
2415
+ logprob: z16.number()
2416
+ })
2417
+ )
2418
+ })
2419
+ ).nullish()
2420
+ }),
2421
+ z16.object({
2422
+ type: z16.enum(["response.completed", "response.incomplete"]),
2423
+ response: z16.object({
2424
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
2425
+ usage: z16.object({
2426
+ input_tokens: z16.number(),
2427
+ input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
2428
+ output_tokens: z16.number(),
2429
+ output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
2430
+ }),
2431
+ service_tier: z16.string().nullish()
2432
+ })
2433
+ }),
2434
+ z16.object({
2435
+ type: z16.literal("response.created"),
2436
+ response: z16.object({
2437
+ id: z16.string(),
2438
+ created_at: z16.number(),
2439
+ model: z16.string(),
2440
+ service_tier: z16.string().nullish()
2441
+ })
2442
+ }),
2443
+ z16.object({
2444
+ type: z16.literal("response.output_item.added"),
2445
+ output_index: z16.number(),
2446
+ item: z16.discriminatedUnion("type", [
2447
+ z16.object({
2448
+ type: z16.literal("message"),
2449
+ id: z16.string()
2450
+ }),
2451
+ z16.object({
2452
+ type: z16.literal("reasoning"),
2453
+ id: z16.string(),
2454
+ encrypted_content: z16.string().nullish()
2455
+ }),
2456
+ z16.object({
2457
+ type: z16.literal("function_call"),
2458
+ id: z16.string(),
2459
+ call_id: z16.string(),
2460
+ name: z16.string(),
2461
+ arguments: z16.string()
2462
+ }),
2463
+ z16.object({
2464
+ type: z16.literal("web_search_call"),
2465
+ id: z16.string(),
2466
+ status: z16.string(),
2467
+ action: z16.object({
2468
+ type: z16.literal("search"),
2469
+ query: z16.string().optional()
2470
+ }).nullish()
2471
+ }),
2472
+ z16.object({
2473
+ type: z16.literal("computer_call"),
2474
+ id: z16.string(),
2475
+ status: z16.string()
2476
+ }),
2477
+ z16.object({
2478
+ type: z16.literal("file_search_call"),
2479
+ id: z16.string()
2480
+ }),
2481
+ z16.object({
2482
+ type: z16.literal("image_generation_call"),
2483
+ id: z16.string()
2484
+ }),
2485
+ z16.object({
2486
+ type: z16.literal("code_interpreter_call"),
2487
+ id: z16.string(),
2488
+ container_id: z16.string(),
2489
+ code: z16.string().nullable(),
2490
+ outputs: z16.array(
2491
+ z16.discriminatedUnion("type", [
2492
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
2493
+ z16.object({ type: z16.literal("image"), url: z16.string() })
2494
+ ])
2495
+ ).nullable(),
2496
+ status: z16.string()
2497
+ })
2498
+ ])
2499
+ }),
2500
+ z16.object({
2501
+ type: z16.literal("response.output_item.done"),
2502
+ output_index: z16.number(),
2503
+ item: z16.discriminatedUnion("type", [
2504
+ z16.object({
2505
+ type: z16.literal("message"),
2506
+ id: z16.string()
2507
+ }),
2508
+ z16.object({
2509
+ type: z16.literal("reasoning"),
2510
+ id: z16.string(),
2511
+ encrypted_content: z16.string().nullish()
2512
+ }),
2513
+ z16.object({
2514
+ type: z16.literal("function_call"),
2515
+ id: z16.string(),
2516
+ call_id: z16.string(),
2517
+ name: z16.string(),
2518
+ arguments: z16.string(),
2519
+ status: z16.literal("completed")
2520
+ }),
2521
+ z16.object({
2522
+ type: z16.literal("code_interpreter_call"),
2523
+ id: z16.string(),
2524
+ code: z16.string().nullable(),
2525
+ container_id: z16.string(),
2526
+ outputs: z16.array(
2527
+ z16.discriminatedUnion("type", [
2528
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
2529
+ z16.object({ type: z16.literal("image"), url: z16.string() })
2530
+ ])
2531
+ ).nullable()
2532
+ }),
2533
+ z16.object({
2534
+ type: z16.literal("image_generation_call"),
2535
+ id: z16.string(),
2536
+ result: z16.string()
2537
+ }),
2538
+ z16.object({
2539
+ type: z16.literal("web_search_call"),
2540
+ id: z16.string(),
2541
+ status: z16.string(),
2542
+ action: z16.discriminatedUnion("type", [
2543
+ z16.object({
2544
+ type: z16.literal("search"),
2545
+ query: z16.string().nullish()
2546
+ }),
2547
+ z16.object({
2548
+ type: z16.literal("open_page"),
2549
+ url: z16.string()
2550
+ }),
2551
+ z16.object({
2552
+ type: z16.literal("find"),
2553
+ url: z16.string(),
2554
+ pattern: z16.string()
2555
+ })
2556
+ ]).nullish()
2557
+ }),
2558
+ z16.object({
2559
+ type: z16.literal("file_search_call"),
2560
+ id: z16.string(),
2561
+ queries: z16.array(z16.string()),
2562
+ results: z16.array(
2563
+ z16.object({
2564
+ attributes: z16.record(z16.string(), z16.unknown()),
2565
+ file_id: z16.string(),
2566
+ filename: z16.string(),
2567
+ score: z16.number(),
2568
+ text: z16.string()
2569
+ })
2570
+ ).nullish()
2571
+ }),
2572
+ z16.object({
2573
+ type: z16.literal("local_shell_call"),
2574
+ id: z16.string(),
2575
+ call_id: z16.string(),
2576
+ action: z16.object({
2577
+ type: z16.literal("exec"),
2578
+ command: z16.array(z16.string()),
2579
+ timeout_ms: z16.number().optional(),
2580
+ user: z16.string().optional(),
2581
+ working_directory: z16.string().optional(),
2582
+ env: z16.record(z16.string(), z16.string()).optional()
2583
+ })
2584
+ }),
2585
+ z16.object({
2586
+ type: z16.literal("computer_call"),
2587
+ id: z16.string(),
2588
+ status: z16.literal("completed")
2589
+ })
2590
+ ])
2591
+ }),
2592
+ z16.object({
2593
+ type: z16.literal("response.function_call_arguments.delta"),
2594
+ item_id: z16.string(),
2595
+ output_index: z16.number(),
2596
+ delta: z16.string()
2597
+ }),
2598
+ z16.object({
2599
+ type: z16.literal("response.image_generation_call.partial_image"),
2600
+ item_id: z16.string(),
2601
+ output_index: z16.number(),
2602
+ partial_image_b64: z16.string()
2603
+ }),
2604
+ z16.object({
2605
+ type: z16.literal("response.code_interpreter_call_code.delta"),
2606
+ item_id: z16.string(),
2607
+ output_index: z16.number(),
2608
+ delta: z16.string()
2609
+ }),
2610
+ z16.object({
2611
+ type: z16.literal("response.code_interpreter_call_code.done"),
2612
+ item_id: z16.string(),
2613
+ output_index: z16.number(),
2614
+ code: z16.string()
2615
+ }),
2616
+ z16.object({
2617
+ type: z16.literal("response.output_text.annotation.added"),
2618
+ annotation: z16.discriminatedUnion("type", [
2619
+ z16.object({
2620
+ type: z16.literal("url_citation"),
2621
+ url: z16.string(),
2622
+ title: z16.string()
2623
+ }),
2624
+ z16.object({
2625
+ type: z16.literal("file_citation"),
2626
+ file_id: z16.string(),
2627
+ filename: z16.string().nullish(),
2628
+ index: z16.number().nullish(),
2629
+ start_index: z16.number().nullish(),
2630
+ end_index: z16.number().nullish(),
2631
+ quote: z16.string().nullish()
2632
+ })
2633
+ ])
2634
+ }),
2635
+ z16.object({
2636
+ type: z16.literal("response.reasoning_summary_part.added"),
2637
+ item_id: z16.string(),
2638
+ summary_index: z16.number()
2639
+ }),
2640
+ z16.object({
2641
+ type: z16.literal("response.reasoning_summary_text.delta"),
2642
+ item_id: z16.string(),
2643
+ summary_index: z16.number(),
2644
+ delta: z16.string()
2645
+ }),
2646
+ z16.object({
2647
+ type: z16.literal("error"),
2648
+ code: z16.string(),
2649
+ message: z16.string(),
2650
+ param: z16.string().nullish(),
2651
+ sequence_number: z16.number()
2652
+ }),
2653
+ z16.object({ type: z16.string() }).loose().transform((value) => ({
2654
+ type: "unknown_chunk",
2655
+ message: value.type
2656
+ }))
2657
+ // fallback for unknown chunks
2658
+ ])
2659
+ )
2660
+ );
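A minimal example of a streaming chunk that openaiResponsesChunkSchema accepts (values illustrative; the item id is hypothetical):

const textDeltaChunk = {
  type: 'response.output_text.delta',
  item_id: 'msg_abc123',
  delta: 'Hello',
  logprobs: [{ token: 'Hello', logprob: -0.01, top_logprobs: [] }],
};

Chunk types not covered by the union fall through to the loose fallback object and surface as { type: 'unknown_chunk', message: <original type> }.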
2661
+ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
2662
+ () => (0, import_provider_utils21.zodSchema)(
2663
+ z16.object({
2664
+ id: z16.string(),
2665
+ created_at: z16.number(),
2666
+ error: z16.object({
2667
+ code: z16.string(),
2668
+ message: z16.string()
2669
+ }).nullish(),
2670
+ model: z16.string(),
2671
+ output: z16.array(
2672
+ z16.discriminatedUnion("type", [
2673
+ z16.object({
2674
+ type: z16.literal("message"),
2675
+ role: z16.literal("assistant"),
2676
+ id: z16.string(),
2677
+ content: z16.array(
2678
+ z16.object({
2679
+ type: z16.literal("output_text"),
2680
+ text: z16.string(),
2681
+ logprobs: z16.array(
2682
+ z16.object({
2683
+ token: z16.string(),
2684
+ logprob: z16.number(),
2685
+ top_logprobs: z16.array(
2686
+ z16.object({
2687
+ token: z16.string(),
2688
+ logprob: z16.number()
2689
+ })
2690
+ )
2691
+ })
2692
+ ).nullish(),
2693
+ annotations: z16.array(
2694
+ z16.discriminatedUnion("type", [
2695
+ z16.object({
2696
+ type: z16.literal("url_citation"),
2697
+ start_index: z16.number(),
2698
+ end_index: z16.number(),
2699
+ url: z16.string(),
2700
+ title: z16.string()
2701
+ }),
2702
+ z16.object({
2703
+ type: z16.literal("file_citation"),
2704
+ file_id: z16.string(),
2705
+ filename: z16.string().nullish(),
2706
+ index: z16.number().nullish(),
2707
+ start_index: z16.number().nullish(),
2708
+ end_index: z16.number().nullish(),
2709
+ quote: z16.string().nullish()
2710
+ }),
2711
+ z16.object({
2712
+ type: z16.literal("container_file_citation")
2713
+ })
2714
+ ])
2715
+ )
2716
+ })
2717
+ )
2718
+ }),
2719
+ z16.object({
2720
+ type: z16.literal("web_search_call"),
2721
+ id: z16.string(),
2722
+ status: z16.string(),
2723
+ action: z16.discriminatedUnion("type", [
2724
+ z16.object({
2725
+ type: z16.literal("search"),
2726
+ query: z16.string().nullish()
2727
+ }),
2728
+ z16.object({
2729
+ type: z16.literal("open_page"),
2730
+ url: z16.string()
2731
+ }),
2732
+ z16.object({
2733
+ type: z16.literal("find"),
2734
+ url: z16.string(),
2735
+ pattern: z16.string()
2736
+ })
2737
+ ]).nullish()
2738
+ }),
2739
+ z16.object({
2740
+ type: z16.literal("file_search_call"),
2741
+ id: z16.string(),
2742
+ queries: z16.array(z16.string()),
2743
+ results: z16.array(
2744
+ z16.object({
2745
+ attributes: z16.record(z16.string(), z16.unknown()),
2746
+ file_id: z16.string(),
2747
+ filename: z16.string(),
2748
+ score: z16.number(),
2749
+ text: z16.string()
2750
+ })
2751
+ ).nullish()
2752
+ }),
2753
+ z16.object({
2754
+ type: z16.literal("code_interpreter_call"),
2755
+ id: z16.string(),
2756
+ code: z16.string().nullable(),
2757
+ container_id: z16.string(),
2758
+ outputs: z16.array(
2759
+ z16.discriminatedUnion("type", [
2760
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
2761
+ z16.object({ type: z16.literal("image"), url: z16.string() })
2762
+ ])
2763
+ ).nullable()
2764
+ }),
2765
+ z16.object({
2766
+ type: z16.literal("image_generation_call"),
2767
+ id: z16.string(),
2768
+ result: z16.string()
2769
+ }),
2770
+ z16.object({
2771
+ type: z16.literal("local_shell_call"),
2772
+ id: z16.string(),
2773
+ call_id: z16.string(),
2774
+ action: z16.object({
2775
+ type: z16.literal("exec"),
2776
+ command: z16.array(z16.string()),
2777
+ timeout_ms: z16.number().optional(),
2778
+ user: z16.string().optional(),
2779
+ working_directory: z16.string().optional(),
2780
+ env: z16.record(z16.string(), z16.string()).optional()
2781
+ })
2782
+ }),
2783
+ z16.object({
2784
+ type: z16.literal("function_call"),
2785
+ call_id: z16.string(),
2786
+ name: z16.string(),
2787
+ arguments: z16.string(),
2788
+ id: z16.string()
2789
+ }),
2790
+ z16.object({
2791
+ type: z16.literal("computer_call"),
2792
+ id: z16.string(),
2793
+ status: z16.string().optional()
2794
+ }),
2795
+ z16.object({
2796
+ type: z16.literal("reasoning"),
2797
+ id: z16.string(),
2798
+ encrypted_content: z16.string().nullish(),
2799
+ summary: z16.array(
2800
+ z16.object({
2801
+ type: z16.literal("summary_text"),
2802
+ text: z16.string()
2803
+ })
2804
+ )
2805
+ })
2806
+ ])
2807
+ ),
2808
+ service_tier: z16.string().nullish(),
2809
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
2810
+ usage: z16.object({
2811
+ input_tokens: z16.number(),
2812
+ input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
2813
+ output_tokens: z16.number(),
2814
+ output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
2815
+ })
2816
+ })
2817
+ )
2818
+ );
2819
+
2820
+ // src/responses/openai-responses-options.ts
2821
+ var import_provider_utils22 = require("@ai-sdk/provider-utils");
2822
+ var z17 = __toESM(require("zod/v4"));
2823
+ var TOP_LOGPROBS_MAX = 20;
2824
+ var openaiResponsesReasoningModelIds = [
2825
+ "o1",
2826
+ "o1-2024-12-17",
2827
+ "o3-mini",
2828
+ "o3-mini-2025-01-31",
2829
+ "o3",
2830
+ "o3-2025-04-16",
2831
+ "o4-mini",
2832
+ "o4-mini-2025-04-16",
2833
+ "codex-mini-latest",
2834
+ "computer-use-preview",
2835
+ "gpt-5",
2836
+ "gpt-5-2025-08-07",
2837
+ "gpt-5-codex",
2838
+ "gpt-5-mini",
2839
+ "gpt-5-mini-2025-08-07",
2840
+ "gpt-5-nano",
2841
+ "gpt-5-nano-2025-08-07",
2842
+ "gpt-5-pro",
2843
+ "gpt-5-pro-2025-10-06"
2844
+ ];
2845
+ var openaiResponsesModelIds = [
2846
+ "gpt-4.1",
2847
+ "gpt-4.1-2025-04-14",
2848
+ "gpt-4.1-mini",
2849
+ "gpt-4.1-mini-2025-04-14",
2850
+ "gpt-4.1-nano",
2851
+ "gpt-4.1-nano-2025-04-14",
2852
+ "gpt-4o",
2853
+ "gpt-4o-2024-05-13",
2854
+ "gpt-4o-2024-08-06",
2855
+ "gpt-4o-2024-11-20",
2856
+ "gpt-4o-audio-preview",
2857
+ "gpt-4o-audio-preview-2024-10-01",
2858
+ "gpt-4o-audio-preview-2024-12-17",
2859
+ "gpt-4o-search-preview",
2860
+ "gpt-4o-search-preview-2025-03-11",
2861
+ "gpt-4o-mini-search-preview",
2862
+ "gpt-4o-mini-search-preview-2025-03-11",
2863
+ "gpt-4o-mini",
2864
+ "gpt-4o-mini-2024-07-18",
2865
+ "gpt-4-turbo",
2866
+ "gpt-4-turbo-2024-04-09",
2867
+ "gpt-4-turbo-preview",
2868
+ "gpt-4-0125-preview",
2869
+ "gpt-4-1106-preview",
2870
+ "gpt-4",
2871
+ "gpt-4-0613",
2872
+ "gpt-4.5-preview",
2873
+ "gpt-4.5-preview-2025-02-27",
2874
+ "gpt-3.5-turbo-0125",
2875
+ "gpt-3.5-turbo",
2876
+ "gpt-3.5-turbo-1106",
2877
+ "chatgpt-4o-latest",
2878
+ "gpt-5-chat-latest",
2879
+ ...openaiResponsesReasoningModelIds
2880
+ ];
2881
+ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
2882
+ () => (0, import_provider_utils22.zodSchema)(
2883
+ z17.object({
2884
+ include: z17.array(
2885
+ z17.enum([
2886
+ "reasoning.encrypted_content",
2887
+ "file_search_call.results",
2888
+ "message.output_text.logprobs"
2889
+ ])
2890
+ ).nullish(),
2891
+ instructions: z17.string().nullish(),
2892
+ /**
2893
+ * Return the log probabilities of the tokens.
2894
+ *
2895
+ * Setting to true will return the log probabilities of the tokens that
2896
+ * were generated.
2897
+ *
2898
+ * Setting to a number will return the log probabilities of the top n
2899
+ * tokens that were generated.
2900
+ *
2901
+ * @see https://platform.openai.com/docs/api-reference/responses/create
2902
+ * @see https://cookbook.openai.com/examples/using_logprobs
2903
+ */
2904
+ logprobs: z17.union([z17.boolean(), z17.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
2905
+ /**
2906
+ * The maximum number of total calls to built-in tools that can be processed in a response.
2907
+ * This maximum number applies across all built-in tool calls, not per individual tool.
2908
+ * Any further attempts to call a tool by the model will be ignored.
2909
+ */
2910
+ maxToolCalls: z17.number().nullish(),
2911
+ metadata: z17.any().nullish(),
2912
+ parallelToolCalls: z17.boolean().nullish(),
2913
+ previousResponseId: z17.string().nullish(),
2914
+ promptCacheKey: z17.string().nullish(),
2915
+ reasoningEffort: z17.string().nullish(),
2916
+ reasoningSummary: z17.string().nullish(),
2917
+ safetyIdentifier: z17.string().nullish(),
2918
+ serviceTier: z17.enum(["auto", "flex", "priority"]).nullish(),
2919
+ store: z17.boolean().nullish(),
2920
+ strictJsonSchema: z17.boolean().nullish(),
2921
+ textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
2922
+ user: z17.string().nullish()
2923
+ })
2924
+ )
2925
+ );
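An illustrative providerOptions payload accepted by the schema above (keys come from the schema; values are examples, not defaults):

const openaiResponsesOptions = {
  logprobs: 5,               // true, or 1..TOP_LOGPROBS_MAX (20)
  maxToolCalls: 10,
  parallelToolCalls: false,
  serviceTier: 'auto',       // 'auto' | 'flex' | 'priority'
  textVerbosity: 'low',      // 'low' | 'medium' | 'high'
  store: false,
  strictJsonSchema: true,
  reasoningEffort: 'medium',
  reasoningSummary: 'auto',
};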
2926
+
2250
2927
  // src/responses/openai-responses-prepare-tools.ts
2251
2928
  var import_provider7 = require("@ai-sdk/provider");
2252
- function prepareResponsesTools({
2929
+ var import_provider_utils23 = require("@ai-sdk/provider-utils");
2930
+ async function prepareResponsesTools({
2253
2931
  tools,
2254
2932
  toolChoice,
2255
2933
  strictJsonSchema
@@ -2274,7 +2952,10 @@ function prepareResponsesTools({
2274
2952
  case "provider-defined": {
2275
2953
  switch (tool.id) {
2276
2954
  case "openai.file_search": {
2277
- const args = fileSearchArgsSchema.parse(tool.args);
2955
+ const args = await (0, import_provider_utils23.validateTypes)({
2956
+ value: tool.args,
2957
+ schema: fileSearchArgsSchema
2958
+ });
2278
2959
  openaiTools2.push({
2279
2960
  type: "file_search",
2280
2961
  vector_store_ids: args.vectorStoreIds,
@@ -2294,7 +2975,10 @@ function prepareResponsesTools({
2294
2975
  break;
2295
2976
  }
2296
2977
  case "openai.web_search_preview": {
2297
- const args = webSearchPreviewArgsSchema.parse(tool.args);
2978
+ const args = await (0, import_provider_utils23.validateTypes)({
2979
+ value: tool.args,
2980
+ schema: webSearchPreviewArgsSchema
2981
+ });
2298
2982
  openaiTools2.push({
2299
2983
  type: "web_search_preview",
2300
2984
  search_context_size: args.searchContextSize,
@@ -2303,7 +2987,10 @@ function prepareResponsesTools({
2303
2987
  break;
2304
2988
  }
2305
2989
  case "openai.web_search": {
2306
- const args = webSearchArgsSchema.parse(tool.args);
2990
+ const args = await (0, import_provider_utils23.validateTypes)({
2991
+ value: tool.args,
2992
+ schema: webSearchArgsSchema
2993
+ });
2307
2994
  openaiTools2.push({
2308
2995
  type: "web_search",
2309
2996
  filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
@@ -2313,7 +3000,10 @@ function prepareResponsesTools({
2313
3000
  break;
2314
3001
  }
2315
3002
  case "openai.code_interpreter": {
2316
- const args = codeInterpreterArgsSchema.parse(tool.args);
3003
+ const args = await (0, import_provider_utils23.validateTypes)({
3004
+ value: tool.args,
3005
+ schema: codeInterpreterArgsSchema
3006
+ });
2317
3007
  openaiTools2.push({
2318
3008
  type: "code_interpreter",
2319
3009
  container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
@@ -2321,7 +3011,10 @@ function prepareResponsesTools({
2321
3011
  break;
2322
3012
  }
2323
3013
  case "openai.image_generation": {
2324
- const args = imageGenerationArgsSchema.parse(tool.args);
3014
+ const args = await (0, import_provider_utils23.validateTypes)({
3015
+ value: tool.args,
3016
+ schema: imageGenerationArgsSchema
3017
+ });
2325
3018
  openaiTools2.push({
2326
3019
  type: "image_generation",
2327
3020
  background: args.background,
@@ -2331,11 +3024,12 @@ function prepareResponsesTools({
2331
3024
  image_url: args.inputImageMask.imageUrl
2332
3025
  } : void 0,
2333
3026
  model: args.model,
2334
- size: args.size,
2335
- quality: args.quality,
2336
3027
  moderation: args.moderation,
3028
+ partial_images: args.partialImages,
3029
+ quality: args.quality,
3030
+ output_compression: args.outputCompression,
2337
3031
  output_format: args.outputFormat,
2338
- output_compression: args.outputCompression
3032
+ size: args.size
2339
3033
  });
2340
3034
  break;
2341
3035
  }
@@ -2372,83 +3066,6 @@ function prepareResponsesTools({
2372
3066
  }
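Note that prepareResponsesTools is now async: every synchronous <schema>.parse(tool.args) call was replaced with validateTypes from @ai-sdk/provider-utils, which validates against the lazy schema wrappers. A minimal sketch of the pattern (exampleArgsSchema is a hypothetical stand-in for one of the *ArgsSchema definitions in this bundle):

import { lazySchema, zodSchema, validateTypes } from '@ai-sdk/provider-utils';
import * as z from 'zod/v4';

const exampleArgsSchema = lazySchema(() =>
  zodSchema(z.object({ vectorStoreIds: z.array(z.string()).optional() })),
);

async function parseToolArgs(rawArgs: unknown) {
  // 2.x: exampleArgsSchema.parse(rawArgs) (synchronous)
  // 3.x: asynchronous validation
  return validateTypes({ value: rawArgs, schema: exampleArgsSchema });
}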
2373
3067
 
2374
3068
  // src/responses/openai-responses-language-model.ts
2375
- var webSearchCallItem = import_v416.z.object({
2376
- type: import_v416.z.literal("web_search_call"),
2377
- id: import_v416.z.string(),
2378
- status: import_v416.z.string(),
2379
- action: import_v416.z.discriminatedUnion("type", [
2380
- import_v416.z.object({
2381
- type: import_v416.z.literal("search"),
2382
- query: import_v416.z.string().nullish()
2383
- }),
2384
- import_v416.z.object({
2385
- type: import_v416.z.literal("open_page"),
2386
- url: import_v416.z.string()
2387
- }),
2388
- import_v416.z.object({
2389
- type: import_v416.z.literal("find"),
2390
- url: import_v416.z.string(),
2391
- pattern: import_v416.z.string()
2392
- })
2393
- ]).nullish()
2394
- });
2395
- var fileSearchCallItem = import_v416.z.object({
2396
- type: import_v416.z.literal("file_search_call"),
2397
- id: import_v416.z.string(),
2398
- queries: import_v416.z.array(import_v416.z.string()),
2399
- results: import_v416.z.array(
2400
- import_v416.z.object({
2401
- attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
2402
- file_id: import_v416.z.string(),
2403
- filename: import_v416.z.string(),
2404
- score: import_v416.z.number(),
2405
- text: import_v416.z.string()
2406
- })
2407
- ).nullish()
2408
- });
2409
- var codeInterpreterCallItem = import_v416.z.object({
2410
- type: import_v416.z.literal("code_interpreter_call"),
2411
- id: import_v416.z.string(),
2412
- code: import_v416.z.string().nullable(),
2413
- container_id: import_v416.z.string(),
2414
- outputs: import_v416.z.array(
2415
- import_v416.z.discriminatedUnion("type", [
2416
- import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
2417
- import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
2418
- ])
2419
- ).nullable()
2420
- });
2421
- var localShellCallItem = import_v416.z.object({
2422
- type: import_v416.z.literal("local_shell_call"),
2423
- id: import_v416.z.string(),
2424
- call_id: import_v416.z.string(),
2425
- action: import_v416.z.object({
2426
- type: import_v416.z.literal("exec"),
2427
- command: import_v416.z.array(import_v416.z.string()),
2428
- timeout_ms: import_v416.z.number().optional(),
2429
- user: import_v416.z.string().optional(),
2430
- working_directory: import_v416.z.string().optional(),
2431
- env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
2432
- })
2433
- });
2434
- var imageGenerationCallItem = import_v416.z.object({
2435
- type: import_v416.z.literal("image_generation_call"),
2436
- id: import_v416.z.string(),
2437
- result: import_v416.z.string()
2438
- });
2439
- var TOP_LOGPROBS_MAX = 20;
2440
- var LOGPROBS_SCHEMA = import_v416.z.array(
2441
- import_v416.z.object({
2442
- token: import_v416.z.string(),
2443
- logprob: import_v416.z.number(),
2444
- top_logprobs: import_v416.z.array(
2445
- import_v416.z.object({
2446
- token: import_v416.z.string(),
2447
- logprob: import_v416.z.number()
2448
- })
2449
- )
2450
- })
2451
- );
2452
3069
  var OpenAIResponsesLanguageModel = class {
2453
3070
  constructor(modelId, config) {
2454
3071
  this.specificationVersion = "v3";
@@ -2501,7 +3118,7 @@ var OpenAIResponsesLanguageModel = class {
2501
3118
  if (stopSequences != null) {
2502
3119
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2503
3120
  }
2504
- const openaiOptions = await (0, import_provider_utils14.parseProviderOptions)({
3121
+ const openaiOptions = await (0, import_provider_utils24.parseProviderOptions)({
2505
3122
  provider: "openai",
2506
3123
  providerOptions,
2507
3124
  schema: openaiResponsesProviderOptionsSchema
@@ -2640,7 +3257,7 @@ var OpenAIResponsesLanguageModel = class {
2640
3257
  tools: openaiTools2,
2641
3258
  toolChoice: openaiToolChoice,
2642
3259
  toolWarnings
2643
- } = prepareResponsesTools({
3260
+ } = await prepareResponsesTools({
2644
3261
  tools,
2645
3262
  toolChoice,
2646
3263
  strictJsonSchema
@@ -2670,91 +3287,13 @@ var OpenAIResponsesLanguageModel = class {
2670
3287
  responseHeaders,
2671
3288
  value: response,
2672
3289
  rawValue: rawResponse
2673
- } = await (0, import_provider_utils14.postJsonToApi)({
3290
+ } = await (0, import_provider_utils24.postJsonToApi)({
2674
3291
  url,
2675
- headers: (0, import_provider_utils14.combineHeaders)(this.config.headers(), options.headers),
3292
+ headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
2676
3293
  body,
2677
3294
  failedResponseHandler: openaiFailedResponseHandler,
2678
- successfulResponseHandler: (0, import_provider_utils14.createJsonResponseHandler)(
2679
- import_v416.z.object({
2680
- id: import_v416.z.string(),
2681
- created_at: import_v416.z.number(),
2682
- error: import_v416.z.object({
2683
- code: import_v416.z.string(),
2684
- message: import_v416.z.string()
2685
- }).nullish(),
2686
- model: import_v416.z.string(),
2687
- output: import_v416.z.array(
2688
- import_v416.z.discriminatedUnion("type", [
2689
- import_v416.z.object({
2690
- type: import_v416.z.literal("message"),
2691
- role: import_v416.z.literal("assistant"),
2692
- id: import_v416.z.string(),
2693
- content: import_v416.z.array(
2694
- import_v416.z.object({
2695
- type: import_v416.z.literal("output_text"),
2696
- text: import_v416.z.string(),
2697
- logprobs: LOGPROBS_SCHEMA.nullish(),
2698
- annotations: import_v416.z.array(
2699
- import_v416.z.discriminatedUnion("type", [
2700
- import_v416.z.object({
2701
- type: import_v416.z.literal("url_citation"),
2702
- start_index: import_v416.z.number(),
2703
- end_index: import_v416.z.number(),
2704
- url: import_v416.z.string(),
2705
- title: import_v416.z.string()
2706
- }),
2707
- import_v416.z.object({
2708
- type: import_v416.z.literal("file_citation"),
2709
- file_id: import_v416.z.string(),
2710
- filename: import_v416.z.string().nullish(),
2711
- index: import_v416.z.number().nullish(),
2712
- start_index: import_v416.z.number().nullish(),
2713
- end_index: import_v416.z.number().nullish(),
2714
- quote: import_v416.z.string().nullish()
2715
- }),
2716
- import_v416.z.object({
2717
- type: import_v416.z.literal("container_file_citation")
2718
- })
2719
- ])
2720
- )
2721
- })
2722
- )
2723
- }),
2724
- webSearchCallItem,
2725
- fileSearchCallItem,
2726
- codeInterpreterCallItem,
2727
- imageGenerationCallItem,
2728
- localShellCallItem,
2729
- import_v416.z.object({
2730
- type: import_v416.z.literal("function_call"),
2731
- call_id: import_v416.z.string(),
2732
- name: import_v416.z.string(),
2733
- arguments: import_v416.z.string(),
2734
- id: import_v416.z.string()
2735
- }),
2736
- import_v416.z.object({
2737
- type: import_v416.z.literal("computer_call"),
2738
- id: import_v416.z.string(),
2739
- status: import_v416.z.string().optional()
2740
- }),
2741
- import_v416.z.object({
2742
- type: import_v416.z.literal("reasoning"),
2743
- id: import_v416.z.string(),
2744
- encrypted_content: import_v416.z.string().nullish(),
2745
- summary: import_v416.z.array(
2746
- import_v416.z.object({
2747
- type: import_v416.z.literal("summary_text"),
2748
- text: import_v416.z.string()
2749
- })
2750
- )
2751
- })
2752
- ])
2753
- ),
2754
- service_tier: import_v416.z.string().nullish(),
2755
- incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
2756
- usage: usageSchema2
2757
- })
3295
+ successfulResponseHandler: (0, import_provider_utils24.createJsonResponseHandler)(
3296
+ openaiResponsesResponseSchema
2758
3297
  ),
2759
3298
  abortSignal: options.abortSignal,
2760
3299
  fetch: this.config.fetch
@@ -2817,7 +3356,9 @@ var OpenAIResponsesLanguageModel = class {
2817
3356
  type: "tool-call",
2818
3357
  toolCallId: part.call_id,
2819
3358
  toolName: "local_shell",
2820
- input: JSON.stringify({ action: part.action }),
3359
+ input: JSON.stringify({
3360
+ action: part.action
3361
+ }),
2821
3362
  providerMetadata: {
2822
3363
  openai: {
2823
3364
  itemId: part.id
@@ -2845,7 +3386,7 @@ var OpenAIResponsesLanguageModel = class {
2845
3386
  content.push({
2846
3387
  type: "source",
2847
3388
  sourceType: "url",
2848
- id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils14.generateId)(),
3389
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils24.generateId)(),
2849
3390
  url: annotation.url,
2850
3391
  title: annotation.title
2851
3392
  });
@@ -2853,7 +3394,7 @@ var OpenAIResponsesLanguageModel = class {
2853
3394
  content.push({
2854
3395
  type: "source",
2855
3396
  sourceType: "document",
2856
- id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils14.generateId)(),
3397
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils24.generateId)(),
2857
3398
  mediaType: "text/plain",
2858
3399
  title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
2859
3400
  filename: (_l = annotation.filename) != null ? _l : annotation.file_id
@@ -3005,18 +3546,18 @@ var OpenAIResponsesLanguageModel = class {
3005
3546
  warnings,
3006
3547
  webSearchToolName
3007
3548
  } = await this.getArgs(options);
3008
- const { responseHeaders, value: response } = await (0, import_provider_utils14.postJsonToApi)({
3549
+ const { responseHeaders, value: response } = await (0, import_provider_utils24.postJsonToApi)({
3009
3550
  url: this.config.url({
3010
3551
  path: "/responses",
3011
3552
  modelId: this.modelId
3012
3553
  }),
3013
- headers: (0, import_provider_utils14.combineHeaders)(this.config.headers(), options.headers),
3554
+ headers: (0, import_provider_utils24.combineHeaders)(this.config.headers(), options.headers),
3014
3555
  body: {
3015
3556
  ...body,
3016
3557
  stream: true
3017
3558
  },
3018
3559
  failedResponseHandler: openaiFailedResponseHandler,
3019
- successfulResponseHandler: (0, import_provider_utils14.createEventSourceResponseHandler)(
3560
+ successfulResponseHandler: (0, import_provider_utils24.createEventSourceResponseHandler)(
3020
3561
  openaiResponsesChunkSchema
3021
3562
  ),
3022
3563
  abortSignal: options.abortSignal,
@@ -3071,7 +3612,8 @@ var OpenAIResponsesLanguageModel = class {
3071
3612
  controller.enqueue({
3072
3613
  type: "tool-input-start",
3073
3614
  id: value.item.id,
3074
- toolName: webSearchToolName != null ? webSearchToolName : "web_search"
3615
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3616
+ providerExecuted: true
3075
3617
  });
3076
3618
  } else if (value.item.type === "computer_call") {
3077
3619
  ongoingToolCalls[value.output_index] = {
@@ -3081,7 +3623,8 @@ var OpenAIResponsesLanguageModel = class {
3081
3623
  controller.enqueue({
3082
3624
  type: "tool-input-start",
3083
3625
  id: value.item.id,
3084
- toolName: "computer_use"
3626
+ toolName: "computer_use",
3627
+ providerExecuted: true
3085
3628
  });
3086
3629
  } else if (value.item.type === "code_interpreter_call") {
3087
3630
  ongoingToolCalls[value.output_index] = {
@@ -3094,7 +3637,8 @@ var OpenAIResponsesLanguageModel = class {
3094
3637
  controller.enqueue({
3095
3638
  type: "tool-input-start",
3096
3639
  id: value.item.id,
3097
- toolName: "code_interpreter"
3640
+ toolName: "code_interpreter",
3641
+ providerExecuted: true
3098
3642
  });
3099
3643
  controller.enqueue({
3100
3644
  type: "tool-input-delta",
@@ -3294,6 +3838,17 @@ var OpenAIResponsesLanguageModel = class {
3294
3838
  delta: value.delta
3295
3839
  });
3296
3840
  }
3841
+ } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
3842
+ controller.enqueue({
3843
+ type: "tool-result",
3844
+ toolCallId: value.item_id,
3845
+ toolName: "image_generation",
3846
+ result: {
3847
+ result: value.partial_image_b64
3848
+ },
3849
+ providerExecuted: true,
3850
+ preliminary: true
3851
+ });
3297
3852
  } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
3298
3853
  const toolCall = ongoingToolCalls[value.output_index];
3299
3854
  if (toolCall != null) {
@@ -3390,7 +3945,7 @@ var OpenAIResponsesLanguageModel = class {
3390
3945
  controller.enqueue({
3391
3946
  type: "source",
3392
3947
  sourceType: "url",
3393
- id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils14.generateId)(),
3948
+ id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils24.generateId)(),
3394
3949
  url: value.annotation.url,
3395
3950
  title: value.annotation.title
3396
3951
  });
@@ -3398,7 +3953,7 @@ var OpenAIResponsesLanguageModel = class {
3398
3953
  controller.enqueue({
3399
3954
  type: "source",
3400
3955
  sourceType: "document",
3401
- id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils14.generateId)(),
3956
+ id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils24.generateId)(),
3402
3957
  mediaType: "text/plain",
3403
3958
  title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
3404
3959
  filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
@@ -3434,196 +3989,6 @@ var OpenAIResponsesLanguageModel = class {
3434
3989
  };
3435
3990
  }
3436
3991
  };
3437
- var usageSchema2 = import_v416.z.object({
3438
- input_tokens: import_v416.z.number(),
3439
- input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
3440
- output_tokens: import_v416.z.number(),
3441
- output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
3442
- });
3443
- var textDeltaChunkSchema = import_v416.z.object({
3444
- type: import_v416.z.literal("response.output_text.delta"),
3445
- item_id: import_v416.z.string(),
3446
- delta: import_v416.z.string(),
3447
- logprobs: LOGPROBS_SCHEMA.nullish()
3448
- });
3449
- var errorChunkSchema = import_v416.z.object({
3450
- type: import_v416.z.literal("error"),
3451
- code: import_v416.z.string(),
3452
- message: import_v416.z.string(),
3453
- param: import_v416.z.string().nullish(),
3454
- sequence_number: import_v416.z.number()
3455
- });
3456
- var responseFinishedChunkSchema = import_v416.z.object({
3457
- type: import_v416.z.enum(["response.completed", "response.incomplete"]),
3458
- response: import_v416.z.object({
3459
- incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
3460
- usage: usageSchema2,
3461
- service_tier: import_v416.z.string().nullish()
3462
- })
3463
- });
3464
- var responseCreatedChunkSchema = import_v416.z.object({
3465
- type: import_v416.z.literal("response.created"),
3466
- response: import_v416.z.object({
3467
- id: import_v416.z.string(),
3468
- created_at: import_v416.z.number(),
3469
- model: import_v416.z.string(),
3470
- service_tier: import_v416.z.string().nullish()
3471
- })
3472
- });
3473
- var responseOutputItemAddedSchema = import_v416.z.object({
3474
- type: import_v416.z.literal("response.output_item.added"),
3475
- output_index: import_v416.z.number(),
3476
- item: import_v416.z.discriminatedUnion("type", [
3477
- import_v416.z.object({
3478
- type: import_v416.z.literal("message"),
3479
- id: import_v416.z.string()
3480
- }),
3481
- import_v416.z.object({
3482
- type: import_v416.z.literal("reasoning"),
3483
- id: import_v416.z.string(),
3484
- encrypted_content: import_v416.z.string().nullish()
3485
- }),
3486
- import_v416.z.object({
3487
- type: import_v416.z.literal("function_call"),
3488
- id: import_v416.z.string(),
3489
- call_id: import_v416.z.string(),
3490
- name: import_v416.z.string(),
3491
- arguments: import_v416.z.string()
3492
- }),
3493
- import_v416.z.object({
3494
- type: import_v416.z.literal("web_search_call"),
3495
- id: import_v416.z.string(),
3496
- status: import_v416.z.string(),
3497
- action: import_v416.z.object({
3498
- type: import_v416.z.literal("search"),
3499
- query: import_v416.z.string().optional()
3500
- }).nullish()
3501
- }),
3502
- import_v416.z.object({
3503
- type: import_v416.z.literal("computer_call"),
3504
- id: import_v416.z.string(),
3505
- status: import_v416.z.string()
3506
- }),
3507
- import_v416.z.object({
3508
- type: import_v416.z.literal("file_search_call"),
3509
- id: import_v416.z.string()
3510
- }),
3511
- import_v416.z.object({
3512
- type: import_v416.z.literal("image_generation_call"),
3513
- id: import_v416.z.string()
3514
- }),
3515
- import_v416.z.object({
3516
- type: import_v416.z.literal("code_interpreter_call"),
3517
- id: import_v416.z.string(),
3518
- container_id: import_v416.z.string(),
3519
- code: import_v416.z.string().nullable(),
3520
- outputs: import_v416.z.array(
3521
- import_v416.z.discriminatedUnion("type", [
3522
- import_v416.z.object({ type: import_v416.z.literal("logs"), logs: import_v416.z.string() }),
3523
- import_v416.z.object({ type: import_v416.z.literal("image"), url: import_v416.z.string() })
3524
- ])
3525
- ).nullable(),
3526
- status: import_v416.z.string()
3527
- })
3528
- ])
3529
- });
3530
- var responseOutputItemDoneSchema = import_v416.z.object({
3531
- type: import_v416.z.literal("response.output_item.done"),
3532
- output_index: import_v416.z.number(),
3533
- item: import_v416.z.discriminatedUnion("type", [
3534
- import_v416.z.object({
3535
- type: import_v416.z.literal("message"),
3536
- id: import_v416.z.string()
3537
- }),
3538
- import_v416.z.object({
3539
- type: import_v416.z.literal("reasoning"),
3540
- id: import_v416.z.string(),
3541
- encrypted_content: import_v416.z.string().nullish()
3542
- }),
3543
- import_v416.z.object({
3544
- type: import_v416.z.literal("function_call"),
3545
- id: import_v416.z.string(),
3546
- call_id: import_v416.z.string(),
3547
- name: import_v416.z.string(),
3548
- arguments: import_v416.z.string(),
3549
- status: import_v416.z.literal("completed")
3550
- }),
3551
- codeInterpreterCallItem,
3552
- imageGenerationCallItem,
3553
- webSearchCallItem,
3554
- fileSearchCallItem,
3555
- localShellCallItem,
3556
- import_v416.z.object({
3557
- type: import_v416.z.literal("computer_call"),
3558
- id: import_v416.z.string(),
3559
- status: import_v416.z.literal("completed")
3560
- })
3561
- ])
3562
- });
3563
- var responseFunctionCallArgumentsDeltaSchema = import_v416.z.object({
3564
- type: import_v416.z.literal("response.function_call_arguments.delta"),
3565
- item_id: import_v416.z.string(),
3566
- output_index: import_v416.z.number(),
3567
- delta: import_v416.z.string()
3568
- });
3569
- var responseCodeInterpreterCallCodeDeltaSchema = import_v416.z.object({
3570
- type: import_v416.z.literal("response.code_interpreter_call_code.delta"),
3571
- item_id: import_v416.z.string(),
3572
- output_index: import_v416.z.number(),
3573
- delta: import_v416.z.string()
3574
- });
3575
- var responseCodeInterpreterCallCodeDoneSchema = import_v416.z.object({
3576
- type: import_v416.z.literal("response.code_interpreter_call_code.done"),
3577
- item_id: import_v416.z.string(),
3578
- output_index: import_v416.z.number(),
3579
- code: import_v416.z.string()
3580
- });
3581
- var responseAnnotationAddedSchema = import_v416.z.object({
3582
- type: import_v416.z.literal("response.output_text.annotation.added"),
3583
- annotation: import_v416.z.discriminatedUnion("type", [
3584
- import_v416.z.object({
3585
- type: import_v416.z.literal("url_citation"),
3586
- url: import_v416.z.string(),
3587
- title: import_v416.z.string()
3588
- }),
3589
- import_v416.z.object({
3590
- type: import_v416.z.literal("file_citation"),
3591
- file_id: import_v416.z.string(),
3592
- filename: import_v416.z.string().nullish(),
3593
- index: import_v416.z.number().nullish(),
3594
- start_index: import_v416.z.number().nullish(),
3595
- end_index: import_v416.z.number().nullish(),
3596
- quote: import_v416.z.string().nullish()
3597
- })
3598
- ])
3599
- });
3600
- var responseReasoningSummaryPartAddedSchema = import_v416.z.object({
3601
- type: import_v416.z.literal("response.reasoning_summary_part.added"),
3602
- item_id: import_v416.z.string(),
3603
- summary_index: import_v416.z.number()
3604
- });
3605
- var responseReasoningSummaryTextDeltaSchema = import_v416.z.object({
3606
- type: import_v416.z.literal("response.reasoning_summary_text.delta"),
3607
- item_id: import_v416.z.string(),
3608
- summary_index: import_v416.z.number(),
3609
- delta: import_v416.z.string()
3610
- });
3611
- var openaiResponsesChunkSchema = import_v416.z.union([
3612
- textDeltaChunkSchema,
3613
- responseFinishedChunkSchema,
3614
- responseCreatedChunkSchema,
3615
- responseOutputItemAddedSchema,
3616
- responseOutputItemDoneSchema,
3617
- responseFunctionCallArgumentsDeltaSchema,
3618
- responseCodeInterpreterCallCodeDeltaSchema,
3619
- responseCodeInterpreterCallCodeDoneSchema,
3620
- responseAnnotationAddedSchema,
3621
- responseReasoningSummaryPartAddedSchema,
3622
- responseReasoningSummaryTextDeltaSchema,
3623
- errorChunkSchema,
3624
- import_v416.z.object({ type: import_v416.z.string() }).loose()
3625
- // fallback for unknown chunks
3626
- ]);
3627
3992
  function isTextDeltaChunk(chunk) {
3628
3993
  return chunk.type === "response.output_text.delta";
3629
3994
  }
@@ -3642,6 +4007,9 @@ function isResponseCreatedChunk(chunk) {
3642
4007
  function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3643
4008
  return chunk.type === "response.function_call_arguments.delta";
3644
4009
  }
4010
+ function isResponseImageGenerationCallPartialImageChunk(chunk) {
4011
+ return chunk.type === "response.image_generation_call.partial_image";
4012
+ }
3645
4013
  function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
3646
4014
  return chunk.type === "response.code_interpreter_call_code.delta";
3647
4015
  }
@@ -3700,60 +4068,28 @@ function getResponsesModelConfig(modelId) {
3700
4068
  isReasoningModel: false
3701
4069
  };
3702
4070
  }
3703
- var openaiResponsesProviderOptionsSchema = import_v416.z.object({
3704
- include: import_v416.z.array(
3705
- import_v416.z.enum([
3706
- "reasoning.encrypted_content",
3707
- "file_search_call.results",
3708
- "message.output_text.logprobs"
3709
- ])
3710
- ).nullish(),
3711
- instructions: import_v416.z.string().nullish(),
3712
- /**
3713
- * Return the log probabilities of the tokens.
3714
- *
3715
- * Setting to true will return the log probabilities of the tokens that
3716
- * were generated.
3717
- *
3718
- * Setting to a number will return the log probabilities of the top n
3719
- * tokens that were generated.
3720
- *
3721
- * @see https://platform.openai.com/docs/api-reference/responses/create
3722
- * @see https://cookbook.openai.com/examples/using_logprobs
3723
- */
3724
- logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
3725
- /**
3726
- * The maximum number of total calls to built-in tools that can be processed in a response.
3727
- * This maximum number applies across all built-in tool calls, not per individual tool.
3728
- * Any further attempts to call a tool by the model will be ignored.
3729
- */
3730
- maxToolCalls: import_v416.z.number().nullish(),
3731
- metadata: import_v416.z.any().nullish(),
3732
- parallelToolCalls: import_v416.z.boolean().nullish(),
3733
- previousResponseId: import_v416.z.string().nullish(),
3734
- promptCacheKey: import_v416.z.string().nullish(),
3735
- reasoningEffort: import_v416.z.string().nullish(),
3736
- reasoningSummary: import_v416.z.string().nullish(),
3737
- safetyIdentifier: import_v416.z.string().nullish(),
3738
- serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
3739
- store: import_v416.z.boolean().nullish(),
3740
- strictJsonSchema: import_v416.z.boolean().nullish(),
3741
- textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
3742
- user: import_v416.z.string().nullish()
3743
- });
3744
4071
 
3745
4072
  // src/speech/openai-speech-model.ts
3746
- var import_provider_utils15 = require("@ai-sdk/provider-utils");
3747
- var import_v417 = require("zod/v4");
3748
- var OpenAIProviderOptionsSchema = import_v417.z.object({
3749
- instructions: import_v417.z.string().nullish(),
3750
- speed: import_v417.z.number().min(0.25).max(4).default(1).nullish()
3751
- });
4073
+ var import_provider_utils26 = require("@ai-sdk/provider-utils");
4074
+
4075
+ // src/speech/openai-speech-options.ts
4076
+ var import_provider_utils25 = require("@ai-sdk/provider-utils");
4077
+ var z18 = __toESM(require("zod/v4"));
4078
+ var openaiSpeechProviderOptionsSchema = (0, import_provider_utils25.lazyValidator)(
4079
+ () => (0, import_provider_utils25.zodSchema)(
4080
+ z18.object({
4081
+ instructions: z18.string().nullish(),
4082
+ speed: z18.number().min(0.25).max(4).default(1).nullish()
4083
+ })
4084
+ )
4085
+ );
4086
+
4087
+ // src/speech/openai-speech-model.ts
3752
4088
  var OpenAISpeechModel = class {
3753
4089
  constructor(modelId, config) {
3754
4090
  this.modelId = modelId;
3755
4091
  this.config = config;
3756
- this.specificationVersion = "v2";
4092
+ this.specificationVersion = "v3";
3757
4093
  }
3758
4094
  get provider() {
3759
4095
  return this.config.provider;
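
The dominant change in this hunk (and throughout the diff) is that module-level z.object(...) constants are replaced by schemas built through lazyValidator(() => zodSchema(...)) from @ai-sdk/provider-utils, and the speech model's specificationVersion moves from "v2" to "v3". Below is a minimal sketch of the deferred-construction pattern, assuming the wrapper simply memoizes its factory so the zod schema is only built on first use; this is an editorial illustration, not the actual lazyValidator implementation.

// Editorial sketch: a memoizing lazy wrapper in the spirit of lazyValidator.
import * as z from "zod/v4";

function lazySchema<S extends z.ZodType>(factory: () => S): () => S {
  let cached: S | undefined;
  // The factory runs once, on first access, and the result is reused.
  return () => (cached ??= factory());
}

// Same shape as the new openaiSpeechProviderOptionsSchema above.
const speechProviderOptions = lazySchema(() =>
  z.object({
    instructions: z.string().nullish(),
    speed: z.number().min(0.25).max(4).default(1).nullish(),
  })
);

// The zod object is only constructed here, when validation first happens.
const parsed = speechProviderOptions().parse({ speed: 1.5 });
console.log(parsed.speed); // 1.5
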
@@ -3768,10 +4104,10 @@ var OpenAISpeechModel = class {
3768
4104
  providerOptions
3769
4105
  }) {
3770
4106
  const warnings = [];
3771
- const openAIOptions = await (0, import_provider_utils15.parseProviderOptions)({
4107
+ const openAIOptions = await (0, import_provider_utils26.parseProviderOptions)({
3772
4108
  provider: "openai",
3773
4109
  providerOptions,
3774
- schema: OpenAIProviderOptionsSchema
4110
+ schema: openaiSpeechProviderOptionsSchema
3775
4111
  });
3776
4112
  const requestBody = {
3777
4113
  model: this.modelId,
@@ -3821,15 +4157,15 @@ var OpenAISpeechModel = class {
3821
4157
  value: audio,
3822
4158
  responseHeaders,
3823
4159
  rawValue: rawResponse
3824
- } = await (0, import_provider_utils15.postJsonToApi)({
4160
+ } = await (0, import_provider_utils26.postJsonToApi)({
3825
4161
  url: this.config.url({
3826
4162
  path: "/audio/speech",
3827
4163
  modelId: this.modelId
3828
4164
  }),
3829
- headers: (0, import_provider_utils15.combineHeaders)(this.config.headers(), options.headers),
4165
+ headers: (0, import_provider_utils26.combineHeaders)(this.config.headers(), options.headers),
3830
4166
  body: requestBody,
3831
4167
  failedResponseHandler: openaiFailedResponseHandler,
3832
- successfulResponseHandler: (0, import_provider_utils15.createBinaryResponseHandler)(),
4168
+ successfulResponseHandler: (0, import_provider_utils26.createBinaryResponseHandler)(),
3833
4169
  abortSignal: options.abortSignal,
3834
4170
  fetch: this.config.fetch
3835
4171
  });
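
On the wire, this hunk shows the speech model validating providerOptions.openai against the new schema via parseProviderOptions and then posting the JSON body to /audio/speech with a binary response handler. From the caller's side that corresponds roughly to the sketch below; experimental_generateSpeech and openai.speech() follow current AI SDK documentation, while the model id and option values are assumptions.

// Caller-side sketch (assumed AI SDK usage; adjust to your setup).
import { experimental_generateSpeech as generateSpeech } from "ai";
import { openai } from "@ai-sdk/openai";

const { audio } = await generateSpeech({
  model: openai.speech("gpt-4o-mini-tts"),
  text: "Hello from the speech model.",
  providerOptions: {
    // These keys are validated by openaiSpeechProviderOptionsSchema above.
    openai: { instructions: "Speak slowly and warmly.", speed: 1.25 },
  },
});

console.log(audio.uint8Array.byteLength);
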
@@ -3850,35 +4186,73 @@ var OpenAISpeechModel = class {
3850
4186
  };
3851
4187
 
3852
4188
  // src/transcription/openai-transcription-model.ts
3853
- var import_provider_utils16 = require("@ai-sdk/provider-utils");
3854
- var import_v419 = require("zod/v4");
4189
+ var import_provider_utils29 = require("@ai-sdk/provider-utils");
4190
+
4191
+ // src/transcription/openai-transcription-api.ts
4192
+ var import_provider_utils27 = require("@ai-sdk/provider-utils");
4193
+ var z19 = __toESM(require("zod/v4"));
4194
+ var openaiTranscriptionResponseSchema = (0, import_provider_utils27.lazyValidator)(
4195
+ () => (0, import_provider_utils27.zodSchema)(
4196
+ z19.object({
4197
+ text: z19.string(),
4198
+ language: z19.string().nullish(),
4199
+ duration: z19.number().nullish(),
4200
+ words: z19.array(
4201
+ z19.object({
4202
+ word: z19.string(),
4203
+ start: z19.number(),
4204
+ end: z19.number()
4205
+ })
4206
+ ).nullish(),
4207
+ segments: z19.array(
4208
+ z19.object({
4209
+ id: z19.number(),
4210
+ seek: z19.number(),
4211
+ start: z19.number(),
4212
+ end: z19.number(),
4213
+ text: z19.string(),
4214
+ tokens: z19.array(z19.number()),
4215
+ temperature: z19.number(),
4216
+ avg_logprob: z19.number(),
4217
+ compression_ratio: z19.number(),
4218
+ no_speech_prob: z19.number()
4219
+ })
4220
+ ).nullish()
4221
+ })
4222
+ )
4223
+ );
3855
4224
 
3856
4225
  // src/transcription/openai-transcription-options.ts
3857
- var import_v418 = require("zod/v4");
3858
- var openAITranscriptionProviderOptions = import_v418.z.object({
3859
- /**
3860
- * Additional information to include in the transcription response.
3861
- */
3862
- include: import_v418.z.array(import_v418.z.string()).optional(),
3863
- /**
3864
- * The language of the input audio in ISO-639-1 format.
3865
- */
3866
- language: import_v418.z.string().optional(),
3867
- /**
3868
- * An optional text to guide the model's style or continue a previous audio segment.
3869
- */
3870
- prompt: import_v418.z.string().optional(),
3871
- /**
3872
- * The sampling temperature, between 0 and 1.
3873
- * @default 0
3874
- */
3875
- temperature: import_v418.z.number().min(0).max(1).default(0).optional(),
3876
- /**
3877
- * The timestamp granularities to populate for this transcription.
3878
- * @default ['segment']
3879
- */
3880
- timestampGranularities: import_v418.z.array(import_v418.z.enum(["word", "segment"])).default(["segment"]).optional()
3881
- });
4226
+ var import_provider_utils28 = require("@ai-sdk/provider-utils");
4227
+ var z20 = __toESM(require("zod/v4"));
4228
+ var openAITranscriptionProviderOptions = (0, import_provider_utils28.lazyValidator)(
4229
+ () => (0, import_provider_utils28.zodSchema)(
4230
+ z20.object({
4231
+ /**
4232
+ * Additional information to include in the transcription response.
4233
+ */
4234
+ include: z20.array(z20.string()).optional(),
4235
+ /**
4236
+ * The language of the input audio in ISO-639-1 format.
4237
+ */
4238
+ language: z20.string().optional(),
4239
+ /**
4240
+ * An optional text to guide the model's style or continue a previous audio segment.
4241
+ */
4242
+ prompt: z20.string().optional(),
4243
+ /**
4244
+ * The sampling temperature, between 0 and 1.
4245
+ * @default 0
4246
+ */
4247
+ temperature: z20.number().min(0).max(1).default(0).optional(),
4248
+ /**
4249
+ * The timestamp granularities to populate for this transcription.
4250
+ * @default ['segment']
4251
+ */
4252
+ timestampGranularities: z20.array(z20.enum(["word", "segment"])).default(["segment"]).optional()
4253
+ })
4254
+ )
4255
+ );
3882
4256
 
3883
4257
  // src/transcription/openai-transcription-model.ts
3884
4258
  var languageMap = {
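
The transcription response schema and provider options above were likewise moved into lazily constructed validators; the option fields themselves (include, language, prompt, temperature, timestampGranularities) are unchanged. A caller-side sketch of how these options are typically passed follows; experimental_transcribe and openai.transcription() follow current AI SDK documentation, while the file path and option values are assumptions.

// Caller-side sketch (assumed AI SDK usage; file path is hypothetical).
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("meeting.mp3"),
  providerOptions: {
    // Validated against openAITranscriptionProviderOptions above.
    openai: {
      language: "en",
      temperature: 0,
      timestampGranularities: ["segment"],
    },
  },
});

console.log(result.text);
console.log(result.durationInSeconds, result.segments.length);
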
@@ -3944,7 +4318,7 @@ var OpenAITranscriptionModel = class {
3944
4318
  constructor(modelId, config) {
3945
4319
  this.modelId = modelId;
3946
4320
  this.config = config;
3947
- this.specificationVersion = "v2";
4321
+ this.specificationVersion = "v3";
3948
4322
  }
3949
4323
  get provider() {
3950
4324
  return this.config.provider;
@@ -3955,15 +4329,15 @@ var OpenAITranscriptionModel = class {
3955
4329
  providerOptions
3956
4330
  }) {
3957
4331
  const warnings = [];
3958
- const openAIOptions = await (0, import_provider_utils16.parseProviderOptions)({
4332
+ const openAIOptions = await (0, import_provider_utils29.parseProviderOptions)({
3959
4333
  provider: "openai",
3960
4334
  providerOptions,
3961
4335
  schema: openAITranscriptionProviderOptions
3962
4336
  });
3963
4337
  const formData = new FormData();
3964
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils16.convertBase64ToUint8Array)(audio)]);
4338
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils29.convertBase64ToUint8Array)(audio)]);
3965
4339
  formData.append("model", this.modelId);
3966
- const fileExtension = (0, import_provider_utils16.mediaTypeToExtension)(mediaType);
4340
+ const fileExtension = (0, import_provider_utils29.mediaTypeToExtension)(mediaType);
3967
4341
  formData.append(
3968
4342
  "file",
3969
4343
  new File([blob], "audio", { type: mediaType }),
@@ -4008,15 +4382,15 @@ var OpenAITranscriptionModel = class {
4008
4382
  value: response,
4009
4383
  responseHeaders,
4010
4384
  rawValue: rawResponse
4011
- } = await (0, import_provider_utils16.postFormDataToApi)({
4385
+ } = await (0, import_provider_utils29.postFormDataToApi)({
4012
4386
  url: this.config.url({
4013
4387
  path: "/audio/transcriptions",
4014
4388
  modelId: this.modelId
4015
4389
  }),
4016
- headers: (0, import_provider_utils16.combineHeaders)(this.config.headers(), options.headers),
4390
+ headers: (0, import_provider_utils29.combineHeaders)(this.config.headers(), options.headers),
4017
4391
  formData,
4018
4392
  failedResponseHandler: openaiFailedResponseHandler,
4019
- successfulResponseHandler: (0, import_provider_utils16.createJsonResponseHandler)(
4393
+ successfulResponseHandler: (0, import_provider_utils29.createJsonResponseHandler)(
4020
4394
  openaiTranscriptionResponseSchema
4021
4395
  ),
4022
4396
  abortSignal: options.abortSignal,
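
For reference, the request assembled in the two hunks above is an ordinary multipart form: the audio bytes (or a decoded base64 string) become a File entry next to the model id, and the result is posted to /audio/transcriptions. A simplified, editorial restatement of that construction, with the filename-extension handling trimmed, looks like this:

// Editorial restatement of the FormData construction shown above;
// convertBase64ToUint8Array comes from @ai-sdk/provider-utils as in the diff.
import { convertBase64ToUint8Array } from "@ai-sdk/provider-utils";

function buildTranscriptionFormData(
  modelId: string,
  audio: Uint8Array | string, // raw bytes or a base64-encoded string
  mediaType: string
): FormData {
  const blob =
    audio instanceof Uint8Array
      ? new Blob([audio])
      : new Blob([convertBase64ToUint8Array(audio)]);

  const formData = new FormData();
  formData.append("model", modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  return formData;
}
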
@@ -4046,49 +4420,23 @@ var OpenAITranscriptionModel = class {
4046
4420
  };
4047
4421
  }
4048
4422
  };
4049
- var openaiTranscriptionResponseSchema = import_v419.z.object({
4050
- text: import_v419.z.string(),
4051
- language: import_v419.z.string().nullish(),
4052
- duration: import_v419.z.number().nullish(),
4053
- words: import_v419.z.array(
4054
- import_v419.z.object({
4055
- word: import_v419.z.string(),
4056
- start: import_v419.z.number(),
4057
- end: import_v419.z.number()
4058
- })
4059
- ).nullish(),
4060
- segments: import_v419.z.array(
4061
- import_v419.z.object({
4062
- id: import_v419.z.number(),
4063
- seek: import_v419.z.number(),
4064
- start: import_v419.z.number(),
4065
- end: import_v419.z.number(),
4066
- text: import_v419.z.string(),
4067
- tokens: import_v419.z.array(import_v419.z.number()),
4068
- temperature: import_v419.z.number(),
4069
- avg_logprob: import_v419.z.number(),
4070
- compression_ratio: import_v419.z.number(),
4071
- no_speech_prob: import_v419.z.number()
4072
- })
4073
- ).nullish()
4074
- });
4075
4423
 
4076
4424
  // src/version.ts
4077
- var VERSION = true ? "2.1.0-beta.9" : "0.0.0-test";
4425
+ var VERSION = true ? "3.0.0-beta.18" : "0.0.0-test";
4078
4426
 
4079
4427
  // src/openai-provider.ts
4080
4428
  function createOpenAI(options = {}) {
4081
4429
  var _a, _b;
4082
- const baseURL = (_a = (0, import_provider_utils17.withoutTrailingSlash)(
4083
- (0, import_provider_utils17.loadOptionalSetting)({
4430
+ const baseURL = (_a = (0, import_provider_utils30.withoutTrailingSlash)(
4431
+ (0, import_provider_utils30.loadOptionalSetting)({
4084
4432
  settingValue: options.baseURL,
4085
4433
  environmentVariableName: "OPENAI_BASE_URL"
4086
4434
  })
4087
4435
  )) != null ? _a : "https://api.openai.com/v1";
4088
4436
  const providerName = (_b = options.name) != null ? _b : "openai";
4089
- const getHeaders = () => (0, import_provider_utils17.withUserAgentSuffix)(
4437
+ const getHeaders = () => (0, import_provider_utils30.withUserAgentSuffix)(
4090
4438
  {
4091
- Authorization: `Bearer ${(0, import_provider_utils17.loadApiKey)({
4439
+ Authorization: `Bearer ${(0, import_provider_utils30.loadApiKey)({
4092
4440
  apiKey: options.apiKey,
4093
4441
  environmentVariableName: "OPENAI_API_KEY",
4094
4442
  description: "OpenAI"