@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.7

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
@@ -26,20 +26,21 @@ __export(internal_exports, {
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openaiProviderOptions: () => openaiProviderOptions
  });
  module.exports = __toCommonJS(internal_exports);

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -91,7 +92,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -106,14 +107,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
@@ -168,41 +175,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -245,21 +231,72 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
- var import_provider_utils = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
- var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
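Note: with this change, per-request OpenAI settings (logitBias, logprobs, parallelToolCalls, user, reasoningEffort, maxCompletionTokens, store, metadata, prediction) are validated against the exported `openaiProviderOptions` zod schema instead of being read from model settings. A minimal call-site sketch, assuming the matching `ai` canary's `generateText` and its `providerOptions` plumbing (prompt and option values are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Options that used to be model settings now travel per call under
// providerOptions.openai and are parsed with `openaiProviderOptions`:
const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku about diffs.',
  providerOptions: {
    openai: {
      user: 'user-123', // end-user identifier for abuse monitoring
      parallelToolCalls: false, // disable parallel function calling
      logprobs: 3, // request log probabilities for the top 3 tokens
    },
  },
});
```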
@@ -282,7 +319,6 @@ var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -290,48 +326,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -403,7 +397,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -416,8 +410,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -431,21 +430,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -454,13 +441,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -471,19 +458,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -547,46 +534,50 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils2.postJsonToApi)({
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -610,27 +601,21 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils2.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ text: choice.message.content != null ? { type: "text", text: choice.message.content } : void 0,
+ toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
  var _a2;
  return {
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  };
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : void 0,
+ outputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : void 0
  },
  request: { body },
  response: {
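Note: `doGenerate` now reports `usage.inputTokens`/`usage.outputTokens` (previously `promptTokens`/`completionTokens`), and missing counts surface as `undefined` rather than `NaN`. A sketch of an adjusted consumer, assuming the usage object is forwarded unchanged by the matching `ai` canary:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { usage } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});
// Guard both fields: they are undefined when the API omits usage data.
console.log(`input: ${usage.inputTokens ?? 'n/a'}, output: ${usage.outputTokens ?? 'n/a'}`);
```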
@@ -644,48 +629,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
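Note: the `simulateStreaming` model setting is gone; `doStream` always performs a real streaming request now. Callers that depended on it can emulate the old behavior themselves. A hypothetical helper (not part of the SDK), resolving the full completion and yielding it as a single chunk:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Hypothetical stand-in for the removed setting: generate the full
// text, then expose it through a one-chunk async iterator.
async function* simulateStream(prompt: string): AsyncGenerator<string> {
  const { text } = await generateText({ model: openai('gpt-4o'), prompt });
  yield text; // entire completion emitted as one "delta"
}
```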
@@ -693,15 +636,15 @@ var OpenAIChatLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -710,13 +653,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -748,10 +690,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -775,8 +715,8 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  const mappedLogprobs = mapOpenAIChatLogProbsOutput(
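Note: streamed text parts change shape across all models in this diff: `{ type: 'text-delta', textDelta }` becomes `{ type: 'text', text }`. A consumer of the raw stream parts would be updated roughly like this (minimal sketch; the part shape is inferred from the diff):

```ts
// Print streamed text parts after the rename.
async function printTextParts(
  stream: AsyncIterable<{ type: string; text?: string }>,
): Promise<void> {
  for await (const part of stream) {
    if (part.type === 'text' && part.text != null) {
      // previously: part.type === 'text-delta' and part.textDelta
      process.stdout.write(part.text);
    }
  }
}
```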
@@ -786,16 +726,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils2.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -836,11 +768,11 @@ var OpenAIChatLanguageModel = class {
  argsTextDelta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -863,11 +795,11 @@ var OpenAIChatLanguageModel = class {
  toolName: toolCall.function.name,
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -877,15 +809,11 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
@@ -897,104 +825,96 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1036,8 +956,8 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1147,7 +1067,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1189,7 +1109,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1209,15 +1129,15 @@ var OpenAICompletionLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1225,10 +1145,10 @@ var OpenAICompletionLanguageModel = class {
  });
  const choice = response.choices[0];
  return {
- text: choice.text,
+ text: { type: "text", text: choice.text },
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1249,24 +1169,24 @@ var OpenAICompletionLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1293,10 +1213,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1304,8 +1222,8 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1332,46 +1250,46 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1379,11 +1297,11 @@ var openaiCompletionChunkSchema = import_zod3.z.union([

  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1412,12 +1330,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
@@ -1426,7 +1348,7 @@ var OpenAIEmbeddingModel = class {
  user: this.settings.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
@@ -1435,18 +1357,18 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
+ data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
+ usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
  });

  // src/openai-image-model.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod6 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1492,12 +1414,12 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
@@ -1507,7 +1429,7 @@ var OpenAIImageModel = class {
  response_format: "b64_json"
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
@@ -1524,25 +1446,19 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
  });

  // src/openai-transcription-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod6.z.object({
- include: import_zod6.z.array(import_zod6.z.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: import_zod6.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: import_zod6.z.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: import_zod6.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod7 = require("zod");
+ var openAIProviderOptionsSchema = import_zod7.z.object({
+ include: import_zod7.z.array(import_zod7.z.string()).nullish(),
+ language: import_zod7.z.string().nullish(),
+ prompt: import_zod7.z.string().nullish(),
+ temperature: import_zod7.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1617,28 +1533,29 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils6.parseProviderOptions)({
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: OpenAIProviderOptionsSchema
+ schema: openAIProviderOptionsSchema
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils6.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
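Note: the transcription provider options schema now uses `.nullish()` instead of `.optional().describe(...)`, and nullish values are normalized to `undefined` before being appended to the form data (which now stringifies values via `String(value)`). A call-site sketch, assuming the matching `ai` canary's `experimental_transcribe` and the provider's transcription factory:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('speech.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1 input language
      timestampGranularities: ['word'], // schema default is ['segment']
    },
  },
});
console.log(result.text);
```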
@@ -1655,15 +1572,15 @@ var OpenAITranscriptionModel = class {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils6.postFormDataToApi)({
+ } = await (0, import_provider_utils7.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1689,22 +1606,22 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod6.z.object({
- text: import_zod6.z.string(),
- language: import_zod6.z.string().nullish(),
- duration: import_zod6.z.number().nullish(),
- words: import_zod6.z.array(
- import_zod6.z.object({
- word: import_zod6.z.string(),
- start: import_zod6.z.number(),
- end: import_zod6.z.number()
+ var openaiTranscriptionResponseSchema = import_zod7.z.object({
+ text: import_zod7.z.string(),
+ language: import_zod7.z.string().nullish(),
+ duration: import_zod7.z.number().nullish(),
+ words: import_zod7.z.array(
+ import_zod7.z.object({
+ word: import_zod7.z.string(),
+ start: import_zod7.z.number(),
+ end: import_zod7.z.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1921,7 +1838,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1964,7 +1881,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1975,7 +1892,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2045,58 +1962,58 @@ var OpenAIResponsesLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postJsonToApi)({
+ } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
- import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string(),
- output: import_zod7.z.array(
- import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message"),
- role: import_zod7.z.literal("assistant"),
- content: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("output_text"),
- text: import_zod7.z.string(),
- annotations: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- start_index: import_zod7.z.number(),
- end_index: import_zod7.z.number(),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+ import_zod8.z.object({
+ id: import_zod8.z.string(),
+ created_at: import_zod8.z.number(),
+ model: import_zod8.z.string(),
+ output: import_zod8.z.array(
+ import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message"),
+ role: import_zod8.z.literal("assistant"),
+ content: import_zod8.z.array(
+ import_zod8.z.object({
+ type: import_zod8.z.literal("output_text"),
+ text: import_zod8.z.string(),
+ annotations: import_zod8.z.array(
+ import_zod8.z.object({
+ type: import_zod8.z.literal("url_citation"),
+ start_index: import_zod8.z.number(),
+ end_index: import_zod8.z.number(),
+ url: import_zod8.z.string(),
+ title: import_zod8.z.string()
  })
  )
  })
  )
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string()
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("web_search_call")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("web_search_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("computer_call")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("computer_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("reasoning")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("reasoning")
  })
  ])
  ),
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2105,19 +2022,24 @@ var OpenAIResponsesLanguageModel = class {
2105
2022
  });
2106
2023
  const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2107
2024
  const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2025
+ type: "tool-call",
2108
2026
  toolCallType: "function",
2109
2027
  toolCallId: output.call_id,
2110
2028
  toolName: output.name,
2111
2029
  args: output.arguments
2112
2030
  }));
2113
2031
  return {
2114
- text: outputTextElements.map((content) => content.text).join("\n"),
2032
+ text: {
2033
+ type: "text",
2034
+ text: outputTextElements.map((content) => content.text).join("\n")
2035
+ },
2115
2036
  sources: outputTextElements.flatMap(
2116
2037
  (content) => content.annotations.map((annotation) => {
2117
2038
  var _a2, _b2, _c2;
2118
2039
  return {
2040
+ type: "source",
2119
2041
  sourceType: "url",
2120
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils7.generateId)(),
2042
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
2121
2043
  url: annotation.url,
2122
2044
  title: annotation.title
2123
2045
  };
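
The mapping change above gives every generated part an explicit discriminator: text is wrapped as `{ type: "text", ... }`, tool calls gain `type: "tool-call"`, and sources gain `type: "source"`. A sketch of the resulting shapes with an illustrative consumer (the type and function names here are not from the package):

    type GeneratedPart =
      | { type: "text"; text: string }
      | {
          type: "tool-call";
          toolCallType: "function";
          toolCallId: string;
          toolName: string;
          args: string;
        }
      | { type: "source"; sourceType: "url"; id: string; url: string; title: string };

    // Consumers can now switch on the discriminator instead of sniffing fields:
    function describePart(part: GeneratedPart): string {
      switch (part.type) {
        case "text":
          return `text: ${part.text}`;
        case "tool-call":
          return `tool call ${part.toolName}(${part.args})`;
        case "source":
          return `source: ${part.url}`;
      }
    }
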
@@ -2129,8 +2051,8 @@ var OpenAIResponsesLanguageModel = class {
2129
2051
  }),
2130
2052
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2131
2053
  usage: {
2132
- promptTokens: response.usage.input_tokens,
2133
- completionTokens: response.usage.output_tokens
2054
+ inputTokens: response.usage.input_tokens,
2055
+ outputTokens: response.usage.output_tokens
2134
2056
  },
2135
2057
  request: { body },
2136
2058
  response: {
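
The usage rename above is mechanical but breaking for consumers: `promptTokens` becomes `inputTokens` and `completionTokens` becomes `outputTokens`, matching the Responses API's own `input_tokens`/`output_tokens` fields. An illustrative adapter (not part of the package):

    function toUsage(raw: { input_tokens: number; output_tokens: number }) {
      return { inputTokens: raw.input_tokens, outputTokens: raw.output_tokens };
    }
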
@@ -2152,18 +2074,18 @@ var OpenAIResponsesLanguageModel = class {
2152
2074
  }
2153
2075
  async doStream(options) {
2154
2076
  const { args: body, warnings } = this.getArgs(options);
2155
- const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
2077
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
2156
2078
  url: this.config.url({
2157
2079
  path: "/responses",
2158
2080
  modelId: this.modelId
2159
2081
  }),
2160
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
2082
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
2161
2083
  body: {
2162
2084
  ...body,
2163
2085
  stream: true
2164
2086
  },
2165
2087
  failedResponseHandler: openaiFailedResponseHandler,
2166
- successfulResponseHandler: (0, import_provider_utils7.createEventSourceResponseHandler)(
2088
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
2167
2089
  openaiResponsesChunkSchema
2168
2090
  ),
2169
2091
  abortSignal: options.abortSignal,
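
Aside from the `import_provider_utils8` alias bump, `doStream` still POSTs the same body with `stream: true` forced on and hands the SSE body to an event-source handler. A rough fetch-based stand-in for that request (endpoint and headers are illustrative; the real code goes through `postJsonToApi`, which adds error handling and header combination this sketch omits):

    async function streamResponses(body: object, apiKey: string) {
      const res = await fetch("https://api.openai.com/v1/responses", {
        method: "POST",
        headers: {
          "content-type": "application/json",
          authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({ ...body, stream: true }),
      });
      if (!res.ok) throw new Error(`responses request failed: ${res.status}`);
      return res.body; // server-sent events stream
    }
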
@@ -2171,8 +2093,10 @@ var OpenAIResponsesLanguageModel = class {
2171
2093
  });
2172
2094
  const self = this;
2173
2095
  let finishReason = "unknown";
2174
- let promptTokens = NaN;
2175
- let completionTokens = NaN;
2096
+ const usage = {
2097
+ inputTokens: void 0,
2098
+ outputTokens: void 0
2099
+ };
2176
2100
  let cachedPromptTokens = null;
2177
2101
  let reasoningTokens = null;
2178
2102
  let responseId = null;
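
Initializing the counters to `undefined` instead of `NaN` changes how unreported usage serializes, which is observable downstream. A small illustration (not from the diff itself):

    JSON.stringify({ inputTokens: NaN });       // '{"inputTokens":null}'
    JSON.stringify({ inputTokens: undefined }); // '{}' (key omitted entirely)
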
@@ -2224,8 +2148,8 @@ var OpenAIResponsesLanguageModel = class {
2224
2148
  });
2225
2149
  } else if (isTextDeltaChunk(value)) {
2226
2150
  controller.enqueue({
2227
- type: "text-delta",
2228
- textDelta: value.delta
2151
+ type: "text",
2152
+ text: value.delta
2229
2153
  });
2230
2154
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2231
2155
  ongoingToolCalls[value.output_index] = void 0;
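
The stream-part rename above (`text-delta`/`textDelta` to `text`/`text`) is another consumer-visible break. A sketch covering both shapes (helper name is illustrative):

    type TextStreamPart =
      | { type: "text-delta"; textDelta: string } // emitted by canary.5
      | { type: "text"; text: string };           // emitted by canary.7

    function textOf(part: TextStreamPart): string {
      return part.type === "text" ? part.text : part.textDelta;
    }
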
@@ -2242,19 +2166,17 @@ var OpenAIResponsesLanguageModel = class {
2242
2166
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2243
2167
  hasToolCalls
2244
2168
  });
2245
- promptTokens = value.response.usage.input_tokens;
2246
- completionTokens = value.response.usage.output_tokens;
2169
+ usage.inputTokens = value.response.usage.input_tokens;
2170
+ usage.outputTokens = value.response.usage.output_tokens;
2247
2171
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2248
2172
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2249
2173
  } else if (isResponseAnnotationAddedChunk(value)) {
2250
2174
  controller.enqueue({
2251
2175
  type: "source",
2252
- source: {
2253
- sourceType: "url",
2254
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils7.generateId)(),
2255
- url: value.annotation.url,
2256
- title: value.annotation.title
2257
- }
2176
+ sourceType: "url",
2177
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
2178
+ url: value.annotation.url,
2179
+ title: value.annotation.title
2258
2180
  });
2259
2181
  }
2260
2182
  },
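
In the hunk above the `source` stream part is flattened: the fields move off the nested `source` object onto the part itself. Illustrative before/after values (the id is made up):

    const sourcePartCanary5 = {
      type: "source",
      source: {
        sourceType: "url",
        id: "src_1",
        url: "https://example.com",
        title: "Example",
      },
    };

    const sourcePartCanary7 = {
      type: "source",
      sourceType: "url",
      id: "src_1",
      url: "https://example.com",
      title: "Example",
    };
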
@@ -2262,7 +2184,7 @@ var OpenAIResponsesLanguageModel = class {
2262
2184
  controller.enqueue({
2263
2185
  type: "finish",
2264
2186
  finishReason,
2265
- usage: { promptTokens, completionTokens },
2187
+ usage,
2266
2188
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
2267
2189
  providerMetadata: {
2268
2190
  openai: {
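
When cached or reasoning token counts were reported, the finish part carries them under `providerMetadata.openai`. A sketch of the emitted part; the metadata key names are assumed from the local variable names, since the hunk cuts off here:

    const finishPart = {
      type: "finish",
      finishReason: "stop",
      usage: { inputTokens: 12, outputTokens: 34 }, // illustrative counts
      providerMetadata: {
        // key names assumed (cachedPromptTokens/reasoningTokens)
        openai: { cachedPromptTokens: 8, reasoningTokens: 20 },
      },
    };
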
@@ -2282,79 +2204,79 @@ var OpenAIResponsesLanguageModel = class {
2282
2204
  };
2283
2205
  }
2284
2206
  };
2285
- var usageSchema = import_zod7.z.object({
2286
- input_tokens: import_zod7.z.number(),
2287
- input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
2288
- output_tokens: import_zod7.z.number(),
2289
- output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
2207
+ var usageSchema = import_zod8.z.object({
2208
+ input_tokens: import_zod8.z.number(),
2209
+ input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
2210
+ output_tokens: import_zod8.z.number(),
2211
+ output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
2290
2212
  });
2291
- var textDeltaChunkSchema = import_zod7.z.object({
2292
- type: import_zod7.z.literal("response.output_text.delta"),
2293
- delta: import_zod7.z.string()
2213
+ var textDeltaChunkSchema = import_zod8.z.object({
2214
+ type: import_zod8.z.literal("response.output_text.delta"),
2215
+ delta: import_zod8.z.string()
2294
2216
  });
2295
- var responseFinishedChunkSchema = import_zod7.z.object({
2296
- type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
2297
- response: import_zod7.z.object({
2298
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
2217
+ var responseFinishedChunkSchema = import_zod8.z.object({
2218
+ type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
2219
+ response: import_zod8.z.object({
2220
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
2299
2221
  usage: usageSchema
2300
2222
  })
2301
2223
  });
2302
- var responseCreatedChunkSchema = import_zod7.z.object({
2303
- type: import_zod7.z.literal("response.created"),
2304
- response: import_zod7.z.object({
2305
- id: import_zod7.z.string(),
2306
- created_at: import_zod7.z.number(),
2307
- model: import_zod7.z.string()
2224
+ var responseCreatedChunkSchema = import_zod8.z.object({
2225
+ type: import_zod8.z.literal("response.created"),
2226
+ response: import_zod8.z.object({
2227
+ id: import_zod8.z.string(),
2228
+ created_at: import_zod8.z.number(),
2229
+ model: import_zod8.z.string()
2308
2230
  })
2309
2231
  });
2310
- var responseOutputItemDoneSchema = import_zod7.z.object({
2311
- type: import_zod7.z.literal("response.output_item.done"),
2312
- output_index: import_zod7.z.number(),
2313
- item: import_zod7.z.discriminatedUnion("type", [
2314
- import_zod7.z.object({
2315
- type: import_zod7.z.literal("message")
2232
+ var responseOutputItemDoneSchema = import_zod8.z.object({
2233
+ type: import_zod8.z.literal("response.output_item.done"),
2234
+ output_index: import_zod8.z.number(),
2235
+ item: import_zod8.z.discriminatedUnion("type", [
2236
+ import_zod8.z.object({
2237
+ type: import_zod8.z.literal("message")
2316
2238
  }),
2317
- import_zod7.z.object({
2318
- type: import_zod7.z.literal("function_call"),
2319
- id: import_zod7.z.string(),
2320
- call_id: import_zod7.z.string(),
2321
- name: import_zod7.z.string(),
2322
- arguments: import_zod7.z.string(),
2323
- status: import_zod7.z.literal("completed")
2239
+ import_zod8.z.object({
2240
+ type: import_zod8.z.literal("function_call"),
2241
+ id: import_zod8.z.string(),
2242
+ call_id: import_zod8.z.string(),
2243
+ name: import_zod8.z.string(),
2244
+ arguments: import_zod8.z.string(),
2245
+ status: import_zod8.z.literal("completed")
2324
2246
  })
2325
2247
  ])
2326
2248
  });
2327
- var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
2328
- type: import_zod7.z.literal("response.function_call_arguments.delta"),
2329
- item_id: import_zod7.z.string(),
2330
- output_index: import_zod7.z.number(),
2331
- delta: import_zod7.z.string()
2249
+ var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
2250
+ type: import_zod8.z.literal("response.function_call_arguments.delta"),
2251
+ item_id: import_zod8.z.string(),
2252
+ output_index: import_zod8.z.number(),
2253
+ delta: import_zod8.z.string()
2332
2254
  });
2333
- var responseOutputItemAddedSchema = import_zod7.z.object({
2334
- type: import_zod7.z.literal("response.output_item.added"),
2335
- output_index: import_zod7.z.number(),
2336
- item: import_zod7.z.discriminatedUnion("type", [
2337
- import_zod7.z.object({
2338
- type: import_zod7.z.literal("message")
2255
+ var responseOutputItemAddedSchema = import_zod8.z.object({
2256
+ type: import_zod8.z.literal("response.output_item.added"),
2257
+ output_index: import_zod8.z.number(),
2258
+ item: import_zod8.z.discriminatedUnion("type", [
2259
+ import_zod8.z.object({
2260
+ type: import_zod8.z.literal("message")
2339
2261
  }),
2340
- import_zod7.z.object({
2341
- type: import_zod7.z.literal("function_call"),
2342
- id: import_zod7.z.string(),
2343
- call_id: import_zod7.z.string(),
2344
- name: import_zod7.z.string(),
2345
- arguments: import_zod7.z.string()
2262
+ import_zod8.z.object({
2263
+ type: import_zod8.z.literal("function_call"),
2264
+ id: import_zod8.z.string(),
2265
+ call_id: import_zod8.z.string(),
2266
+ name: import_zod8.z.string(),
2267
+ arguments: import_zod8.z.string()
2346
2268
  })
2347
2269
  ])
2348
2270
  });
2349
- var responseAnnotationAddedSchema = import_zod7.z.object({
2350
- type: import_zod7.z.literal("response.output_text.annotation.added"),
2351
- annotation: import_zod7.z.object({
2352
- type: import_zod7.z.literal("url_citation"),
2353
- url: import_zod7.z.string(),
2354
- title: import_zod7.z.string()
2271
+ var responseAnnotationAddedSchema = import_zod8.z.object({
2272
+ type: import_zod8.z.literal("response.output_text.annotation.added"),
2273
+ annotation: import_zod8.z.object({
2274
+ type: import_zod8.z.literal("url_citation"),
2275
+ url: import_zod8.z.string(),
2276
+ title: import_zod8.z.string()
2355
2277
  })
2356
2278
  });
2357
- var openaiResponsesChunkSchema = import_zod7.z.union([
2279
+ var openaiResponsesChunkSchema = import_zod8.z.union([
2358
2280
  textDeltaChunkSchema,
2359
2281
  responseFinishedChunkSchema,
2360
2282
  responseCreatedChunkSchema,
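
Every change in the schema block above is the same alias bump (`import_zod7` to `import_zod8`); the validated wire format is untouched. For instance, a text-delta event still parses against the equivalent standalone schema:

    import { z } from "zod";

    const textDeltaChunkSchema = z.object({
      type: z.literal("response.output_text.delta"),
      delta: z.string(),
    });

    // Same event shape accepted before and after the rename:
    textDeltaChunkSchema.parse({
      type: "response.output_text.delta",
      delta: "Hel",
    });
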
@@ -2362,7 +2284,7 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
2362
2284
  responseFunctionCallArgumentsDeltaSchema,
2363
2285
  responseOutputItemAddedSchema,
2364
2286
  responseAnnotationAddedSchema,
2365
- import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
2287
+ import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
2366
2288
  // fallback for unknown chunks
2367
2289
  ]);
2368
2290
  function isTextDeltaChunk(chunk) {
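
The trailing `z.object({ type: z.string() }).passthrough()` member keeps the chunk union forward-compatible: zod tries the specific schemas in order, and any unrecognized event type falls through to the permissive object instead of failing the stream. A standalone illustration:

    import { z } from "zod";

    const chunkSchema = z.union([
      z.object({ type: z.literal("response.output_text.delta"), delta: z.string() }),
      z.object({ type: z.string() }).passthrough(), // fallback for unknown chunks
    ]);

    // A hypothetical future event type is accepted rather than rejected:
    chunkSchema.parse({ type: "response.some_future_event", payload: 1 });
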
@@ -2407,15 +2329,15 @@ function getResponsesModelConfig(modelId) {
2407
2329
  requiredAutoTruncation: false
2408
2330
  };
2409
2331
  }
2410
- var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
2411
- metadata: import_zod7.z.any().nullish(),
2412
- parallelToolCalls: import_zod7.z.boolean().nullish(),
2413
- previousResponseId: import_zod7.z.string().nullish(),
2414
- store: import_zod7.z.boolean().nullish(),
2415
- user: import_zod7.z.string().nullish(),
2416
- reasoningEffort: import_zod7.z.string().nullish(),
2417
- strictSchemas: import_zod7.z.boolean().nullish(),
2418
- instructions: import_zod7.z.string().nullish()
2332
+ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2333
+ metadata: import_zod8.z.any().nullish(),
2334
+ parallelToolCalls: import_zod8.z.boolean().nullish(),
2335
+ previousResponseId: import_zod8.z.string().nullish(),
2336
+ store: import_zod8.z.boolean().nullish(),
2337
+ user: import_zod8.z.string().nullish(),
2338
+ reasoningEffort: import_zod8.z.string().nullish(),
2339
+ strictSchemas: import_zod8.z.boolean().nullish(),
2340
+ instructions: import_zod8.z.string().nullish()
2419
2341
  });
2420
2342
  // Annotate the CommonJS export names for ESM import in node:
2421
2343
  0 && (module.exports = {
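
For reference, all fields in the renamed provider-options schema above are optional (`nullish`). An illustrative options object using those field names (all values are made up):

    const responsesProviderOptions = {
      metadata: { traceId: "abc" },   // z.any()
      parallelToolCalls: true,
      previousResponseId: "resp_123",
      store: false,
      user: "user-42",
      reasoningEffort: "low",
      strictSchemas: true,
      instructions: "Answer briefly.",
    };
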
@@ -2425,6 +2347,7 @@ var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
2425
2347
  OpenAIImageModel,
2426
2348
  OpenAIResponsesLanguageModel,
2427
2349
  OpenAITranscriptionModel,
2428
- modelMaxImagesPerCall
2350
+ modelMaxImagesPerCall,
2351
+ openaiProviderOptions
2429
2352
  });
2430
2353
  //# sourceMappingURL=index.js.map
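
The export annotation confirms that `openaiProviderOptions` is now part of the internal surface. Assuming the package's internal subpath export (path not verified from this diff), it can be imported alongside the models:

    const {
      OpenAIResponsesLanguageModel,
      openaiProviderOptions,
    } = require("@ai-sdk/openai/internal");
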