@ai-sdk/openai 2.0.0-canary.4 → 2.0.0-canary.6

This diff reflects the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -26,20 +26,21 @@ __export(internal_exports, {
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openaiProviderOptions: () => openaiProviderOptions
  });
  module.exports = __toCommonJS(internal_exports);

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -91,7 +92,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -106,14 +107,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
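
Note: with this change, binary image and audio file parts are explicitly base64-encoded via convertToBase64 before being placed into the data URL or input_audio payload, so raw Uint8Array data no longer leaks into the request. A minimal sketch of a call this affects (illustrative only; the model id, file name, and the canary file-part shape are assumptions, not part of this diff):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

await generateText({
  model: openai("gpt-4o-audio-preview"),
  messages: [{
    role: "user",
    content: [
      { type: "text", text: "What is said in this clip?" },
      // Uint8Array data; the provider now base64-encodes it for input_audio
      { type: "file", mediaType: "audio/mpeg", data: await readFile("clip.mp3") }
    ]
  }]
});
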
@@ -168,41 +175,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
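
With useLegacyFunctionCalling gone, the converter always emits the current tool-calling wire format: assistant turns carry tool_calls and results use the tool role, never function_call / role: "function". For reference, a converted tool round-trip now always serializes along these lines (the ids and values are hypothetical):

// assistant turn with one tool call
{ role: "assistant", content: "", tool_calls: [{ id: "call_123", type: "function",
  function: { name: "getWeather", arguments: "{\"city\":\"Berlin\"}" } }] }
// matching tool result
{ role: "tool", tool_call_id: "call_123", content: "{\"temperature\":18}" }
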
@@ -245,21 +231,72 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
- var import_provider_utils = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
- var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
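
The new openaiProviderOptions schema is the notable addition here: chat-specific knobs (logitBias, logprobs, parallelToolCalls, user, reasoningEffort, maxCompletionTokens, store, metadata, prediction) move from constructor settings to per-call provider options validated with zod. A minimal sketch of the calling convention under the v5 canary API (model id and prompt are placeholders):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

await generateText({
  model: openai("gpt-4o"),
  prompt: "Write a haiku about diffs.",
  providerOptions: {
    // parsed and validated against openaiProviderOptions at request time
    openai: { user: "user-1234", parallelToolCalls: false, logprobs: 3 }
  }
});
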
@@ -282,7 +319,6 @@ var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -290,48 +326,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -403,7 +397,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -416,8 +410,13 @@
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
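
getArgs now resolves its OpenAI-specific options through parseProviderOptions instead of reading this.settings. Conceptually the helper validates the provider's slice of providerOptions against the zod schema and yields undefined when nothing was passed; an illustrative reimplementation (not the actual @ai-sdk/provider-utils source):

// sketch of the behavior relied on above
function parseProviderOptionsSketch({ provider, providerOptions, schema }) {
  const options = providerOptions == null ? undefined : providerOptions[provider];
  // throws if the given options do not match the schema
  return options == null ? undefined : schema.parse(options);
}
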
@@ -431,21 +430,9 @@
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -454,13 +441,13 @@
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -471,19 +458,19 @@
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
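
Throughout this release the standardized setting is renamed from maxTokens to maxOutputTokens; for chat and completion models it still maps onto OpenAI's max_tokens, and for the responses API onto max_output_tokens. Sketch, assuming the matching v5 canary ai package:

await generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize the changelog.",
  maxOutputTokens: 256 // was maxTokens in canary.4
});
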
@@ -547,26 +534,30 @@
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
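
canary.6 also special-cases gpt-4o-search-preview: a caller-supplied temperature is stripped and reported as an unsupported-setting warning instead of being forwarded to an endpoint that rejects it. Sketch (assuming warnings are surfaced on the result as in other AI SDK calls):

const { text, warnings } = await generateText({
  model: openai("gpt-4o-search-preview"),
  temperature: 0.7, // removed by the provider; appears in `warnings`
  prompt: "What happened in tech news today?"
});
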
@@ -578,15 +569,15 @@
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils2.postJsonToApi)({
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -611,29 +602,21 @@
  }
  return {
  text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils2.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  };
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(body) },
+ request: { body },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
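
Token accounting is renamed from promptTokens/completionTokens to inputTokens/outputTokens, and missing counts are now undefined rather than NaN, in both doGenerate and the streaming finish event. Callers that read usage adjust accordingly (sketch, assuming the renamed fields pass through the caller's SDK version unchanged):

const { usage } = await generateText({ model: openai("gpt-4o"), prompt: "Hi" });
// previously usage.promptTokens / usage.completionTokens
console.log(usage.inputTokens, usage.outputTokens);
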
@@ -645,49 +628,6 @@
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
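
The simulateStreaming setting and its hand-rolled ReadableStream are deleted from doStream. The equivalent in newer AI SDK versions is middleware-based; assuming the simulateStreamingMiddleware export that recent ai releases ship, a migration would look roughly like:

import { wrapLanguageModel, simulateStreamingMiddleware } from "ai";
import { openai } from "@ai-sdk/openai";

// buffers a doGenerate result and replays it as a simulated stream
const model = wrapLanguageModel({
  model: openai("gpt-4o"),
  middleware: simulateStreamingMiddleware()
});
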
@@ -695,15 +635,15 @@
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -712,13 +652,12 @@
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -750,10 +689,8 @@
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -788,16 +725,8 @@
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils2.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -838,11 +767,11 @@
  argsTextDelta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -865,11 +794,11 @@
  toolName: toolCall.function.name,
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -879,125 +808,112 @@
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
+ request: { body },
  response: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
  warnings
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1039,8 +955,8 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1150,7 +1066,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1192,7 +1108,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1212,32 +1128,30 @@ var OpenAICompletionLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
  text: choice.text,
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(args) },
+ request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
@@ -1254,25 +1168,24 @@ var OpenAICompletionLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1299,10 +1212,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1332,53 +1243,52 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
  response: { headers: responseHeaders },
  warnings,
  request: { body: JSON.stringify(body) }
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1386,8 +1296,8 @@ var openaiCompletionChunkSchema = import_zod3.z.union([

  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v1";
@@ -1419,12 +1329,12 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
@@ -1433,7 +1343,7 @@ var OpenAIEmbeddingModel = class {
  user: this.settings.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
@@ -1446,14 +1356,14 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
+ data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
+ usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
  });

  // src/openai-image-model.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod6 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1499,12 +1409,12 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
@@ -1514,7 +1424,7 @@ var OpenAIImageModel = class {
  response_format: "b64_json"
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
@@ -1531,25 +1441,19 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
  });

  // src/openai-transcription-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod6.z.object({
- include: import_zod6.z.array(import_zod6.z.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: import_zod6.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: import_zod6.z.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: import_zod6.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod7 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod7.z.object({
+ include: import_zod7.z.array(import_zod7.z.string()).nullish(),
+ language: import_zod7.z.string().nullish(),
+ prompt: import_zod7.z.string().nullish(),
+ temperature: import_zod7.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1624,28 +1528,29 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils6.parseProviderOptions)({
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils6.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
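
Two transcription fixes land here: provider options now accept null as well as undefined (nullish), and non-string form values such as temperature or timestampGranularities are coerced with String(value) before being appended to the multipart body. A sketch of a call that exercises both (the experimental_transcribe name follows recent ai releases and is an assumption here):

import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("meeting.mp3"),
  providerOptions: { openai: { temperature: 0, timestampGranularities: ["word"] } }
});
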
@@ -1662,15 +1567,15 @@ var OpenAITranscriptionModel = class {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils6.postFormDataToApi)({
+ } = await (0, import_provider_utils7.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1696,22 +1601,22 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod6.z.object({
- text: import_zod6.z.string(),
- language: import_zod6.z.string().nullish(),
- duration: import_zod6.z.number().nullish(),
- words: import_zod6.z.array(
- import_zod6.z.object({
- word: import_zod6.z.string(),
- start: import_zod6.z.number(),
- end: import_zod6.z.number()
+ var openaiTranscriptionResponseSchema = import_zod7.z.object({
+ text: import_zod7.z.string(),
+ language: import_zod7.z.string().nullish(),
+ duration: import_zod7.z.number().nullish(),
+ words: import_zod7.z.array(
+ import_zod7.z.object({
+ word: import_zod7.z.string(),
+ start: import_zod7.z.number(),
+ end: import_zod7.z.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1928,7 +1833,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1971,7 +1876,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1982,7 +1887,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2052,58 +1957,58 @@ var OpenAIResponsesLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postJsonToApi)({
+ } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
- import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string(),
- output: import_zod7.z.array(
- import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message"),
- role: import_zod7.z.literal("assistant"),
- content: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("output_text"),
- text: import_zod7.z.string(),
- annotations: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- start_index: import_zod7.z.number(),
- end_index: import_zod7.z.number(),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+ import_zod8.z.object({
+ id: import_zod8.z.string(),
+ created_at: import_zod8.z.number(),
+ model: import_zod8.z.string(),
+ output: import_zod8.z.array(
+ import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message"),
+ role: import_zod8.z.literal("assistant"),
+ content: import_zod8.z.array(
+ import_zod8.z.object({
+ type: import_zod8.z.literal("output_text"),
+ text: import_zod8.z.string(),
+ annotations: import_zod8.z.array(
+ import_zod8.z.object({
+ type: import_zod8.z.literal("url_citation"),
+ start_index: import_zod8.z.number(),
+ end_index: import_zod8.z.number(),
+ url: import_zod8.z.string(),
+ title: import_zod8.z.string()
  })
  )
  })
  )
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string()
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("web_search_call")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("web_search_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("computer_call")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("computer_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("reasoning")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("reasoning")
  })
  ])
  ),
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2124,7 +2029,7 @@ var OpenAIResponsesLanguageModel = class {
  var _a2, _b2, _c2;
  return {
  sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils7.generateId)(),
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
  url: annotation.url,
  title: annotation.title
  };
@@ -2136,16 +2041,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
@@ -2165,18 +2064,18 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2184,8 +2083,10 @@
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
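The NaN sentinels for stream usage are replaced by a single `usage` object whose fields start as `undefined`, so a stream that never reports usage yields `undefined` rather than NaN. A tiny sketch of the pattern:

```ts
// Accumulator pattern from the hunk above: undefined means "not reported
// yet", which round-trips through serialization more predictably than NaN.
const usage: { inputTokens: number | undefined; outputTokens: number | undefined } = {
  inputTokens: undefined,
  outputTokens: undefined,
};

function onResponseFinished(reported: { input_tokens: number; output_tokens: number }): void {
  usage.inputTokens = reported.input_tokens;
  usage.outputTokens = reported.output_tokens;
}
```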
@@ -2255,8 +2156,8 @@
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
@@ -2264,7 +2165,7 @@
  type: "source",
  source: {
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils7.generateId)(),
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  }
@@ -2275,7 +2176,7 @@
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
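The finish part now forwards the accumulated `usage` object directly and attaches `providerMetadata.openai` only when cached-prompt or reasoning token counts were reported. A hedged sketch of consuming that part; the `FinishPart` type is illustrative, only the field names come from the surrounding code:

```ts
// Illustrative consumer of the finish part emitted above.
type FinishPart = {
  type: "finish";
  finishReason: string;
  usage: { inputTokens?: number; outputTokens?: number };
  providerMetadata?: {
    openai?: { cachedPromptTokens?: number; reasoningTokens?: number };
  };
};

function summarizeFinish(part: FinishPart): string {
  const openaiMeta = part.providerMetadata?.openai;
  return [
    `reason=${part.finishReason}`,
    `in=${part.usage.inputTokens ?? "n/a"}`,
    `out=${part.usage.outputTokens ?? "n/a"}`,
    `cached=${openaiMeta?.cachedPromptTokens ?? 0}`,
    `reasoning=${openaiMeta?.reasoningTokens ?? 0}`,
  ].join(" ");
}
```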
@@ -2289,89 +2190,85 @@
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- request: { body: JSON.stringify(body) },
+ request: { body },
  response: { headers: responseHeaders },
  warnings
  };
  }
  };
- var usageSchema = import_zod7.z.object({
- input_tokens: import_zod7.z.number(),
- input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
- output_tokens: import_zod7.z.number(),
- output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
+ var usageSchema = import_zod8.z.object({
+ input_tokens: import_zod8.z.number(),
+ input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
+ output_tokens: import_zod8.z.number(),
+ output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.delta"),
- delta: import_zod7.z.string()
+ var textDeltaChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_text.delta"),
+ delta: import_zod8.z.string()
  });
- var responseFinishedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod7.z.object({
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod8.z.object({
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.created"),
- response: import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string()
+ var responseCreatedChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.created"),
+ response: import_zod8.z.object({
+ id: import_zod8.z.string(),
+ created_at: import_zod8.z.number(),
+ model: import_zod8.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.done"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_item.done"),
+ output_index: import_zod8.z.number(),
+ item: import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string(),
- status: import_zod7.z.literal("completed")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ id: import_zod8.z.string(),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string(),
+ status: import_zod8.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod7.z.string(),
- output_index: import_zod7.z.number(),
- delta: import_zod7.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod8.z.string(),
+ output_index: import_zod8.z.number(),
+ delta: import_zod8.z.string()
  });
- var responseOutputItemAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.added"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_item.added"),
+ output_index: import_zod8.z.number(),
+ item: import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ id: import_zod8.z.string(),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.annotation.added"),
- annotation: import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ var responseAnnotationAddedSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod8.z.object({
+ type: import_zod8.z.literal("url_citation"),
+ url: import_zod8.z.string(),
+ title: import_zod8.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod7.z.union([
+ var openaiResponsesChunkSchema = import_zod8.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2379,7 +2276,7 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
+ import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
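Note the final union member: a `.passthrough()` object keyed only on `type`, kept as a fallback so unrecognized SSE events parse instead of failing the whole stream. A minimal reconstruction of the trick, with the union trimmed to two members:

```ts
import { z } from "zod";

const textDeltaChunk = z.object({
  type: z.literal("response.output_text.delta"),
  delta: z.string(),
});

// Fallback: any chunk with a string `type` is accepted, extra keys kept.
const unknownChunk = z.object({ type: z.string() }).passthrough();

const chunkSchema = z.union([textDeltaChunk, unknownChunk]);

// A future event type no schema knows about still parses:
const chunk = chunkSchema.parse({ type: "response.new_event", detail: 42 });
// chunk.type === "response.new_event"; `detail` survives .passthrough().
```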
@@ -2424,15 +2321,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
- metadata: import_zod7.z.any().nullish(),
- parallelToolCalls: import_zod7.z.boolean().nullish(),
- previousResponseId: import_zod7.z.string().nullish(),
- store: import_zod7.z.boolean().nullish(),
- user: import_zod7.z.string().nullish(),
- reasoningEffort: import_zod7.z.string().nullish(),
- strictSchemas: import_zod7.z.boolean().nullish(),
- instructions: import_zod7.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
+ metadata: import_zod8.z.any().nullish(),
+ parallelToolCalls: import_zod8.z.boolean().nullish(),
+ previousResponseId: import_zod8.z.string().nullish(),
+ store: import_zod8.z.boolean().nullish(),
+ user: import_zod8.z.string().nullish(),
+ reasoningEffort: import_zod8.z.string().nullish(),
+ strictSchemas: import_zod8.z.boolean().nullish(),
+ instructions: import_zod8.z.string().nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -2442,6 +2339,7 @@ var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel,
- modelMaxImagesPerCall
+ modelMaxImagesPerCall,
+ openaiProviderOptions
  });
  //# sourceMappingURL=index.js.map
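For reference, the options gated by `openaiResponsesProviderOptionsSchema` above are the ones a caller would supply through `providerOptions.openai`. A hedged usage sketch: the `generateText`/`openai.responses` wiring follows the AI SDK's usual pattern, but exact signatures may differ at this canary version, and the model id is illustrative:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.responses("gpt-4o-mini"), // illustrative model id
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      store: false, // do not persist the response server-side
      parallelToolCalls: false, // serialize tool calls
      reasoningEffort: "low", // only meaningful for reasoning models
      user: "user-1234", // stable end-user identifier
    },
  },
});
console.log(text);
```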