@ai-sdk/openai 2.0.0-canary.4 → 2.0.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
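Summary of the API-surface changes visible in this diff, for consumers upgrading across these canaries: chat-level options (logitBias, logprobs, user, parallelToolCalls, reasoningEffort, maxCompletionTokens, store, metadata, prediction) move out of model settings into providerOptions.openai, validated by the new openaiProviderOptions zod schema; the useLegacyFunctionCalling and simulateStreaming settings are removed; maxTokens is renamed maxOutputTokens; usage is reported as inputTokens/outputTokens (undefined instead of NaN when missing); and request.body is now the raw object rather than a JSON string. The sketch below shows what a call-site migration might look like; it assumes the AI SDK's generateText wrapper and option names from its documentation, which are not part of this diff:

```ts
// Migration sketch (assumed call-site shape; verify against your AI SDK version).
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Before (canary.4 era): OpenAI knobs were passed as model settings, e.g.
//   openai('gpt-4o', { logitBias: { 50256: -100 }, user: 'user-123' })
// After (canary.6): they travel in providerOptions.openai and are parsed
// with the openaiProviderOptions zod schema added in this diff.
const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
  maxOutputTokens: 256, // renamed from maxTokens; still mapped to max_tokens
  providerOptions: {
    openai: {
      logitBias: { 50256: -100 },
      user: 'user-123',
      parallelToolCalls: false,
    },
  },
});

// Usage fields were renamed at the provider level: promptTokens/completionTokens
// are now inputTokens/outputTokens, and absent values are undefined, not NaN.
console.log(result.usage);
```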
package/dist/index.js CHANGED
@@ -26,18 +26,18 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);

  // src/openai-provider.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -89,7 +89,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -104,14 +104,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
@@ -166,41 +172,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -243,21 +228,72 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
- var import_provider_utils = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
- var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
@@ -280,7 +316,6 @@ var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -288,48 +323,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools2 = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -401,7 +394,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -414,8 +407,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -429,21 +427,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -452,13 +438,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -469,19 +455,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -545,26 +531,30 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools2,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
@@ -576,15 +566,15 @@ var OpenAIChatLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils2.postJsonToApi)({
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -609,29 +599,21 @@ var OpenAIChatLanguageModel = class {
  }
  return {
  text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils2.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  };
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(body) },
+ request: { body },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
@@ -643,49 +625,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
@@ -693,15 +632,15 @@ var OpenAIChatLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -710,13 +649,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -748,10 +686,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -786,16 +722,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils2.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -836,11 +764,11 @@ var OpenAIChatLanguageModel = class {
  argsTextDelta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -863,11 +791,11 @@ var OpenAIChatLanguageModel = class {
  toolName: toolCall.function.name,
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -877,125 +805,112 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
+ request: { body },
  response: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
  warnings
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1037,8 +952,8 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1148,7 +1063,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1190,7 +1105,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1210,32 +1125,30 @@ var OpenAICompletionLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
  text: choice.text,
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(args) },
+ request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
@@ -1252,25 +1165,24 @@ var OpenAICompletionLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1297,10 +1209,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1330,53 +1240,52 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
  response: { headers: responseHeaders },
  warnings,
  request: { body: JSON.stringify(body) }
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1384,8 +1293,8 @@ var openaiCompletionChunkSchema = import_zod3.z.union([

  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v1";
@@ -1417,12 +1326,12 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
@@ -1431,7 +1340,7 @@ var OpenAIEmbeddingModel = class {
  user: this.settings.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
@@ -1444,14 +1353,14 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
+ data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
+ usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
  });

  // src/openai-image-model.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod6 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1497,12 +1406,12 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
@@ -1512,7 +1421,7 @@ var OpenAIImageModel = class {
  response_format: "b64_json"
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
@@ -1529,13 +1438,13 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
  });

  // src/openai-tools.ts
- var import_zod6 = require("zod");
- var WebSearchPreviewParameters = import_zod6.z.object({});
+ var import_zod7 = require("zod");
+ var WebSearchPreviewParameters = import_zod7.z.object({});
  function webSearchPreviewTool({
  searchContextSize,
  userLocation
@@ -1555,20 +1464,14 @@ var openaiTools = {
  };

  // src/openai-transcription-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod7.z.object({
- include: import_zod7.z.array(import_zod7.z.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: import_zod7.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: import_zod7.z.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: import_zod7.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod8.z.object({
+ include: import_zod8.z.array(import_zod8.z.string()).nullish(),
+ language: import_zod8.z.string().nullish(),
+ prompt: import_zod8.z.string().nullish(),
+ temperature: import_zod8.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod8.z.array(import_zod8.z.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1643,28 +1546,29 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils6.parseProviderOptions)({
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils6.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
@@ -1681,15 +1585,15 @@ var OpenAITranscriptionModel = class {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils6.postFormDataToApi)({
+ } = await (0, import_provider_utils7.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1715,22 +1619,22 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod7.z.object({
- text: import_zod7.z.string(),
- language: import_zod7.z.string().nullish(),
- duration: import_zod7.z.number().nullish(),
- words: import_zod7.z.array(
- import_zod7.z.object({
- word: import_zod7.z.string(),
- start: import_zod7.z.number(),
- end: import_zod7.z.number()
+ var openaiTranscriptionResponseSchema = import_zod8.z.object({
+ text: import_zod8.z.string(),
+ language: import_zod8.z.string().nullish(),
+ duration: import_zod8.z.number().nullish(),
+ words: import_zod8.z.array(
+ import_zod8.z.object({
+ word: import_zod8.z.string(),
+ start: import_zod8.z.number(),
+ end: import_zod8.z.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod8 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod9 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1947,7 +1851,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1990,7 +1894,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -2001,7 +1905,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2071,58 +1975,58 @@ var OpenAIResponsesLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postJsonToApi)({
+ } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
- import_zod8.z.object({
- id: import_zod8.z.string(),
- created_at: import_zod8.z.number(),
- model: import_zod8.z.string(),
- output: import_zod8.z.array(
- import_zod8.z.discriminatedUnion("type", [
- import_zod8.z.object({
- type: import_zod8.z.literal("message"),
- role: import_zod8.z.literal("assistant"),
- content: import_zod8.z.array(
- import_zod8.z.object({
- type: import_zod8.z.literal("output_text"),
- text: import_zod8.z.string(),
- annotations: import_zod8.z.array(
- import_zod8.z.object({
- type: import_zod8.z.literal("url_citation"),
- start_index: import_zod8.z.number(),
- end_index: import_zod8.z.number(),
- url: import_zod8.z.string(),
- title: import_zod8.z.string()
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+ import_zod9.z.object({
+ id: import_zod9.z.string(),
+ created_at: import_zod9.z.number(),
+ model: import_zod9.z.string(),
+ output: import_zod9.z.array(
+ import_zod9.z.discriminatedUnion("type", [
+ import_zod9.z.object({
+ type: import_zod9.z.literal("message"),
+ role: import_zod9.z.literal("assistant"),
+ content: import_zod9.z.array(
+ import_zod9.z.object({
+ type: import_zod9.z.literal("output_text"),
+ text: import_zod9.z.string(),
+ annotations: import_zod9.z.array(
+ import_zod9.z.object({
+ type: import_zod9.z.literal("url_citation"),
+ start_index: import_zod9.z.number(),
+ end_index: import_zod9.z.number(),
+ url: import_zod9.z.string(),
+ title: import_zod9.z.string()
  })
  )
  })
  )
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("function_call"),
- call_id: import_zod8.z.string(),
- name: import_zod8.z.string(),
- arguments: import_zod8.z.string()
+ import_zod9.z.object({
+ type: import_zod9.z.literal("function_call"),
+ call_id: import_zod9.z.string(),
+ name: import_zod9.z.string(),
+ arguments: import_zod9.z.string()
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("web_search_call")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("web_search_call")
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("computer_call")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("computer_call")
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("reasoning")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("reasoning")
  })
  ])
  ),
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
+ incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2143,7 +2047,7 @@ var OpenAIResponsesLanguageModel = class {
  var _a2, _b2, _c2;
  return {
  sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils7.generateId)(),
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
  url: annotation.url,
  title: annotation.title
  };
@@ -2155,16 +2059,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
@@ -2184,18 +2082,18 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2203,8 +2101,10 @@ var OpenAIResponsesLanguageModel = class {
2203
2101
  });
2204
2102
  const self = this;
2205
2103
  let finishReason = "unknown";
2206
- let promptTokens = NaN;
2207
- let completionTokens = NaN;
2104
+ const usage = {
2105
+ inputTokens: void 0,
2106
+ outputTokens: void 0
2107
+ };
2208
2108
  let cachedPromptTokens = null;
2209
2109
  let reasoningTokens = null;
2210
2110
  let responseId = null;
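Stream accounting now starts the token counts as undefined ("not reported yet") instead of NaN, so a stream that ends without a usage payload surfaces missing data rather than silent NaN arithmetic. A minimal sketch:

type StreamUsage = {
  inputTokens: number | undefined;
  outputTokens: number | undefined;
};

const usageSketch: StreamUsage = { inputTokens: undefined, outputTokens: undefined };

// Called when a response.completed / response.incomplete chunk arrives;
// the payload shape mirrors usageSchema below, values hypothetical.
function onFinish(u: { input_tokens: number; output_tokens: number }) {
  usageSketch.inputTokens = u.input_tokens;
  usageSketch.outputTokens = u.output_tokens;
}

onFinish({ input_tokens: 12, output_tokens: 34 });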
@@ -2274,8 +2174,8 @@ var OpenAIResponsesLanguageModel = class {
2274
2174
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2275
2175
  hasToolCalls
2276
2176
  });
2277
- promptTokens = value.response.usage.input_tokens;
2278
- completionTokens = value.response.usage.output_tokens;
2177
+ usage.inputTokens = value.response.usage.input_tokens;
2178
+ usage.outputTokens = value.response.usage.output_tokens;
2279
2179
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2280
2180
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2281
2181
  } else if (isResponseAnnotationAddedChunk(value)) {
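The cachedPromptTokens/reasoningTokens lines above use nullish coalescing so a finish chunk that omits the detail objects keeps the previously seen value. The same pattern in isolation:

let cachedPromptTokens: number | null = null;

function updateCached(details?: { cached_tokens?: number | null } | null) {
  cachedPromptTokens = details?.cached_tokens ?? cachedPromptTokens;
}

updateCached(undefined);              // stays null
updateCached({ cached_tokens: 128 }); // becomes 128
updateCached({});                     // keeps 128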
@@ -2283,7 +2183,7 @@ var OpenAIResponsesLanguageModel = class {
2283
2183
  type: "source",
2284
2184
  source: {
2285
2185
  sourceType: "url",
2286
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils7.generateId)(),
2186
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
2287
2187
  url: value.annotation.url,
2288
2188
  title: value.annotation.title
2289
2189
  }
@@ -2294,7 +2194,7 @@ var OpenAIResponsesLanguageModel = class {
2294
2194
  controller.enqueue({
2295
2195
  type: "finish",
2296
2196
  finishReason,
2297
- usage: { promptTokens, completionTokens },
2197
+ usage,
2298
2198
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
2299
2199
  providerMetadata: {
2300
2200
  openai: {
@@ -2308,89 +2208,85 @@ var OpenAIResponsesLanguageModel = class {
2308
2208
  }
2309
2209
  })
2310
2210
  ),
2311
- rawCall: {
2312
- rawPrompt: void 0,
2313
- rawSettings: {}
2314
- },
2315
- request: { body: JSON.stringify(body) },
2211
+ request: { body },
2316
2212
  response: { headers: responseHeaders },
2317
2213
  warnings
2318
2214
  };
2319
2215
  }
2320
2216
  };
2321
- var usageSchema = import_zod8.z.object({
2322
- input_tokens: import_zod8.z.number(),
2323
- input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
2324
- output_tokens: import_zod8.z.number(),
2325
- output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
2217
+ var usageSchema = import_zod9.z.object({
2218
+ input_tokens: import_zod9.z.number(),
2219
+ input_tokens_details: import_zod9.z.object({ cached_tokens: import_zod9.z.number().nullish() }).nullish(),
2220
+ output_tokens: import_zod9.z.number(),
2221
+ output_tokens_details: import_zod9.z.object({ reasoning_tokens: import_zod9.z.number().nullish() }).nullish()
2326
2222
  });
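.nullish() accepts both null and undefined, so usage payloads that omit the detail objects still validate. A standalone sketch of the schema above (not the package export):

import { z } from "zod";

const usageSketch = z.object({
  input_tokens: z.number(),
  input_tokens_details: z
    .object({ cached_tokens: z.number().nullish() })
    .nullish(),
  output_tokens: z.number()
});

usageSketch.parse({ input_tokens: 10, output_tokens: 3 }); // ok: details omitted
usageSketch.parse({
  input_tokens: 10,
  output_tokens: 3,
  input_tokens_details: null                               // ok: explicit null
});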
2327
- var textDeltaChunkSchema = import_zod8.z.object({
2328
- type: import_zod8.z.literal("response.output_text.delta"),
2329
- delta: import_zod8.z.string()
2223
+ var textDeltaChunkSchema = import_zod9.z.object({
2224
+ type: import_zod9.z.literal("response.output_text.delta"),
2225
+ delta: import_zod9.z.string()
2330
2226
  });
2331
- var responseFinishedChunkSchema = import_zod8.z.object({
2332
- type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
2333
- response: import_zod8.z.object({
2334
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
2227
+ var responseFinishedChunkSchema = import_zod9.z.object({
2228
+ type: import_zod9.z.enum(["response.completed", "response.incomplete"]),
2229
+ response: import_zod9.z.object({
2230
+ incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullish(),
2335
2231
  usage: usageSchema
2336
2232
  })
2337
2233
  });
2338
- var responseCreatedChunkSchema = import_zod8.z.object({
2339
- type: import_zod8.z.literal("response.created"),
2340
- response: import_zod8.z.object({
2341
- id: import_zod8.z.string(),
2342
- created_at: import_zod8.z.number(),
2343
- model: import_zod8.z.string()
2234
+ var responseCreatedChunkSchema = import_zod9.z.object({
2235
+ type: import_zod9.z.literal("response.created"),
2236
+ response: import_zod9.z.object({
2237
+ id: import_zod9.z.string(),
2238
+ created_at: import_zod9.z.number(),
2239
+ model: import_zod9.z.string()
2344
2240
  })
2345
2241
  });
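created_at in the chunk above is epoch seconds; elsewhere the model converts it with new Date(created_at * 1e3). In isolation:

const createdAt = 1_710_000_000;             // hypothetical epoch seconds
const timestamp = new Date(createdAt * 1e3); // Date expects epoch milliseconds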
2346
- var responseOutputItemDoneSchema = import_zod8.z.object({
2347
- type: import_zod8.z.literal("response.output_item.done"),
2348
- output_index: import_zod8.z.number(),
2349
- item: import_zod8.z.discriminatedUnion("type", [
2350
- import_zod8.z.object({
2351
- type: import_zod8.z.literal("message")
2242
+ var responseOutputItemDoneSchema = import_zod9.z.object({
2243
+ type: import_zod9.z.literal("response.output_item.done"),
2244
+ output_index: import_zod9.z.number(),
2245
+ item: import_zod9.z.discriminatedUnion("type", [
2246
+ import_zod9.z.object({
2247
+ type: import_zod9.z.literal("message")
2352
2248
  }),
2353
- import_zod8.z.object({
2354
- type: import_zod8.z.literal("function_call"),
2355
- id: import_zod8.z.string(),
2356
- call_id: import_zod8.z.string(),
2357
- name: import_zod8.z.string(),
2358
- arguments: import_zod8.z.string(),
2359
- status: import_zod8.z.literal("completed")
2249
+ import_zod9.z.object({
2250
+ type: import_zod9.z.literal("function_call"),
2251
+ id: import_zod9.z.string(),
2252
+ call_id: import_zod9.z.string(),
2253
+ name: import_zod9.z.string(),
2254
+ arguments: import_zod9.z.string(),
2255
+ status: import_zod9.z.literal("completed")
2360
2256
  })
2361
2257
  ])
2362
2258
  });
2363
- var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
2364
- type: import_zod8.z.literal("response.function_call_arguments.delta"),
2365
- item_id: import_zod8.z.string(),
2366
- output_index: import_zod8.z.number(),
2367
- delta: import_zod8.z.string()
2259
+ var responseFunctionCallArgumentsDeltaSchema = import_zod9.z.object({
2260
+ type: import_zod9.z.literal("response.function_call_arguments.delta"),
2261
+ item_id: import_zod9.z.string(),
2262
+ output_index: import_zod9.z.number(),
2263
+ delta: import_zod9.z.string()
2368
2264
  });
2369
- var responseOutputItemAddedSchema = import_zod8.z.object({
2370
- type: import_zod8.z.literal("response.output_item.added"),
2371
- output_index: import_zod8.z.number(),
2372
- item: import_zod8.z.discriminatedUnion("type", [
2373
- import_zod8.z.object({
2374
- type: import_zod8.z.literal("message")
2265
+ var responseOutputItemAddedSchema = import_zod9.z.object({
2266
+ type: import_zod9.z.literal("response.output_item.added"),
2267
+ output_index: import_zod9.z.number(),
2268
+ item: import_zod9.z.discriminatedUnion("type", [
2269
+ import_zod9.z.object({
2270
+ type: import_zod9.z.literal("message")
2375
2271
  }),
2376
- import_zod8.z.object({
2377
- type: import_zod8.z.literal("function_call"),
2378
- id: import_zod8.z.string(),
2379
- call_id: import_zod8.z.string(),
2380
- name: import_zod8.z.string(),
2381
- arguments: import_zod8.z.string()
2272
+ import_zod9.z.object({
2273
+ type: import_zod9.z.literal("function_call"),
2274
+ id: import_zod9.z.string(),
2275
+ call_id: import_zod9.z.string(),
2276
+ name: import_zod9.z.string(),
2277
+ arguments: import_zod9.z.string()
2382
2278
  })
2383
2279
  ])
2384
2280
  });
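Taken together, the streaming schemas above describe a tool-call lifecycle: response.output_item.added opens a function call, response.function_call_arguments.delta streams its argument string, and response.output_item.done closes it. A hedged sketch of accumulating arguments by item id (the ordering is assumed; ids and deltas are hypothetical):

type AddedChunk = {
  type: "response.output_item.added";
  item:
    | { type: "message" }
    | { type: "function_call"; id: string; call_id: string; name: string; arguments: string };
};
type ArgsDeltaChunk = {
  type: "response.function_call_arguments.delta";
  item_id: string;
  delta: string;
};

const argsByItemId: Record<string, string> = {};

function onToolChunk(chunk: AddedChunk | ArgsDeltaChunk) {
  if (chunk.type === "response.output_item.added") {
    if (chunk.item.type === "function_call") {
      argsByItemId[chunk.item.id] = chunk.item.arguments; // typically ""
    }
  } else {
    argsByItemId[chunk.item_id] =
      (argsByItemId[chunk.item_id] ?? "") + chunk.delta;
  }
}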
2385
- var responseAnnotationAddedSchema = import_zod8.z.object({
2386
- type: import_zod8.z.literal("response.output_text.annotation.added"),
2387
- annotation: import_zod8.z.object({
2388
- type: import_zod8.z.literal("url_citation"),
2389
- url: import_zod8.z.string(),
2390
- title: import_zod8.z.string()
2281
+ var responseAnnotationAddedSchema = import_zod9.z.object({
2282
+ type: import_zod9.z.literal("response.output_text.annotation.added"),
2283
+ annotation: import_zod9.z.object({
2284
+ type: import_zod9.z.literal("url_citation"),
2285
+ url: import_zod9.z.string(),
2286
+ title: import_zod9.z.string()
2391
2287
  })
2392
2288
  });
2393
- var openaiResponsesChunkSchema = import_zod8.z.union([
2289
+ var openaiResponsesChunkSchema = import_zod9.z.union([
2394
2290
  textDeltaChunkSchema,
2395
2291
  responseFinishedChunkSchema,
2396
2292
  responseCreatedChunkSchema,
@@ -2398,7 +2294,7 @@ var openaiResponsesChunkSchema = import_zod8.z.union([
2398
2294
  responseFunctionCallArgumentsDeltaSchema,
2399
2295
  responseOutputItemAddedSchema,
2400
2296
  responseAnnotationAddedSchema,
2401
- import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
2297
+ import_zod9.z.object({ type: import_zod9.z.string() }).passthrough()
2402
2298
  // fallback for unknown chunks
2403
2299
  ]);
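Ending the union in a passthrough object gives forward compatibility: chunk types the schema does not know yet still parse instead of failing the whole stream. In isolation (the unknown event name is illustrative):

import { z } from "zod";

const chunkSketch = z.union([
  z.object({
    type: z.literal("response.output_text.delta"),
    delta: z.string()
  }),
  z.object({ type: z.string() }).passthrough() // fallback for unknown chunks
]);

chunkSketch.parse({ type: "response.output_text.delta", delta: "Hi" }); // known chunk
chunkSketch.parse({ type: "response.some_future_event", extra: 1 });    // still parses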
2404
2300
  function isTextDeltaChunk(chunk) {
@@ -2443,25 +2339,25 @@ function getResponsesModelConfig(modelId) {
2443
2339
  requiredAutoTruncation: false
2444
2340
  };
2445
2341
  }
2446
- var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2447
- metadata: import_zod8.z.any().nullish(),
2448
- parallelToolCalls: import_zod8.z.boolean().nullish(),
2449
- previousResponseId: import_zod8.z.string().nullish(),
2450
- store: import_zod8.z.boolean().nullish(),
2451
- user: import_zod8.z.string().nullish(),
2452
- reasoningEffort: import_zod8.z.string().nullish(),
2453
- strictSchemas: import_zod8.z.boolean().nullish(),
2454
- instructions: import_zod8.z.string().nullish()
2342
+ var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
2343
+ metadata: import_zod9.z.any().nullish(),
2344
+ parallelToolCalls: import_zod9.z.boolean().nullish(),
2345
+ previousResponseId: import_zod9.z.string().nullish(),
2346
+ store: import_zod9.z.boolean().nullish(),
2347
+ user: import_zod9.z.string().nullish(),
2348
+ reasoningEffort: import_zod9.z.string().nullish(),
2349
+ strictSchemas: import_zod9.z.boolean().nullish(),
2350
+ instructions: import_zod9.z.string().nullish()
2455
2351
  });
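These provider options are validated per call. A hedged sketch of how they might be supplied through the AI SDK canary surface — the generateText/providerOptions/openai.responses shapes are assumed from this release line and may differ:

import { generateText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const { text } = await generateText({
  model: openai.responses("gpt-4o-mini"), // model id illustrative
  prompt: "Summarize the release notes.",
  providerOptions: {
    openai: {
      store: false,           // keys mirror openaiResponsesProviderOptionsSchema
      reasoningEffort: "low",
      user: "user-123"
    }
  }
});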
2456
2352
 
2457
2353
  // src/openai-provider.ts
2458
2354
  function createOpenAI(options = {}) {
2459
2355
  var _a, _b, _c;
2460
- const baseURL = (_a = (0, import_provider_utils8.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2356
+ const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2461
2357
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
2462
2358
  const providerName = (_c = options.name) != null ? _c : "openai";
2463
2359
  const getHeaders = () => ({
2464
- Authorization: `Bearer ${(0, import_provider_utils8.loadApiKey)({
2360
+ Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
2465
2361
  apiKey: options.apiKey,
2466
2362
  environmentVariableName: "OPENAI_API_KEY",
2467
2363
  description: "OpenAI"