@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.7

This diff shows the contents of publicly released package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions.
package/dist/index.js CHANGED
@@ -26,18 +26,18 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);

  // src/openai-provider.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -89,7 +89,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -104,14 +104,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
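Note: both the image and audio branches above now route binary part data through `convertToBase64` before inlining it into the request. A minimal sketch of the idea, assuming part data arrives as either a base64 string or a `Uint8Array` (the real helper lives in `@ai-sdk/provider-utils`; this body is illustrative):

```ts
// Illustrative helper: normalize message-part data to a base64 string so it
// can be embedded in a data URL or an input_audio payload.
function convertToBase64(data: string | Uint8Array): string {
  if (typeof data === "string") {
    return data; // assumed to already be base64-encoded
  }
  // Node.js Buffer used for brevity; a browser build would encode differently.
  return Buffer.from(data).toString("base64");
}
```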
@@ -166,41 +172,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
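Note: with `useLegacyFunctionCalling` removed, assistant tool invocations are always emitted as `tool_calls` and tool results always as `role: "tool"` messages. A sketch of the two wire shapes this conversion now produces (field layout follows the OpenAI Chat Completions API; the tool name and id here are hypothetical):

```ts
// Assistant turn that invoked a tool:
const assistantMessage = {
  role: "assistant",
  content: "",
  tool_calls: [
    {
      id: "call_abc123", // hypothetical id
      type: "function",
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

// Matching tool-result turn, correlated via tool_call_id:
const toolMessage = {
  role: "tool",
  tool_call_id: "call_abc123",
  content: JSON.stringify({ temperatureCelsius: 18 }),
};
```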
@@ -243,21 +228,72 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
- var import_provider_utils = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
- var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
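Note: the chat options that used to be constructor `settings` (logitBias, logprobs, user, reasoningEffort, and so on) are now declared in this zod schema and read per call from `providerOptions`. A hedged usage sketch, assuming the AI SDK v5 canary `generateText` API (model id and option values are illustrative):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Options are namespaced under "openai" and validated against the
// openaiProviderOptions schema introduced above.
const { text } = await generateText({
  model: openai("o3-mini"), // illustrative model id
  prompt: "Summarize this changelog in one sentence.",
  providerOptions: {
    openai: {
      reasoningEffort: "high",
      user: "user-1234", // hypothetical end-user identifier
    },
  },
});
```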
@@ -280,7 +316,6 @@ var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -288,48 +323,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools2 = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -401,7 +394,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -414,8 +407,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -429,21 +427,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -452,13 +438,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -469,19 +455,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -545,46 +531,50 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools2,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils2.postJsonToApi)({
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -608,27 +598,21 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils2.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ text: choice.message.content != null ? { type: "text", text: choice.message.content } : void 0,
+ toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
  var _a2;
  return {
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  };
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : void 0,
+ outputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : void 0
  },
  request: { body },
  response: {
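Note: the `doGenerate` result changes shape here: `text` becomes a typed content object, tool calls gain a `type: "tool-call"` discriminator, and usage is renamed to `inputTokens`/`outputTokens` with `undefined` (not `NaN`) when the API omits usage. An approximate sketch of the new result shape (types inferred from this diff, not copied from the v2 spec):

```ts
// Approximate result shape after this change (illustrative only):
interface ChatGenerateResult {
  text?: { type: "text"; text: string };
  toolCalls?: Array<{
    type: "tool-call";
    toolCallType: "function";
    toolCallId: string;
    toolName: string;
    args: string; // JSON-encoded tool arguments
  }>;
  usage: {
    inputTokens: number | undefined; // was promptTokens with a NaN fallback
    outputTokens: number | undefined; // was completionTokens with a NaN fallback
  };
}
```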
@@ -642,48 +626,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
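Note: the built-in `simulateStreaming` setting is removed. Callers that still want a one-shot result surfaced as a stream can wrap the generate call themselves; a minimal sketch using the renamed `text`/`finish` part types from this diff (part fields assumed):

```ts
// Minimal sketch: expose a single generated result as a one-chunk stream.
function simulateStream(result: {
  text?: { type: "text"; text: string };
  finishReason: string;
  usage: unknown;
}): ReadableStream<unknown> {
  return new ReadableStream({
    start(controller) {
      if (result.text) {
        controller.enqueue({ type: "text", text: result.text.text });
      }
      controller.enqueue({
        type: "finish",
        finishReason: result.finishReason,
        usage: result.usage,
      });
      controller.close();
    },
  });
}
```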
@@ -691,15 +633,15 @@ var OpenAIChatLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -708,13 +650,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -746,10 +687,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -773,8 +712,8 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  const mappedLogprobs = mapOpenAIChatLogProbsOutput(
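Note: streamed text parts are renamed from `{ type: "text-delta", textDelta }` to `{ type: "text", text }` (the completion and responses models further below get the same rename). Consumers that switch on part types need the matching update; a hedged consumer sketch:

```ts
// Drain text parts from a v2-style stream. Was: type "text-delta" with the
// payload in part.textDelta; now: type "text" with the payload in part.text.
async function printTextParts(
  stream: AsyncIterable<{ type: string; text?: string }>,
): Promise<void> {
  for await (const part of stream) {
    if (part.type === "text" && part.text != null) {
      process.stdout.write(part.text);
    }
  }
}
```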
@@ -784,16 +723,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils2.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -834,11 +765,11 @@ var OpenAIChatLanguageModel = class {
  argsTextDelta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -861,11 +792,11 @@ var OpenAIChatLanguageModel = class {
  toolName: toolCall.function.name,
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -875,15 +806,11 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
@@ -895,104 +822,96 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1034,8 +953,8 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1145,7 +1064,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1187,7 +1106,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1207,15 +1126,15 @@ var OpenAICompletionLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1223,10 +1142,10 @@ var OpenAICompletionLanguageModel = class {
  });
  const choice = response.choices[0];
  return {
- text: choice.text,
+ text: { type: "text", text: choice.text },
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1247,24 +1166,24 @@ var OpenAICompletionLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1291,10 +1210,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1302,8 +1219,8 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1330,46 +1247,46 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1377,11 +1294,11 @@ var openaiCompletionChunkSchema = import_zod3.z.union([

  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
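Note: the embedding model moves to specification v2 and (further below) returns the raw JSON body alongside the headers: `response: { headers, body }` replaces `rawResponse: { headers }`. An illustrative sketch of the result shape, inferred from this diff rather than the spec:

```ts
// Illustrative v2 embedding result shape after this change:
interface EmbeddingResultV2 {
  embeddings: number[][];
  usage?: { tokens: number };
  response?: {
    headers?: Record<string, string>;
    body?: unknown; // raw response body, newly exposed via rawValue
  };
}
```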
@@ -1410,12 +1327,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
@@ -1424,7 +1345,7 @@ var OpenAIEmbeddingModel = class {
  user: this.settings.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
@@ -1433,18 +1354,18 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
+ data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
+ usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
  });

  // src/openai-image-model.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod6 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1490,12 +1411,12 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
@@ -1505,7 +1426,7 @@ var OpenAIImageModel = class {
  response_format: "b64_json"
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
@@ -1522,13 +1443,13 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
  });

  // src/openai-tools.ts
- var import_zod6 = require("zod");
- var WebSearchPreviewParameters = import_zod6.z.object({});
+ var import_zod7 = require("zod");
+ var WebSearchPreviewParameters = import_zod7.z.object({});
  function webSearchPreviewTool({
  searchContextSize,
  userLocation
@@ -1548,20 +1469,14 @@ var openaiTools = {
  };

  // src/openai-transcription-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod7.z.object({
- include: import_zod7.z.array(import_zod7.z.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: import_zod7.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: import_zod7.z.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: import_zod7.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");
+ var openAIProviderOptionsSchema = import_zod8.z.object({
+ include: import_zod8.z.array(import_zod8.z.string()).nullish(),
+ language: import_zod8.z.string().nullish(),
+ prompt: import_zod8.z.string().nullish(),
+ temperature: import_zod8.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod8.z.array(import_zod8.z.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
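Note: the transcription provider options drop `.describe(...)` and switch from `.optional()` to `.nullish()`, so explicit `null` now validates as well as `undefined`. A hedged usage sketch, assuming the experimental transcription API exported by the `ai` package (the import name, file path, and option values are illustrative):

```ts
import { experimental_transcribe as transcribe } from "ai"; // name assumed
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("./meeting.wav"), // illustrative audio file
  providerOptions: {
    openai: {
      language: "en",
      timestampGranularities: ["word"],
    },
  },
});
console.log(result.text);
```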
@@ -1636,28 +1551,29 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils6.parseProviderOptions)({
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: OpenAIProviderOptionsSchema
+ schema: openAIProviderOptionsSchema
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils6.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
@@ -1674,15 +1590,15 @@ var OpenAITranscriptionModel = class {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils6.postFormDataToApi)({
+ } = await (0, import_provider_utils7.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1708,22 +1624,22 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod7.z.object({
- text: import_zod7.z.string(),
- language: import_zod7.z.string().nullish(),
- duration: import_zod7.z.number().nullish(),
- words: import_zod7.z.array(
- import_zod7.z.object({
- word: import_zod7.z.string(),
- start: import_zod7.z.number(),
- end: import_zod7.z.number()
+ var openaiTranscriptionResponseSchema = import_zod8.z.object({
+ text: import_zod8.z.string(),
+ language: import_zod8.z.string().nullish(),
+ duration: import_zod8.z.number().nullish(),
+ words: import_zod8.z.array(
+ import_zod8.z.object({
+ word: import_zod8.z.string(),
+ start: import_zod8.z.number(),
+ end: import_zod8.z.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod8 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod9 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1940,7 +1856,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1983,7 +1899,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1994,7 +1910,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2064,58 +1980,58 @@ var OpenAIResponsesLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postJsonToApi)({
+ } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
- import_zod8.z.object({
- id: import_zod8.z.string(),
- created_at: import_zod8.z.number(),
- model: import_zod8.z.string(),
- output: import_zod8.z.array(
- import_zod8.z.discriminatedUnion("type", [
- import_zod8.z.object({
- type: import_zod8.z.literal("message"),
- role: import_zod8.z.literal("assistant"),
- content: import_zod8.z.array(
- import_zod8.z.object({
- type: import_zod8.z.literal("output_text"),
- text: import_zod8.z.string(),
- annotations: import_zod8.z.array(
- import_zod8.z.object({
- type: import_zod8.z.literal("url_citation"),
- start_index: import_zod8.z.number(),
- end_index: import_zod8.z.number(),
- url: import_zod8.z.string(),
- title: import_zod8.z.string()
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+ import_zod9.z.object({
+ id: import_zod9.z.string(),
+ created_at: import_zod9.z.number(),
+ model: import_zod9.z.string(),
+ output: import_zod9.z.array(
+ import_zod9.z.discriminatedUnion("type", [
+ import_zod9.z.object({
+ type: import_zod9.z.literal("message"),
+ role: import_zod9.z.literal("assistant"),
+ content: import_zod9.z.array(
+ import_zod9.z.object({
+ type: import_zod9.z.literal("output_text"),
+ text: import_zod9.z.string(),
+ annotations: import_zod9.z.array(
+ import_zod9.z.object({
+ type: import_zod9.z.literal("url_citation"),
+ start_index: import_zod9.z.number(),
+ end_index: import_zod9.z.number(),
+ url: import_zod9.z.string(),
+ title: import_zod9.z.string()
  })
  )
  })
  )
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("function_call"),
- call_id: import_zod8.z.string(),
- name: import_zod8.z.string(),
- arguments: import_zod8.z.string()
+ import_zod9.z.object({
+ type: import_zod9.z.literal("function_call"),
+ call_id: import_zod9.z.string(),
+ name: import_zod9.z.string(),
+ arguments: import_zod9.z.string()
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("web_search_call")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("web_search_call")
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("computer_call")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("computer_call")
  }),
- import_zod8.z.object({
- type: import_zod8.z.literal("reasoning")
+ import_zod9.z.object({
+ type: import_zod9.z.literal("reasoning")
  })
  ])
  ),
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
+ incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2124,19 +2040,24 @@ var OpenAIResponsesLanguageModel = class {
2124
2040
  });
2125
2041
  const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2126
2042
  const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2043
+ type: "tool-call",
2127
2044
  toolCallType: "function",
2128
2045
  toolCallId: output.call_id,
2129
2046
  toolName: output.name,
2130
2047
  args: output.arguments
2131
2048
  }));
2132
2049
  return {
2133
- text: outputTextElements.map((content) => content.text).join("\n"),
2050
+ text: {
2051
+ type: "text",
2052
+ text: outputTextElements.map((content) => content.text).join("\n")
2053
+ },
2134
2054
  sources: outputTextElements.flatMap(
2135
2055
  (content) => content.annotations.map((annotation) => {
2136
2056
  var _a2, _b2, _c2;
2137
2057
  return {
2058
+ type: "source",
2138
2059
  sourceType: "url",
2139
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils7.generateId)(),
2060
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
2140
2061
  url: annotation.url,
2141
2062
  title: annotation.title
2142
2063
  };
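Note: in the hunk above every value produced by doGenerate becomes self-describing — tool calls gain type: "tool-call", the joined text is wrapped in a { type: "text", text } object, and each url citation is emitted with type: "source". A sketch of the resulting shapes (values invented):

    const text = { type: "text", text: "joined output_text segments" };

    const toolCall = {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "call_abc123",
      toolName: "getWeather",
      args: '{"city":"Berlin"}',
    };

    const source = {
      type: "source",
      sourceType: "url",
      id: "src_1", // config.generateId() when provided, else generateId()
      url: "https://example.com/berlin",
      title: "Berlin",
    };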
@@ -2148,8 +2069,8 @@ var OpenAIResponsesLanguageModel = class {
2148
2069
  }),
2149
2070
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2150
2071
  usage: {
2151
- promptTokens: response.usage.input_tokens,
2152
- completionTokens: response.usage.output_tokens
2072
+ inputTokens: response.usage.input_tokens,
2073
+ outputTokens: response.usage.output_tokens
2153
2074
  },
2154
2075
  request: { body },
2155
2076
  response: {
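Note: the returned usage is renamed from promptTokens/completionTokens to inputTokens/outputTokens, matching the Responses API field names. A minimal sketch of the mapping (values invented; the wire format follows usageSchema below):

    const raw = { input_tokens: 12, output_tokens: 34 };
    // canary.5 returned { promptTokens: 12, completionTokens: 34 }; canary.7 returns:
    const usage = { inputTokens: raw.input_tokens, outputTokens: raw.output_tokens };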
@@ -2171,18 +2092,18 @@ var OpenAIResponsesLanguageModel = class {
2171
2092
  }
2172
2093
  async doStream(options) {
2173
2094
  const { args: body, warnings } = this.getArgs(options);
2174
- const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
2095
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
2175
2096
  url: this.config.url({
2176
2097
  path: "/responses",
2177
2098
  modelId: this.modelId
2178
2099
  }),
2179
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
2100
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
2180
2101
  body: {
2181
2102
  ...body,
2182
2103
  stream: true
2183
2104
  },
2184
2105
  failedResponseHandler: openaiFailedResponseHandler,
2185
- successfulResponseHandler: (0, import_provider_utils7.createEventSourceResponseHandler)(
2106
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
2186
2107
  openaiResponsesChunkSchema
2187
2108
  ),
2188
2109
  abortSignal: options.abortSignal,
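Note: doStream posts the same request body with stream: true and hands the SSE response to createEventSourceResponseHandler, which validates each event against openaiResponsesChunkSchema (defined below). A greatly simplified sketch of consuming such a stream — illustrative only, not the provider-utils implementation:

    async function* sseChunks(res: Response): AsyncGenerator<unknown> {
      const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += value;
        const events = buffer.split("\n\n");
        buffer = events.pop() ?? ""; // keep the trailing partial event
        for (const event of events) {
          const data = event.replace(/^data: /, "");
          if (data !== "[DONE]") yield JSON.parse(data);
        }
      }
    }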
@@ -2190,8 +2111,10 @@ var OpenAIResponsesLanguageModel = class {
2190
2111
  });
2191
2112
  const self = this;
2192
2113
  let finishReason = "unknown";
2193
- let promptTokens = NaN;
2194
- let completionTokens = NaN;
2114
+ const usage = {
2115
+ inputTokens: void 0,
2116
+ outputTokens: void 0
2117
+ };
2195
2118
  let cachedPromptTokens = null;
2196
2119
  let reasoningTokens = null;
2197
2120
  let responseId = null;
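Note: the stream's token counters now start as undefined instead of NaN, so a response that never reports usage finishes with { inputTokens: undefined, outputTokens: undefined } rather than NaN values. Sketch of the new accumulator shape:

    const usage: { inputTokens?: number; outputTokens?: number } = {
      inputTokens: undefined,
      outputTokens: undefined,
    };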
@@ -2243,8 +2166,8 @@ var OpenAIResponsesLanguageModel = class {
2243
2166
  });
2244
2167
  } else if (isTextDeltaChunk(value)) {
2245
2168
  controller.enqueue({
2246
- type: "text-delta",
2247
- textDelta: value.delta
2169
+ type: "text",
2170
+ text: value.delta
2248
2171
  });
2249
2172
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2250
2173
  ongoingToolCalls[value.output_index] = void 0;
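Note: streamed text parts are renamed in the hunk above: { type: "text-delta", textDelta } becomes { type: "text", text }. A consumer sketch (handlePart and onText are hypothetical names):

    type TextStreamPart = { type: "text"; text: string };

    function handlePart(part: TextStreamPart, onText: (s: string) => void) {
      if (part.type === "text") {
        onText(part.text); // was part.textDelta in canary.5
      }
    }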
@@ -2261,19 +2184,17 @@ var OpenAIResponsesLanguageModel = class {
2261
2184
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2262
2185
  hasToolCalls
2263
2186
  });
2264
- promptTokens = value.response.usage.input_tokens;
2265
- completionTokens = value.response.usage.output_tokens;
2187
+ usage.inputTokens = value.response.usage.input_tokens;
2188
+ usage.outputTokens = value.response.usage.output_tokens;
2266
2189
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2267
2190
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2268
2191
  } else if (isResponseAnnotationAddedChunk(value)) {
2269
2192
  controller.enqueue({
2270
2193
  type: "source",
2271
- source: {
2272
- sourceType: "url",
2273
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils7.generateId)(),
2274
- url: value.annotation.url,
2275
- title: value.annotation.title
2276
- }
2194
+ sourceType: "url",
2195
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
2196
+ url: value.annotation.url,
2197
+ title: value.annotation.title
2277
2198
  });
2278
2199
  }
2279
2200
  },
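Note: the nested source wrapper is removed — the url-citation fields now sit directly on the stream part, mirroring the flattened doGenerate sources above. Before/after sketch (values invented):

    // canary.5: { type: "source", source: { sourceType, id, url, title } }
    // canary.7:
    const sourcePart = {
      type: "source",
      sourceType: "url",
      id: "src_1",
      url: "https://example.com/berlin",
      title: "Berlin",
    };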
@@ -2281,7 +2202,7 @@ var OpenAIResponsesLanguageModel = class {
2281
2202
  controller.enqueue({
2282
2203
  type: "finish",
2283
2204
  finishReason,
2284
- usage: { promptTokens, completionTokens },
2205
+ usage,
2285
2206
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
2286
2207
  providerMetadata: {
2287
2208
  openai: {
@@ -2301,79 +2222,79 @@ var OpenAIResponsesLanguageModel = class {
2301
2222
  };
2302
2223
  }
2303
2224
  };
2304
- var usageSchema = import_zod8.z.object({
2305
- input_tokens: import_zod8.z.number(),
2306
- input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
2307
- output_tokens: import_zod8.z.number(),
2308
- output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
2225
+ var usageSchema = import_zod9.z.object({
2226
+ input_tokens: import_zod9.z.number(),
2227
+ input_tokens_details: import_zod9.z.object({ cached_tokens: import_zod9.z.number().nullish() }).nullish(),
2228
+ output_tokens: import_zod9.z.number(),
2229
+ output_tokens_details: import_zod9.z.object({ reasoning_tokens: import_zod9.z.number().nullish() }).nullish()
2309
2230
  });
2310
- var textDeltaChunkSchema = import_zod8.z.object({
2311
- type: import_zod8.z.literal("response.output_text.delta"),
2312
- delta: import_zod8.z.string()
2231
+ var textDeltaChunkSchema = import_zod9.z.object({
2232
+ type: import_zod9.z.literal("response.output_text.delta"),
2233
+ delta: import_zod9.z.string()
2313
2234
  });
2314
- var responseFinishedChunkSchema = import_zod8.z.object({
2315
- type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
2316
- response: import_zod8.z.object({
2317
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
2235
+ var responseFinishedChunkSchema = import_zod9.z.object({
2236
+ type: import_zod9.z.enum(["response.completed", "response.incomplete"]),
2237
+ response: import_zod9.z.object({
2238
+ incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullish(),
2318
2239
  usage: usageSchema
2319
2240
  })
2320
2241
  });
2321
- var responseCreatedChunkSchema = import_zod8.z.object({
2322
- type: import_zod8.z.literal("response.created"),
2323
- response: import_zod8.z.object({
2324
- id: import_zod8.z.string(),
2325
- created_at: import_zod8.z.number(),
2326
- model: import_zod8.z.string()
2242
+ var responseCreatedChunkSchema = import_zod9.z.object({
2243
+ type: import_zod9.z.literal("response.created"),
2244
+ response: import_zod9.z.object({
2245
+ id: import_zod9.z.string(),
2246
+ created_at: import_zod9.z.number(),
2247
+ model: import_zod9.z.string()
2327
2248
  })
2328
2249
  });
2329
- var responseOutputItemDoneSchema = import_zod8.z.object({
2330
- type: import_zod8.z.literal("response.output_item.done"),
2331
- output_index: import_zod8.z.number(),
2332
- item: import_zod8.z.discriminatedUnion("type", [
2333
- import_zod8.z.object({
2334
- type: import_zod8.z.literal("message")
2250
+ var responseOutputItemDoneSchema = import_zod9.z.object({
2251
+ type: import_zod9.z.literal("response.output_item.done"),
2252
+ output_index: import_zod9.z.number(),
2253
+ item: import_zod9.z.discriminatedUnion("type", [
2254
+ import_zod9.z.object({
2255
+ type: import_zod9.z.literal("message")
2335
2256
  }),
2336
- import_zod8.z.object({
2337
- type: import_zod8.z.literal("function_call"),
2338
- id: import_zod8.z.string(),
2339
- call_id: import_zod8.z.string(),
2340
- name: import_zod8.z.string(),
2341
- arguments: import_zod8.z.string(),
2342
- status: import_zod8.z.literal("completed")
2257
+ import_zod9.z.object({
2258
+ type: import_zod9.z.literal("function_call"),
2259
+ id: import_zod9.z.string(),
2260
+ call_id: import_zod9.z.string(),
2261
+ name: import_zod9.z.string(),
2262
+ arguments: import_zod9.z.string(),
2263
+ status: import_zod9.z.literal("completed")
2343
2264
  })
2344
2265
  ])
2345
2266
  });
2346
- var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
2347
- type: import_zod8.z.literal("response.function_call_arguments.delta"),
2348
- item_id: import_zod8.z.string(),
2349
- output_index: import_zod8.z.number(),
2350
- delta: import_zod8.z.string()
2267
+ var responseFunctionCallArgumentsDeltaSchema = import_zod9.z.object({
2268
+ type: import_zod9.z.literal("response.function_call_arguments.delta"),
2269
+ item_id: import_zod9.z.string(),
2270
+ output_index: import_zod9.z.number(),
2271
+ delta: import_zod9.z.string()
2351
2272
  });
2352
- var responseOutputItemAddedSchema = import_zod8.z.object({
2353
- type: import_zod8.z.literal("response.output_item.added"),
2354
- output_index: import_zod8.z.number(),
2355
- item: import_zod8.z.discriminatedUnion("type", [
2356
- import_zod8.z.object({
2357
- type: import_zod8.z.literal("message")
2273
+ var responseOutputItemAddedSchema = import_zod9.z.object({
2274
+ type: import_zod9.z.literal("response.output_item.added"),
2275
+ output_index: import_zod9.z.number(),
2276
+ item: import_zod9.z.discriminatedUnion("type", [
2277
+ import_zod9.z.object({
2278
+ type: import_zod9.z.literal("message")
2358
2279
  }),
2359
- import_zod8.z.object({
2360
- type: import_zod8.z.literal("function_call"),
2361
- id: import_zod8.z.string(),
2362
- call_id: import_zod8.z.string(),
2363
- name: import_zod8.z.string(),
2364
- arguments: import_zod8.z.string()
2280
+ import_zod9.z.object({
2281
+ type: import_zod9.z.literal("function_call"),
2282
+ id: import_zod9.z.string(),
2283
+ call_id: import_zod9.z.string(),
2284
+ name: import_zod9.z.string(),
2285
+ arguments: import_zod9.z.string()
2365
2286
  })
2366
2287
  ])
2367
2288
  });
2368
- var responseAnnotationAddedSchema = import_zod8.z.object({
2369
- type: import_zod8.z.literal("response.output_text.annotation.added"),
2370
- annotation: import_zod8.z.object({
2371
- type: import_zod8.z.literal("url_citation"),
2372
- url: import_zod8.z.string(),
2373
- title: import_zod8.z.string()
2289
+ var responseAnnotationAddedSchema = import_zod9.z.object({
2290
+ type: import_zod9.z.literal("response.output_text.annotation.added"),
2291
+ annotation: import_zod9.z.object({
2292
+ type: import_zod9.z.literal("url_citation"),
2293
+ url: import_zod9.z.string(),
2294
+ title: import_zod9.z.string()
2374
2295
  })
2375
2296
  });
2376
- var openaiResponsesChunkSchema = import_zod8.z.union([
2297
+ var openaiResponsesChunkSchema = import_zod9.z.union([
2377
2298
  textDeltaChunkSchema,
2378
2299
  responseFinishedChunkSchema,
2379
2300
  responseCreatedChunkSchema,
@@ -2381,7 +2302,7 @@ var openaiResponsesChunkSchema = import_zod8.z.union([
2381
2302
  responseFunctionCallArgumentsDeltaSchema,
2382
2303
  responseOutputItemAddedSchema,
2383
2304
  responseAnnotationAddedSchema,
2384
- import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
2305
+ import_zod9.z.object({ type: import_zod9.z.string() }).passthrough()
2385
2306
  // fallback for unknown chunks
2386
2307
  ]);
2387
2308
  function isTextDeltaChunk(chunk) {
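Note: the chunk union keeps a permissive passthrough object as its last member, so event types the SDK does not know yet still parse instead of failing validation. A reduced sketch of that pattern using zod directly:

    import { z } from "zod";

    const chunkSchema = z.union([
      z.object({ type: z.literal("response.output_text.delta"), delta: z.string() }),
      z.object({ type: z.string() }).passthrough(), // fallback for unknown chunks
    ]);

    // An event type added server-side after this release still parses:
    const parsed = chunkSchema.parse({ type: "response.future_event", detail: 1 });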
@@ -2426,25 +2347,25 @@ function getResponsesModelConfig(modelId) {
2426
2347
  requiredAutoTruncation: false
2427
2348
  };
2428
2349
  }
2429
- var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2430
- metadata: import_zod8.z.any().nullish(),
2431
- parallelToolCalls: import_zod8.z.boolean().nullish(),
2432
- previousResponseId: import_zod8.z.string().nullish(),
2433
- store: import_zod8.z.boolean().nullish(),
2434
- user: import_zod8.z.string().nullish(),
2435
- reasoningEffort: import_zod8.z.string().nullish(),
2436
- strictSchemas: import_zod8.z.boolean().nullish(),
2437
- instructions: import_zod8.z.string().nullish()
2350
+ var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
2351
+ metadata: import_zod9.z.any().nullish(),
2352
+ parallelToolCalls: import_zod9.z.boolean().nullish(),
2353
+ previousResponseId: import_zod9.z.string().nullish(),
2354
+ store: import_zod9.z.boolean().nullish(),
2355
+ user: import_zod9.z.string().nullish(),
2356
+ reasoningEffort: import_zod9.z.string().nullish(),
2357
+ strictSchemas: import_zod9.z.boolean().nullish(),
2358
+ instructions: import_zod9.z.string().nullish()
2438
2359
  });
2439
2360
 
2440
2361
  // src/openai-provider.ts
2441
2362
  function createOpenAI(options = {}) {
2442
2363
  var _a, _b, _c;
2443
- const baseURL = (_a = (0, import_provider_utils8.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2364
+ const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2444
2365
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
2445
2366
  const providerName = (_c = options.name) != null ? _c : "openai";
2446
2367
  const getHeaders = () => ({
2447
- Authorization: `Bearer ${(0, import_provider_utils8.loadApiKey)({
2368
+ Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
2448
2369
  apiKey: options.apiKey,
2449
2370
  environmentVariableName: "OPENAI_API_KEY",
2450
2371
  description: "OpenAI"