@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -26,20 +26,21 @@ __export(internal_exports, {
26
26
  OpenAIImageModel: () => OpenAIImageModel,
27
27
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
28
28
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
29
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
29
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
30
+ openaiProviderOptions: () => openaiProviderOptions
30
31
  });
31
32
  module.exports = __toCommonJS(internal_exports);
32
33
 
33
34
  // src/openai-chat-language-model.ts
34
35
  var import_provider3 = require("@ai-sdk/provider");
35
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
36
- var import_zod2 = require("zod");
36
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
37
+ var import_zod3 = require("zod");
37
38
 
38
39
  // src/convert-to-openai-chat-messages.ts
39
40
  var import_provider = require("@ai-sdk/provider");
41
+ var import_provider_utils = require("@ai-sdk/provider-utils");
40
42
  function convertToOpenAIChatMessages({
41
43
  prompt,
42
- useLegacyFunctionCalling = false,
43
44
  systemMessageMode = "system"
44
45
  }) {
45
46
  const messages = [];
@@ -91,7 +92,7 @@ function convertToOpenAIChatMessages({
91
92
  return {
92
93
  type: "image_url",
93
94
  image_url: {
94
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
95
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
95
96
  // OpenAI specific extension: image detail
96
97
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
97
98
  }
@@ -106,14 +107,20 @@ function convertToOpenAIChatMessages({
106
107
  case "audio/wav": {
107
108
  return {
108
109
  type: "input_audio",
109
- input_audio: { data: part.data, format: "wav" }
110
+ input_audio: {
111
+ data: (0, import_provider_utils.convertToBase64)(part.data),
112
+ format: "wav"
113
+ }
110
114
  };
111
115
  }
112
116
  case "audio/mp3":
113
117
  case "audio/mpeg": {
114
118
  return {
115
119
  type: "input_audio",
116
- input_audio: { data: part.data, format: "mp3" }
120
+ input_audio: {
121
+ data: (0, import_provider_utils.convertToBase64)(part.data),
122
+ format: "mp3"
123
+ }
117
124
  };
118
125
  }
119
126
  default: {
@@ -168,41 +175,20 @@ function convertToOpenAIChatMessages({
168
175
  }
169
176
  }
170
177
  }
171
- if (useLegacyFunctionCalling) {
172
- if (toolCalls.length > 1) {
173
- throw new import_provider.UnsupportedFunctionalityError({
174
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
175
- });
176
- }
177
- messages.push({
178
- role: "assistant",
179
- content: text,
180
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
181
- });
182
- } else {
183
- messages.push({
184
- role: "assistant",
185
- content: text,
186
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
187
- });
188
- }
178
+ messages.push({
179
+ role: "assistant",
180
+ content: text,
181
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
182
+ });
189
183
  break;
190
184
  }
191
185
  case "tool": {
192
186
  for (const toolResponse of content) {
193
- if (useLegacyFunctionCalling) {
194
- messages.push({
195
- role: "function",
196
- name: toolResponse.toolName,
197
- content: JSON.stringify(toolResponse.result)
198
- });
199
- } else {
200
- messages.push({
201
- role: "tool",
202
- tool_call_id: toolResponse.toolCallId,
203
- content: JSON.stringify(toolResponse.result)
204
- });
205
- }
187
+ messages.push({
188
+ role: "tool",
189
+ tool_call_id: toolResponse.toolCallId,
190
+ content: JSON.stringify(toolResponse.result)
191
+ });
206
192
  }
207
193
  break;
208
194
  }
@@ -245,21 +231,72 @@ function mapOpenAIFinishReason(finishReason) {
245
231
  }
246
232
  }
247
233
 
248
- // src/openai-error.ts
234
+ // src/openai-chat-options.ts
249
235
  var import_zod = require("zod");
250
- var import_provider_utils = require("@ai-sdk/provider-utils");
251
- var openaiErrorDataSchema = import_zod.z.object({
252
- error: import_zod.z.object({
253
- message: import_zod.z.string(),
236
+ var openaiProviderOptions = import_zod.z.object({
237
+ /**
238
+ * Modify the likelihood of specified tokens appearing in the completion.
239
+ *
240
+ * Accepts a JSON object that maps tokens (specified by their token ID in
241
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
242
+ */
243
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
244
+ /**
245
+ * Return the log probabilities of the tokens.
246
+ *
247
+ * Setting to true will return the log probabilities of the tokens that
248
+ * were generated.
249
+ *
250
+ * Setting to a number will return the log probabilities of the top n
251
+ * tokens that were generated.
252
+ */
253
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
254
+ /**
255
+ * Whether to enable parallel function calling during tool use. Default to true.
256
+ */
257
+ parallelToolCalls: import_zod.z.boolean().optional(),
258
+ /**
259
+ * A unique identifier representing your end-user, which can help OpenAI to
260
+ * monitor and detect abuse.
261
+ */
262
+ user: import_zod.z.string().optional(),
263
+ /**
264
+ * Reasoning effort for reasoning models. Defaults to `medium`.
265
+ */
266
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
267
+ /**
268
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
269
+ */
270
+ maxCompletionTokens: import_zod.z.number().optional(),
271
+ /**
272
+ * Whether to enable persistence in responses API.
273
+ */
274
+ store: import_zod.z.boolean().optional(),
275
+ /**
276
+ * Metadata to associate with the request.
277
+ */
278
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
279
+ /**
280
+ * Parameters for prediction mode.
281
+ */
282
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
283
+ });
284
+
285
+ // src/openai-error.ts
286
+ var import_zod2 = require("zod");
287
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
288
+ var openaiErrorDataSchema = import_zod2.z.object({
289
+ error: import_zod2.z.object({
290
+ message: import_zod2.z.string(),
254
291
  // The additional information below is handled loosely to support
255
292
  // OpenAI-compatible providers that have slightly different error
256
293
  // responses:
257
- type: import_zod.z.string().nullish(),
258
- param: import_zod.z.any().nullish(),
259
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
294
+ type: import_zod2.z.string().nullish(),
295
+ param: import_zod2.z.any().nullish(),
296
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
260
297
  })
261
298
  });
262
- var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
299
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
263
300
  errorSchema: openaiErrorDataSchema,
264
301
  errorToMessage: (data) => data.error.message
265
302
  });
@@ -282,7 +319,6 @@ var import_provider2 = require("@ai-sdk/provider");
282
319
  function prepareTools({
283
320
  tools,
284
321
  toolChoice,
285
- useLegacyFunctionCalling = false,
286
322
  structuredOutputs
287
323
  }) {
288
324
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -290,48 +326,6 @@ function prepareTools({
290
326
  if (tools == null) {
291
327
  return { tools: void 0, toolChoice: void 0, toolWarnings };
292
328
  }
293
- if (useLegacyFunctionCalling) {
294
- const openaiFunctions = [];
295
- for (const tool of tools) {
296
- if (tool.type === "provider-defined") {
297
- toolWarnings.push({ type: "unsupported-tool", tool });
298
- } else {
299
- openaiFunctions.push({
300
- name: tool.name,
301
- description: tool.description,
302
- parameters: tool.parameters
303
- });
304
- }
305
- }
306
- if (toolChoice == null) {
307
- return {
308
- functions: openaiFunctions,
309
- function_call: void 0,
310
- toolWarnings
311
- };
312
- }
313
- const type2 = toolChoice.type;
314
- switch (type2) {
315
- case "auto":
316
- case "none":
317
- case void 0:
318
- return {
319
- functions: openaiFunctions,
320
- function_call: void 0,
321
- toolWarnings
322
- };
323
- case "required":
324
- throw new import_provider2.UnsupportedFunctionalityError({
325
- functionality: "useLegacyFunctionCalling and toolChoice: required"
326
- });
327
- default:
328
- return {
329
- functions: openaiFunctions,
330
- function_call: { name: toolChoice.toolName },
331
- toolWarnings
332
- };
333
- }
334
- }
335
329
  const openaiTools = [];
336
330
  for (const tool of tools) {
337
331
  if (tool.type === "provider-defined") {
@@ -403,7 +397,7 @@ var OpenAIChatLanguageModel = class {
403
397
  }
404
398
  getArgs({
405
399
  prompt,
406
- maxTokens,
400
+ maxOutputTokens,
407
401
  temperature,
408
402
  topP,
409
403
  topK,
@@ -416,8 +410,13 @@ var OpenAIChatLanguageModel = class {
416
410
  toolChoice,
417
411
  providerOptions
418
412
  }) {
419
- var _a, _b, _c, _d, _e, _f, _g;
413
+ var _a, _b;
420
414
  const warnings = [];
415
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
416
+ provider: "openai",
417
+ providerOptions,
418
+ schema: openaiProviderOptions
419
+ })) != null ? _a : {};
421
420
  if (topK != null) {
422
421
  warnings.push({
423
422
  type: "unsupported-setting",
@@ -431,21 +430,9 @@ var OpenAIChatLanguageModel = class {
431
430
  details: "JSON response format schema is only supported with structuredOutputs"
432
431
  });
433
432
  }
434
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
435
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
436
- throw new import_provider3.UnsupportedFunctionalityError({
437
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
438
- });
439
- }
440
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
441
- throw new import_provider3.UnsupportedFunctionalityError({
442
- functionality: "structuredOutputs with useLegacyFunctionCalling"
443
- });
444
- }
445
433
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
446
434
  {
447
435
  prompt,
448
- useLegacyFunctionCalling,
449
436
  systemMessageMode: getSystemMessageMode(this.modelId)
450
437
  }
451
438
  );
@@ -454,13 +441,13 @@ var OpenAIChatLanguageModel = class {
454
441
  // model id:
455
442
  model: this.modelId,
456
443
  // model specific settings:
457
- logit_bias: this.settings.logitBias,
458
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
459
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
460
- user: this.settings.user,
461
- parallel_tool_calls: this.settings.parallelToolCalls,
444
+ logit_bias: openaiOptions.logitBias,
445
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
446
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
447
+ user: openaiOptions.user,
448
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
462
449
  // standardized settings:
463
- max_tokens: maxTokens,
450
+ max_tokens: maxOutputTokens,
464
451
  temperature,
465
452
  top_p: topP,
466
453
  frequency_penalty: frequencyPenalty,
@@ -471,19 +458,19 @@ var OpenAIChatLanguageModel = class {
471
458
  json_schema: {
472
459
  schema: responseFormat.schema,
473
460
  strict: true,
474
- name: (_a = responseFormat.name) != null ? _a : "response",
461
+ name: (_b = responseFormat.name) != null ? _b : "response",
475
462
  description: responseFormat.description
476
463
  }
477
464
  } : { type: "json_object" } : void 0,
478
465
  stop: stopSequences,
479
466
  seed,
480
467
  // openai specific settings:
481
- // TODO remove in next major version; we auto-map maxTokens now
482
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
483
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
484
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
485
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
486
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
468
+ // TODO remove in next major version; we auto-map maxOutputTokens now
469
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
470
+ store: openaiOptions.store,
471
+ metadata: openaiOptions.metadata,
472
+ prediction: openaiOptions.prediction,
473
+ reasoning_effort: openaiOptions.reasoningEffort,
487
474
  // messages:
488
475
  messages
489
476
  };
@@ -547,26 +534,30 @@ var OpenAIChatLanguageModel = class {
547
534
  }
548
535
  baseArgs.max_tokens = void 0;
549
536
  }
537
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
538
+ if (baseArgs.temperature != null) {
539
+ baseArgs.temperature = void 0;
540
+ warnings.push({
541
+ type: "unsupported-setting",
542
+ setting: "temperature",
543
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
544
+ });
545
+ }
550
546
  }
551
547
  const {
552
548
  tools: openaiTools,
553
549
  toolChoice: openaiToolChoice,
554
- functions,
555
- function_call,
556
550
  toolWarnings
557
551
  } = prepareTools({
558
552
  tools,
559
553
  toolChoice,
560
- useLegacyFunctionCalling,
561
554
  structuredOutputs: this.supportsStructuredOutputs
562
555
  });
563
556
  return {
564
557
  args: {
565
558
  ...baseArgs,
566
559
  tools: openaiTools,
567
- tool_choice: openaiToolChoice,
568
- functions,
569
- function_call
560
+ tool_choice: openaiToolChoice
570
561
  },
571
562
  warnings: [...warnings, ...toolWarnings]
572
563
  };
@@ -578,15 +569,15 @@ var OpenAIChatLanguageModel = class {
578
569
  responseHeaders,
579
570
  value: response,
580
571
  rawValue: rawResponse
581
- } = await (0, import_provider_utils2.postJsonToApi)({
572
+ } = await (0, import_provider_utils3.postJsonToApi)({
582
573
  url: this.config.url({
583
574
  path: "/chat/completions",
584
575
  modelId: this.modelId
585
576
  }),
586
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
577
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
587
578
  body,
588
579
  failedResponseHandler: openaiFailedResponseHandler,
589
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
580
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
590
581
  openaiChatResponseSchema
591
582
  ),
592
583
  abortSignal: options.abortSignal,
@@ -611,26 +602,19 @@ var OpenAIChatLanguageModel = class {
611
602
  }
612
603
  return {
613
604
  text: (_c = choice.message.content) != null ? _c : void 0,
614
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
615
- {
616
- toolCallType: "function",
617
- toolCallId: (0, import_provider_utils2.generateId)(),
618
- toolName: choice.message.function_call.name,
619
- args: choice.message.function_call.arguments
620
- }
621
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
605
+ toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
622
606
  var _a2;
623
607
  return {
624
608
  toolCallType: "function",
625
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
609
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
626
610
  toolName: toolCall.function.name,
627
611
  args: toolCall.function.arguments
628
612
  };
629
613
  }),
630
614
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
631
615
  usage: {
632
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
633
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
616
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
617
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
634
618
  },
635
619
  request: { body },
636
620
  response: {
@@ -644,48 +628,6 @@ var OpenAIChatLanguageModel = class {
644
628
  };
645
629
  }
646
630
  async doStream(options) {
647
- if (this.settings.simulateStreaming) {
648
- const result = await this.doGenerate(options);
649
- const simulatedStream = new ReadableStream({
650
- start(controller) {
651
- controller.enqueue({ type: "response-metadata", ...result.response });
652
- if (result.text) {
653
- controller.enqueue({
654
- type: "text-delta",
655
- textDelta: result.text
656
- });
657
- }
658
- if (result.toolCalls) {
659
- for (const toolCall of result.toolCalls) {
660
- controller.enqueue({
661
- type: "tool-call-delta",
662
- toolCallType: "function",
663
- toolCallId: toolCall.toolCallId,
664
- toolName: toolCall.toolName,
665
- argsTextDelta: toolCall.args
666
- });
667
- controller.enqueue({
668
- type: "tool-call",
669
- ...toolCall
670
- });
671
- }
672
- }
673
- controller.enqueue({
674
- type: "finish",
675
- finishReason: result.finishReason,
676
- usage: result.usage,
677
- logprobs: result.logprobs,
678
- providerMetadata: result.providerMetadata
679
- });
680
- controller.close();
681
- }
682
- });
683
- return {
684
- stream: simulatedStream,
685
- response: result.response,
686
- warnings: result.warnings
687
- };
688
- }
689
631
  const { args, warnings } = this.getArgs(options);
690
632
  const body = {
691
633
  ...args,
@@ -693,15 +635,15 @@ var OpenAIChatLanguageModel = class {
693
635
  // only include stream_options when in strict compatibility mode:
694
636
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
695
637
  };
696
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
638
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
697
639
  url: this.config.url({
698
640
  path: "/chat/completions",
699
641
  modelId: this.modelId
700
642
  }),
701
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
643
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
702
644
  body,
703
645
  failedResponseHandler: openaiFailedResponseHandler,
704
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
646
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
705
647
  openaiChatChunkSchema
706
648
  ),
707
649
  abortSignal: options.abortSignal,
@@ -710,13 +652,12 @@ var OpenAIChatLanguageModel = class {
710
652
  const { messages: rawPrompt, ...rawSettings } = args;
711
653
  const toolCalls = [];
712
654
  let finishReason = "unknown";
713
- let usage = {
714
- promptTokens: void 0,
715
- completionTokens: void 0
655
+ const usage = {
656
+ inputTokens: void 0,
657
+ outputTokens: void 0
716
658
  };
717
659
  let logprobs;
718
660
  let isFirstChunk = true;
719
- const { useLegacyFunctionCalling } = this.settings;
720
661
  const providerMetadata = { openai: {} };
721
662
  return {
722
663
  stream: response.pipeThrough(
@@ -748,10 +689,8 @@ var OpenAIChatLanguageModel = class {
748
689
  prompt_tokens_details,
749
690
  completion_tokens_details
750
691
  } = value.usage;
751
- usage = {
752
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
753
- completionTokens: completion_tokens != null ? completion_tokens : void 0
754
- };
692
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
693
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
755
694
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
756
695
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
757
696
  }
@@ -786,16 +725,8 @@ var OpenAIChatLanguageModel = class {
786
725
  if (logprobs === void 0) logprobs = [];
787
726
  logprobs.push(...mappedLogprobs);
788
727
  }
789
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
790
- {
791
- type: "function",
792
- id: (0, import_provider_utils2.generateId)(),
793
- function: delta.function_call,
794
- index: 0
795
- }
796
- ] : delta.tool_calls;
797
- if (mappedToolCalls != null) {
798
- for (const toolCallDelta of mappedToolCalls) {
728
+ if (delta.tool_calls != null) {
729
+ for (const toolCallDelta of delta.tool_calls) {
799
730
  const index = toolCallDelta.index;
800
731
  if (toolCalls[index] == null) {
801
732
  if (toolCallDelta.type !== "function") {
@@ -836,11 +767,11 @@ var OpenAIChatLanguageModel = class {
836
767
  argsTextDelta: toolCall2.function.arguments
837
768
  });
838
769
  }
839
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
770
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
840
771
  controller.enqueue({
841
772
  type: "tool-call",
842
773
  toolCallType: "function",
843
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
774
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
844
775
  toolName: toolCall2.function.name,
845
776
  args: toolCall2.function.arguments
846
777
  });
@@ -863,11 +794,11 @@ var OpenAIChatLanguageModel = class {
863
794
  toolName: toolCall.function.name,
864
795
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
865
796
  });
866
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
797
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
867
798
  controller.enqueue({
868
799
  type: "tool-call",
869
800
  toolCallType: "function",
870
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
801
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
871
802
  toolName: toolCall.function.name,
872
803
  args: toolCall.function.arguments
873
804
  });
@@ -877,15 +808,11 @@ var OpenAIChatLanguageModel = class {
877
808
  }
878
809
  },
879
810
  flush(controller) {
880
- var _a, _b;
881
811
  controller.enqueue({
882
812
  type: "finish",
883
813
  finishReason,
884
814
  logprobs,
885
- usage: {
886
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
887
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
888
- },
815
+ usage,
889
816
  ...providerMetadata != null ? { providerMetadata } : {}
890
817
  });
891
818
  }
@@ -897,104 +824,96 @@ var OpenAIChatLanguageModel = class {
897
824
  };
898
825
  }
899
826
  };
900
- var openaiTokenUsageSchema = import_zod2.z.object({
901
- prompt_tokens: import_zod2.z.number().nullish(),
902
- completion_tokens: import_zod2.z.number().nullish(),
903
- prompt_tokens_details: import_zod2.z.object({
904
- cached_tokens: import_zod2.z.number().nullish()
827
+ var openaiTokenUsageSchema = import_zod3.z.object({
828
+ prompt_tokens: import_zod3.z.number().nullish(),
829
+ completion_tokens: import_zod3.z.number().nullish(),
830
+ prompt_tokens_details: import_zod3.z.object({
831
+ cached_tokens: import_zod3.z.number().nullish()
905
832
  }).nullish(),
906
- completion_tokens_details: import_zod2.z.object({
907
- reasoning_tokens: import_zod2.z.number().nullish(),
908
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
909
- rejected_prediction_tokens: import_zod2.z.number().nullish()
833
+ completion_tokens_details: import_zod3.z.object({
834
+ reasoning_tokens: import_zod3.z.number().nullish(),
835
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
836
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
910
837
  }).nullish()
911
838
  }).nullish();
912
- var openaiChatResponseSchema = import_zod2.z.object({
913
- id: import_zod2.z.string().nullish(),
914
- created: import_zod2.z.number().nullish(),
915
- model: import_zod2.z.string().nullish(),
916
- choices: import_zod2.z.array(
917
- import_zod2.z.object({
918
- message: import_zod2.z.object({
919
- role: import_zod2.z.literal("assistant").nullish(),
920
- content: import_zod2.z.string().nullish(),
921
- function_call: import_zod2.z.object({
922
- arguments: import_zod2.z.string(),
923
- name: import_zod2.z.string()
924
- }).nullish(),
925
- tool_calls: import_zod2.z.array(
926
- import_zod2.z.object({
927
- id: import_zod2.z.string().nullish(),
928
- type: import_zod2.z.literal("function"),
929
- function: import_zod2.z.object({
930
- name: import_zod2.z.string(),
931
- arguments: import_zod2.z.string()
839
+ var openaiChatResponseSchema = import_zod3.z.object({
840
+ id: import_zod3.z.string().nullish(),
841
+ created: import_zod3.z.number().nullish(),
842
+ model: import_zod3.z.string().nullish(),
843
+ choices: import_zod3.z.array(
844
+ import_zod3.z.object({
845
+ message: import_zod3.z.object({
846
+ role: import_zod3.z.literal("assistant").nullish(),
847
+ content: import_zod3.z.string().nullish(),
848
+ tool_calls: import_zod3.z.array(
849
+ import_zod3.z.object({
850
+ id: import_zod3.z.string().nullish(),
851
+ type: import_zod3.z.literal("function"),
852
+ function: import_zod3.z.object({
853
+ name: import_zod3.z.string(),
854
+ arguments: import_zod3.z.string()
932
855
  })
933
856
  })
934
857
  ).nullish()
935
858
  }),
936
- index: import_zod2.z.number(),
937
- logprobs: import_zod2.z.object({
938
- content: import_zod2.z.array(
939
- import_zod2.z.object({
940
- token: import_zod2.z.string(),
941
- logprob: import_zod2.z.number(),
942
- top_logprobs: import_zod2.z.array(
943
- import_zod2.z.object({
944
- token: import_zod2.z.string(),
945
- logprob: import_zod2.z.number()
859
+ index: import_zod3.z.number(),
860
+ logprobs: import_zod3.z.object({
861
+ content: import_zod3.z.array(
862
+ import_zod3.z.object({
863
+ token: import_zod3.z.string(),
864
+ logprob: import_zod3.z.number(),
865
+ top_logprobs: import_zod3.z.array(
866
+ import_zod3.z.object({
867
+ token: import_zod3.z.string(),
868
+ logprob: import_zod3.z.number()
946
869
  })
947
870
  )
948
871
  })
949
872
  ).nullable()
950
873
  }).nullish(),
951
- finish_reason: import_zod2.z.string().nullish()
874
+ finish_reason: import_zod3.z.string().nullish()
952
875
  })
953
876
  ),
954
877
  usage: openaiTokenUsageSchema
955
878
  });
956
- var openaiChatChunkSchema = import_zod2.z.union([
957
- import_zod2.z.object({
958
- id: import_zod2.z.string().nullish(),
959
- created: import_zod2.z.number().nullish(),
960
- model: import_zod2.z.string().nullish(),
961
- choices: import_zod2.z.array(
962
- import_zod2.z.object({
963
- delta: import_zod2.z.object({
964
- role: import_zod2.z.enum(["assistant"]).nullish(),
965
- content: import_zod2.z.string().nullish(),
966
- function_call: import_zod2.z.object({
967
- name: import_zod2.z.string().optional(),
968
- arguments: import_zod2.z.string().optional()
969
- }).nullish(),
970
- tool_calls: import_zod2.z.array(
971
- import_zod2.z.object({
972
- index: import_zod2.z.number(),
973
- id: import_zod2.z.string().nullish(),
974
- type: import_zod2.z.literal("function").optional(),
975
- function: import_zod2.z.object({
976
- name: import_zod2.z.string().nullish(),
977
- arguments: import_zod2.z.string().nullish()
879
+ var openaiChatChunkSchema = import_zod3.z.union([
880
+ import_zod3.z.object({
881
+ id: import_zod3.z.string().nullish(),
882
+ created: import_zod3.z.number().nullish(),
883
+ model: import_zod3.z.string().nullish(),
884
+ choices: import_zod3.z.array(
885
+ import_zod3.z.object({
886
+ delta: import_zod3.z.object({
887
+ role: import_zod3.z.enum(["assistant"]).nullish(),
888
+ content: import_zod3.z.string().nullish(),
889
+ tool_calls: import_zod3.z.array(
890
+ import_zod3.z.object({
891
+ index: import_zod3.z.number(),
892
+ id: import_zod3.z.string().nullish(),
893
+ type: import_zod3.z.literal("function").optional(),
894
+ function: import_zod3.z.object({
895
+ name: import_zod3.z.string().nullish(),
896
+ arguments: import_zod3.z.string().nullish()
978
897
  })
979
898
  })
980
899
  ).nullish()
981
900
  }).nullish(),
982
- logprobs: import_zod2.z.object({
983
- content: import_zod2.z.array(
984
- import_zod2.z.object({
985
- token: import_zod2.z.string(),
986
- logprob: import_zod2.z.number(),
987
- top_logprobs: import_zod2.z.array(
988
- import_zod2.z.object({
989
- token: import_zod2.z.string(),
990
- logprob: import_zod2.z.number()
901
+ logprobs: import_zod3.z.object({
902
+ content: import_zod3.z.array(
903
+ import_zod3.z.object({
904
+ token: import_zod3.z.string(),
905
+ logprob: import_zod3.z.number(),
906
+ top_logprobs: import_zod3.z.array(
907
+ import_zod3.z.object({
908
+ token: import_zod3.z.string(),
909
+ logprob: import_zod3.z.number()
991
910
  })
992
911
  )
993
912
  })
994
913
  ).nullable()
995
914
  }).nullish(),
996
- finish_reason: import_zod2.z.string().nullable().optional(),
997
- index: import_zod2.z.number()
915
+ finish_reason: import_zod3.z.string().nullable().optional(),
916
+ index: import_zod3.z.number()
998
917
  })
999
918
  ),
1000
919
  usage: openaiTokenUsageSchema
@@ -1036,8 +955,8 @@ var reasoningModels = {
1036
955
  };
1037
956
 
1038
957
  // src/openai-completion-language-model.ts
1039
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
1040
- var import_zod3 = require("zod");
958
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
959
+ var import_zod4 = require("zod");
1041
960
 
1042
961
  // src/convert-to-openai-completion-prompt.ts
1043
962
  var import_provider4 = require("@ai-sdk/provider");
@@ -1147,7 +1066,7 @@ var OpenAICompletionLanguageModel = class {
1147
1066
  getArgs({
1148
1067
  inputFormat,
1149
1068
  prompt,
1150
- maxTokens,
1069
+ maxOutputTokens,
1151
1070
  temperature,
1152
1071
  topP,
1153
1072
  topK,
@@ -1189,7 +1108,7 @@ var OpenAICompletionLanguageModel = class {
1189
1108
  suffix: this.settings.suffix,
1190
1109
  user: this.settings.user,
1191
1110
  // standardized settings:
1192
- max_tokens: maxTokens,
1111
+ max_tokens: maxOutputTokens,
1193
1112
  temperature,
1194
1113
  top_p: topP,
1195
1114
  frequency_penalty: frequencyPenalty,
@@ -1209,15 +1128,15 @@ var OpenAICompletionLanguageModel = class {
1209
1128
  responseHeaders,
1210
1129
  value: response,
1211
1130
  rawValue: rawResponse
1212
- } = await (0, import_provider_utils3.postJsonToApi)({
1131
+ } = await (0, import_provider_utils4.postJsonToApi)({
1213
1132
  url: this.config.url({
1214
1133
  path: "/completions",
1215
1134
  modelId: this.modelId
1216
1135
  }),
1217
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
1136
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1218
1137
  body: args,
1219
1138
  failedResponseHandler: openaiFailedResponseHandler,
1220
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
1139
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1221
1140
  openaiCompletionResponseSchema
1222
1141
  ),
1223
1142
  abortSignal: options.abortSignal,
@@ -1227,8 +1146,8 @@ var OpenAICompletionLanguageModel = class {
1227
1146
  return {
1228
1147
  text: choice.text,
1229
1148
  usage: {
1230
- promptTokens: response.usage.prompt_tokens,
1231
- completionTokens: response.usage.completion_tokens
1149
+ inputTokens: response.usage.prompt_tokens,
1150
+ outputTokens: response.usage.completion_tokens
1232
1151
  },
1233
1152
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
1234
1153
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1249,24 +1168,24 @@ var OpenAICompletionLanguageModel = class {
1249
1168
  // only include stream_options when in strict compatibility mode:
1250
1169
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1251
1170
  };
1252
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
1171
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1253
1172
  url: this.config.url({
1254
1173
  path: "/completions",
1255
1174
  modelId: this.modelId
1256
1175
  }),
1257
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
1176
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1258
1177
  body,
1259
1178
  failedResponseHandler: openaiFailedResponseHandler,
1260
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
1179
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
1261
1180
  openaiCompletionChunkSchema
1262
1181
  ),
1263
1182
  abortSignal: options.abortSignal,
1264
1183
  fetch: this.config.fetch
1265
1184
  });
1266
1185
  let finishReason = "unknown";
1267
- let usage = {
1268
- promptTokens: Number.NaN,
1269
- completionTokens: Number.NaN
1186
+ const usage = {
1187
+ inputTokens: void 0,
1188
+ outputTokens: void 0
1270
1189
  };
1271
1190
  let logprobs;
1272
1191
  let isFirstChunk = true;
@@ -1293,10 +1212,8 @@ var OpenAICompletionLanguageModel = class {
1293
1212
  });
1294
1213
  }
1295
1214
  if (value.usage != null) {
1296
- usage = {
1297
- promptTokens: value.usage.prompt_tokens,
1298
- completionTokens: value.usage.completion_tokens
1299
- };
1215
+ usage.inputTokens = value.usage.prompt_tokens;
1216
+ usage.outputTokens = value.usage.completion_tokens;
1300
1217
  }
1301
1218
  const choice = value.choices[0];
1302
1219
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1332,46 +1249,46 @@ var OpenAICompletionLanguageModel = class {
1332
1249
  };
1333
1250
  }
1334
1251
  };
1335
- var openaiCompletionResponseSchema = import_zod3.z.object({
1336
- id: import_zod3.z.string().nullish(),
1337
- created: import_zod3.z.number().nullish(),
1338
- model: import_zod3.z.string().nullish(),
1339
- choices: import_zod3.z.array(
1340
- import_zod3.z.object({
1341
- text: import_zod3.z.string(),
1342
- finish_reason: import_zod3.z.string(),
1343
- logprobs: import_zod3.z.object({
1344
- tokens: import_zod3.z.array(import_zod3.z.string()),
1345
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1346
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1252
+ var openaiCompletionResponseSchema = import_zod4.z.object({
1253
+ id: import_zod4.z.string().nullish(),
1254
+ created: import_zod4.z.number().nullish(),
1255
+ model: import_zod4.z.string().nullish(),
1256
+ choices: import_zod4.z.array(
1257
+ import_zod4.z.object({
1258
+ text: import_zod4.z.string(),
1259
+ finish_reason: import_zod4.z.string(),
1260
+ logprobs: import_zod4.z.object({
1261
+ tokens: import_zod4.z.array(import_zod4.z.string()),
1262
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
1263
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
1347
1264
  }).nullish()
1348
1265
  })
1349
1266
  ),
1350
- usage: import_zod3.z.object({
1351
- prompt_tokens: import_zod3.z.number(),
1352
- completion_tokens: import_zod3.z.number()
1267
+ usage: import_zod4.z.object({
1268
+ prompt_tokens: import_zod4.z.number(),
1269
+ completion_tokens: import_zod4.z.number()
1353
1270
  })
1354
1271
  });
1355
- var openaiCompletionChunkSchema = import_zod3.z.union([
1356
- import_zod3.z.object({
1357
- id: import_zod3.z.string().nullish(),
1358
- created: import_zod3.z.number().nullish(),
1359
- model: import_zod3.z.string().nullish(),
1360
- choices: import_zod3.z.array(
1361
- import_zod3.z.object({
1362
- text: import_zod3.z.string(),
1363
- finish_reason: import_zod3.z.string().nullish(),
1364
- index: import_zod3.z.number(),
1365
- logprobs: import_zod3.z.object({
1366
- tokens: import_zod3.z.array(import_zod3.z.string()),
1367
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1368
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1272
+ var openaiCompletionChunkSchema = import_zod4.z.union([
1273
+ import_zod4.z.object({
1274
+ id: import_zod4.z.string().nullish(),
1275
+ created: import_zod4.z.number().nullish(),
1276
+ model: import_zod4.z.string().nullish(),
1277
+ choices: import_zod4.z.array(
1278
+ import_zod4.z.object({
1279
+ text: import_zod4.z.string(),
1280
+ finish_reason: import_zod4.z.string().nullish(),
1281
+ index: import_zod4.z.number(),
1282
+ logprobs: import_zod4.z.object({
1283
+ tokens: import_zod4.z.array(import_zod4.z.string()),
1284
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
1285
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
1369
1286
  }).nullish()
1370
1287
  })
1371
1288
  ),
1372
- usage: import_zod3.z.object({
1373
- prompt_tokens: import_zod3.z.number(),
1374
- completion_tokens: import_zod3.z.number()
1289
+ usage: import_zod4.z.object({
1290
+ prompt_tokens: import_zod4.z.number(),
1291
+ completion_tokens: import_zod4.z.number()
1375
1292
  }).nullish()
1376
1293
  }),
1377
1294
  openaiErrorDataSchema
@@ -1379,8 +1296,8 @@ var openaiCompletionChunkSchema = import_zod3.z.union([
1379
1296
 
1380
1297
  // src/openai-embedding-model.ts
1381
1298
  var import_provider5 = require("@ai-sdk/provider");
1382
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
1383
- var import_zod4 = require("zod");
1299
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
1300
+ var import_zod5 = require("zod");
1384
1301
  var OpenAIEmbeddingModel = class {
1385
1302
  constructor(modelId, settings, config) {
1386
1303
  this.specificationVersion = "v1";
@@ -1412,12 +1329,12 @@ var OpenAIEmbeddingModel = class {
1412
1329
  values
1413
1330
  });
1414
1331
  }
1415
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1332
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
1416
1333
  url: this.config.url({
1417
1334
  path: "/embeddings",
1418
1335
  modelId: this.modelId
1419
1336
  }),
1420
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
1337
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
1421
1338
  body: {
1422
1339
  model: this.modelId,
1423
1340
  input: values,
@@ -1426,7 +1343,7 @@ var OpenAIEmbeddingModel = class {
1426
1343
  user: this.settings.user
1427
1344
  },
1428
1345
  failedResponseHandler: openaiFailedResponseHandler,
1429
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1346
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
1430
1347
  openaiTextEmbeddingResponseSchema
1431
1348
  ),
1432
1349
  abortSignal,
@@ -1439,14 +1356,14 @@ var OpenAIEmbeddingModel = class {
1439
1356
  };
1440
1357
  }
1441
1358
  };
1442
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
1443
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
1444
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
1359
+ var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
1360
+ data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
1361
+ usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
1445
1362
  });
1446
1363
 
1447
1364
  // src/openai-image-model.ts
1448
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
1449
- var import_zod5 = require("zod");
1365
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1366
+ var import_zod6 = require("zod");
1450
1367
 
1451
1368
  // src/openai-image-settings.ts
1452
1369
  var modelMaxImagesPerCall = {
@@ -1492,12 +1409,12 @@ var OpenAIImageModel = class {
1492
1409
  warnings.push({ type: "unsupported-setting", setting: "seed" });
1493
1410
  }
1494
1411
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1495
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
1412
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
1496
1413
  url: this.config.url({
1497
1414
  path: "/images/generations",
1498
1415
  modelId: this.modelId
1499
1416
  }),
1500
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
1417
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
1501
1418
  body: {
1502
1419
  model: this.modelId,
1503
1420
  prompt,
@@ -1507,7 +1424,7 @@ var OpenAIImageModel = class {
1507
1424
  response_format: "b64_json"
1508
1425
  },
1509
1426
  failedResponseHandler: openaiFailedResponseHandler,
1510
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
1427
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
1511
1428
  openaiImageResponseSchema
1512
1429
  ),
1513
1430
  abortSignal,
@@ -1524,25 +1441,19 @@ var OpenAIImageModel = class {
1524
1441
  };
1525
1442
  }
1526
1443
  };
1527
- var openaiImageResponseSchema = import_zod5.z.object({
1528
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
1444
+ var openaiImageResponseSchema = import_zod6.z.object({
1445
+ data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
1529
1446
  });
1530
1447
 
1531
1448
  // src/openai-transcription-model.ts
1532
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
1533
- var import_zod6 = require("zod");
1534
- var OpenAIProviderOptionsSchema = import_zod6.z.object({
1535
- include: import_zod6.z.array(import_zod6.z.string()).optional().describe(
1536
- "Additional information to include in the transcription response."
1537
- ),
1538
- language: import_zod6.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
1539
- prompt: import_zod6.z.string().optional().describe(
1540
- "An optional text to guide the model's style or continue a previous audio segment."
1541
- ),
1542
- temperature: import_zod6.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
1543
- timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
1544
- "The timestamp granularities to populate for this transcription."
1545
- )
1449
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1450
+ var import_zod7 = require("zod");
1451
+ var OpenAIProviderOptionsSchema = import_zod7.z.object({
1452
+ include: import_zod7.z.array(import_zod7.z.string()).nullish(),
1453
+ language: import_zod7.z.string().nullish(),
1454
+ prompt: import_zod7.z.string().nullish(),
1455
+ temperature: import_zod7.z.number().min(0).max(1).nullish().default(0),
1456
+ timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).nullish().default(["segment"])
1546
1457
  });
1547
1458
  var languageMap = {
1548
1459
  afrikaans: "af",
@@ -1617,28 +1528,29 @@ var OpenAITranscriptionModel = class {
1617
1528
  mediaType,
1618
1529
  providerOptions
1619
1530
  }) {
1531
+ var _a, _b, _c, _d, _e;
1620
1532
  const warnings = [];
1621
- const openAIOptions = (0, import_provider_utils6.parseProviderOptions)({
1533
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
1622
1534
  provider: "openai",
1623
1535
  providerOptions,
1624
1536
  schema: OpenAIProviderOptionsSchema
1625
1537
  });
1626
1538
  const formData = new FormData();
1627
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils6.convertBase64ToUint8Array)(audio)]);
1539
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
1628
1540
  formData.append("model", this.modelId);
1629
1541
  formData.append("file", new File([blob], "audio", { type: mediaType }));
1630
1542
  if (openAIOptions) {
1631
1543
  const transcriptionModelOptions = {
1632
- include: openAIOptions.include,
1633
- language: openAIOptions.language,
1634
- prompt: openAIOptions.prompt,
1635
- temperature: openAIOptions.temperature,
1636
- timestamp_granularities: openAIOptions.timestampGranularities
1544
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
1545
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
1546
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1547
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1548
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1637
1549
  };
1638
1550
  for (const key in transcriptionModelOptions) {
1639
1551
  const value = transcriptionModelOptions[key];
1640
1552
  if (value !== void 0) {
1641
- formData.append(key, value);
1553
+ formData.append(key, String(value));
1642
1554
  }
1643
1555
  }
1644
1556
  }
@@ -1655,15 +1567,15 @@ var OpenAITranscriptionModel = class {
1655
1567
  value: response,
1656
1568
  responseHeaders,
1657
1569
  rawValue: rawResponse
1658
- } = await (0, import_provider_utils6.postFormDataToApi)({
1570
+ } = await (0, import_provider_utils7.postFormDataToApi)({
1659
1571
  url: this.config.url({
1660
1572
  path: "/audio/transcriptions",
1661
1573
  modelId: this.modelId
1662
1574
  }),
1663
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
1575
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
1664
1576
  formData,
1665
1577
  failedResponseHandler: openaiFailedResponseHandler,
1666
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
1578
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
1667
1579
  openaiTranscriptionResponseSchema
1668
1580
  ),
1669
1581
  abortSignal: options.abortSignal,
@@ -1689,22 +1601,22 @@ var OpenAITranscriptionModel = class {
1689
1601
  };
1690
1602
  }
1691
1603
  };
1692
- var openaiTranscriptionResponseSchema = import_zod6.z.object({
1693
- text: import_zod6.z.string(),
1694
- language: import_zod6.z.string().nullish(),
1695
- duration: import_zod6.z.number().nullish(),
1696
- words: import_zod6.z.array(
1697
- import_zod6.z.object({
1698
- word: import_zod6.z.string(),
1699
- start: import_zod6.z.number(),
1700
- end: import_zod6.z.number()
1604
+ var openaiTranscriptionResponseSchema = import_zod7.z.object({
1605
+ text: import_zod7.z.string(),
1606
+ language: import_zod7.z.string().nullish(),
1607
+ duration: import_zod7.z.number().nullish(),
1608
+ words: import_zod7.z.array(
1609
+ import_zod7.z.object({
1610
+ word: import_zod7.z.string(),
1611
+ start: import_zod7.z.number(),
1612
+ end: import_zod7.z.number()
1701
1613
  })
1702
1614
  ).nullish()
1703
1615
  });
1704
1616
 
1705
1617
  // src/responses/openai-responses-language-model.ts
1706
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
1707
- var import_zod7 = require("zod");
1618
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1619
+ var import_zod8 = require("zod");
1708
1620
 
1709
1621
  // src/responses/convert-to-openai-responses-messages.ts
1710
1622
  var import_provider6 = require("@ai-sdk/provider");
@@ -1921,7 +1833,7 @@ var OpenAIResponsesLanguageModel = class {
1921
1833
  return this.config.provider;
1922
1834
  }
1923
1835
  getArgs({
1924
- maxTokens,
1836
+ maxOutputTokens,
1925
1837
  temperature,
1926
1838
  stopSequences,
1927
1839
  topP,
@@ -1964,7 +1876,7 @@ var OpenAIResponsesLanguageModel = class {
1964
1876
  systemMessageMode: modelConfig.systemMessageMode
1965
1877
  });
1966
1878
  warnings.push(...messageWarnings);
1967
- const openaiOptions = (0, import_provider_utils7.parseProviderOptions)({
1879
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
1968
1880
  provider: "openai",
1969
1881
  providerOptions,
1970
1882
  schema: openaiResponsesProviderOptionsSchema
@@ -1975,7 +1887,7 @@ var OpenAIResponsesLanguageModel = class {
1975
1887
  input: messages,
1976
1888
  temperature,
1977
1889
  top_p: topP,
1978
- max_output_tokens: maxTokens,
1890
+ max_output_tokens: maxOutputTokens,
1979
1891
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
1980
1892
  text: {
1981
1893
  format: responseFormat.schema != null ? {
@@ -2045,58 +1957,58 @@ var OpenAIResponsesLanguageModel = class {
2045
1957
  responseHeaders,
2046
1958
  value: response,
2047
1959
  rawValue: rawResponse
2048
- } = await (0, import_provider_utils7.postJsonToApi)({
1960
+ } = await (0, import_provider_utils8.postJsonToApi)({
2049
1961
  url: this.config.url({
2050
1962
  path: "/responses",
2051
1963
  modelId: this.modelId
2052
1964
  }),
2053
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
1965
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
2054
1966
  body,
2055
1967
  failedResponseHandler: openaiFailedResponseHandler,
2056
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
2057
- import_zod7.z.object({
2058
- id: import_zod7.z.string(),
2059
- created_at: import_zod7.z.number(),
2060
- model: import_zod7.z.string(),
2061
- output: import_zod7.z.array(
2062
- import_zod7.z.discriminatedUnion("type", [
2063
- import_zod7.z.object({
2064
- type: import_zod7.z.literal("message"),
2065
- role: import_zod7.z.literal("assistant"),
2066
- content: import_zod7.z.array(
2067
- import_zod7.z.object({
2068
- type: import_zod7.z.literal("output_text"),
2069
- text: import_zod7.z.string(),
2070
- annotations: import_zod7.z.array(
2071
- import_zod7.z.object({
2072
- type: import_zod7.z.literal("url_citation"),
2073
- start_index: import_zod7.z.number(),
2074
- end_index: import_zod7.z.number(),
2075
- url: import_zod7.z.string(),
2076
- title: import_zod7.z.string()
1968
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
1969
+ import_zod8.z.object({
1970
+ id: import_zod8.z.string(),
1971
+ created_at: import_zod8.z.number(),
1972
+ model: import_zod8.z.string(),
1973
+ output: import_zod8.z.array(
1974
+ import_zod8.z.discriminatedUnion("type", [
1975
+ import_zod8.z.object({
1976
+ type: import_zod8.z.literal("message"),
1977
+ role: import_zod8.z.literal("assistant"),
1978
+ content: import_zod8.z.array(
1979
+ import_zod8.z.object({
1980
+ type: import_zod8.z.literal("output_text"),
1981
+ text: import_zod8.z.string(),
1982
+ annotations: import_zod8.z.array(
1983
+ import_zod8.z.object({
1984
+ type: import_zod8.z.literal("url_citation"),
1985
+ start_index: import_zod8.z.number(),
1986
+ end_index: import_zod8.z.number(),
1987
+ url: import_zod8.z.string(),
1988
+ title: import_zod8.z.string()
2077
1989
  })
2078
1990
  )
2079
1991
  })
2080
1992
  )
2081
1993
  }),
2082
- import_zod7.z.object({
2083
- type: import_zod7.z.literal("function_call"),
2084
- call_id: import_zod7.z.string(),
2085
- name: import_zod7.z.string(),
2086
- arguments: import_zod7.z.string()
1994
+ import_zod8.z.object({
1995
+ type: import_zod8.z.literal("function_call"),
1996
+ call_id: import_zod8.z.string(),
1997
+ name: import_zod8.z.string(),
1998
+ arguments: import_zod8.z.string()
2087
1999
  }),
2088
- import_zod7.z.object({
2089
- type: import_zod7.z.literal("web_search_call")
2000
+ import_zod8.z.object({
2001
+ type: import_zod8.z.literal("web_search_call")
2090
2002
  }),
2091
- import_zod7.z.object({
2092
- type: import_zod7.z.literal("computer_call")
2003
+ import_zod8.z.object({
2004
+ type: import_zod8.z.literal("computer_call")
2093
2005
  }),
2094
- import_zod7.z.object({
2095
- type: import_zod7.z.literal("reasoning")
2006
+ import_zod8.z.object({
2007
+ type: import_zod8.z.literal("reasoning")
2096
2008
  })
2097
2009
  ])
2098
2010
  ),
2099
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
2011
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
2100
2012
  usage: usageSchema
2101
2013
  })
2102
2014
  ),
@@ -2117,7 +2029,7 @@ var OpenAIResponsesLanguageModel = class {
2117
2029
  var _a2, _b2, _c2;
2118
2030
  return {
2119
2031
  sourceType: "url",
2120
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils7.generateId)(),
2032
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
2121
2033
  url: annotation.url,
2122
2034
  title: annotation.title
2123
2035
  };
@@ -2129,8 +2041,8 @@ var OpenAIResponsesLanguageModel = class {
2129
2041
  }),
2130
2042
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2131
2043
  usage: {
2132
- promptTokens: response.usage.input_tokens,
2133
- completionTokens: response.usage.output_tokens
2044
+ inputTokens: response.usage.input_tokens,
2045
+ outputTokens: response.usage.output_tokens
2134
2046
  },
2135
2047
  request: { body },
2136
2048
  response: {
@@ -2152,18 +2064,18 @@ var OpenAIResponsesLanguageModel = class {
2152
2064
  }
2153
2065
  async doStream(options) {
2154
2066
  const { args: body, warnings } = this.getArgs(options);
2155
- const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
2067
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
2156
2068
  url: this.config.url({
2157
2069
  path: "/responses",
2158
2070
  modelId: this.modelId
2159
2071
  }),
2160
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
2072
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
2161
2073
  body: {
2162
2074
  ...body,
2163
2075
  stream: true
2164
2076
  },
2165
2077
  failedResponseHandler: openaiFailedResponseHandler,
2166
- successfulResponseHandler: (0, import_provider_utils7.createEventSourceResponseHandler)(
2078
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
2167
2079
  openaiResponsesChunkSchema
2168
2080
  ),
2169
2081
  abortSignal: options.abortSignal,
@@ -2171,8 +2083,10 @@ var OpenAIResponsesLanguageModel = class {
2171
2083
  });
2172
2084
  const self = this;
2173
2085
  let finishReason = "unknown";
2174
- let promptTokens = NaN;
2175
- let completionTokens = NaN;
2086
+ const usage = {
2087
+ inputTokens: void 0,
2088
+ outputTokens: void 0
2089
+ };
2176
2090
  let cachedPromptTokens = null;
2177
2091
  let reasoningTokens = null;
2178
2092
  let responseId = null;
@@ -2242,8 +2156,8 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
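Likewise, the `_b` through `_e` chains here decompile to optional chaining plus nullish coalescing: keep the previous count whenever the details block or its field is absent. A self-contained sketch of that rule; the type and function names are illustrative, only the field names come from the diff:

type TokenDetails = { cached_tokens?: number | null } | null | undefined;

// Keep the previous count when the details block or its field is missing.
function nextCachedTokens(
  prev: number | null,
  details: TokenDetails,
): number | null {
  return details?.cached_tokens ?? prev;
}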
@@ -2251,7 +2165,7 @@ var OpenAIResponsesLanguageModel = class {
  type: "source",
  source: {
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils7.generateId)(),
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  }
@@ -2262,7 +2176,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
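When the terminal chunk arrives, the stream emits a `finish` part carrying the new usage shape, with cached and reasoning token counts attached as provider metadata only when they were reported. An illustrative literal: all numbers are fabricated, and the metadata field names are inferred from the variable names above rather than shown verbatim in this hunk:

const finishPart = {
  type: "finish" as const,
  finishReason: "stop" as const,
  usage: { inputTokens: 1200, outputTokens: 250 },
  providerMetadata: {
    openai: { cachedPromptTokens: 1024, reasoningTokens: 180 },
  },
};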
@@ -2282,79 +2196,79 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod7.z.object({
- input_tokens: import_zod7.z.number(),
- input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
- output_tokens: import_zod7.z.number(),
- output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
+ var usageSchema = import_zod8.z.object({
+ input_tokens: import_zod8.z.number(),
+ input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
+ output_tokens: import_zod8.z.number(),
+ output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.delta"),
- delta: import_zod7.z.string()
+ var textDeltaChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_text.delta"),
+ delta: import_zod8.z.string()
  });
- var responseFinishedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod7.z.object({
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod8.z.object({
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.created"),
- response: import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string()
+ var responseCreatedChunkSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.created"),
+ response: import_zod8.z.object({
+ id: import_zod8.z.string(),
+ created_at: import_zod8.z.number(),
+ model: import_zod8.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.done"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_item.done"),
+ output_index: import_zod8.z.number(),
+ item: import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string(),
- status: import_zod7.z.literal("completed")
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ id: import_zod8.z.string(),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string(),
+ status: import_zod8.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod7.z.string(),
- output_index: import_zod7.z.number(),
- delta: import_zod7.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod8.z.string(),
+ output_index: import_zod8.z.number(),
+ delta: import_zod8.z.string()
  });
- var responseOutputItemAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.added"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_item.added"),
+ output_index: import_zod8.z.number(),
+ item: import_zod8.z.discriminatedUnion("type", [
+ import_zod8.z.object({
+ type: import_zod8.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod8.z.object({
+ type: import_zod8.z.literal("function_call"),
+ id: import_zod8.z.string(),
+ call_id: import_zod8.z.string(),
+ name: import_zod8.z.string(),
+ arguments: import_zod8.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.annotation.added"),
- annotation: import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ var responseAnnotationAddedSchema = import_zod8.z.object({
+ type: import_zod8.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod8.z.object({
+ type: import_zod8.z.literal("url_citation"),
+ url: import_zod8.z.string(),
+ title: import_zod8.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod7.z.union([
+ var openaiResponsesChunkSchema = import_zod8.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
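The union gathers every chunk schema defined above (its member list continues in the next hunk). For reference, the simplest member in source form, with a parse call against a fabricated event payload, assuming only zod's standard API:

import { z } from "zod";

// Restatement of `textDeltaChunkSchema` from the hunk above.
const textDeltaChunkSchema = z.object({
  type: z.literal("response.output_text.delta"),
  delta: z.string(),
});

const chunk = textDeltaChunkSchema.parse({
  type: "response.output_text.delta",
  delta: "Hello",
});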
@@ -2362,7 +2276,7 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
+ import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
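The union's final member is the safety valve: any object with a string `type` parses via passthrough, so unknown event types are tolerated and skipped later instead of failing the stream. A minimal restatement of the pattern; the two-member union here is illustrative:

import { z } from "zod";

const chunkSchema = z.union([
  z.object({ type: z.literal("response.output_text.delta"), delta: z.string() }),
  z.object({ type: z.string() }).passthrough(), // fallback for unknown chunks
]);

chunkSchema.parse({ type: "response.future_event", detail: 42 }); // parses fine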
  function isTextDeltaChunk(chunk) {
@@ -2407,15 +2321,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
- metadata: import_zod7.z.any().nullish(),
- parallelToolCalls: import_zod7.z.boolean().nullish(),
- previousResponseId: import_zod7.z.string().nullish(),
- store: import_zod7.z.boolean().nullish(),
- user: import_zod7.z.string().nullish(),
- reasoningEffort: import_zod7.z.string().nullish(),
- strictSchemas: import_zod7.z.boolean().nullish(),
- instructions: import_zod7.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
+ metadata: import_zod8.z.any().nullish(),
+ parallelToolCalls: import_zod8.z.boolean().nullish(),
+ previousResponseId: import_zod8.z.string().nullish(),
+ store: import_zod8.z.boolean().nullish(),
+ user: import_zod8.z.string().nullish(),
+ reasoningEffort: import_zod8.z.string().nullish(),
+ strictSchemas: import_zod8.z.boolean().nullish(),
+ instructions: import_zod8.z.string().nullish()
  });
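These are the Responses-specific provider options the model validates. A call-site sketch: the keys come from the schema above, while the values and the wrapper object are illustrative:

const providerOptions = {
  openai: {
    parallelToolCalls: false,
    store: true,
    reasoningEffort: "high",
    user: "user-1234",
  },
};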
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -2425,6 +2339,7 @@ var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel,
- modelMaxImagesPerCall
+ modelMaxImagesPerCall,
+ openaiProviderOptions
  });
  //# sourceMappingURL=index.js.map
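With that export-map change, `openaiProviderOptions` becomes importable alongside the other internal symbols. A sketch, assuming the package exposes this file at an `/internal` subpath (this diff shows only the export, not the package.json exports map):

// Subpath is an assumption based on this file being the internal entry point.
import { openaiProviderOptions } from "@ai-sdk/openai/internal";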