@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.6

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,7 +1,6 @@
  // src/openai-chat-language-model.ts
  import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ InvalidResponseDataError
  } from "@ai-sdk/provider";
  import {
  combineHeaders,
@@ -9,17 +8,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
- postJsonToApi
+ postJsonToApi,
+ parseProviderOptions
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-openai-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -71,7 +71,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -86,14 +86,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
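
Audio and image file parts no longer have to be pre-encoded by the caller: `convertToBase64` accepts both base64 strings and binary `Uint8Array` data, so raw bytes can be passed straight through. A minimal sketch of what this enables, assuming the AI SDK's `generateText` entry point and the v5 file-part shape (`mediaType`/`data`) used above:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";
import { readFile } from "node:fs/promises";

// Hypothetical usage: the raw Uint8Array from readFile is handed over as-is;
// the provider base64-encodes it via convertToBase64 when it builds the
// input_audio content part shown in the hunk above.
const result = await generateText({
  model: openai("gpt-4o-audio-preview"), // assumed audio-capable model id
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Transcribe and summarize this recording." },
        { type: "file", mediaType: "audio/wav", data: await readFile("clip.wav") },
      ],
    },
  ],
});
```
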
@@ -148,41 +154,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -225,18 +210,69 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  import { z } from "zod";
+ var openaiProviderOptions = z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: z.record(z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: z.record(z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var openaiErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
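
For callers, the practical effect of the new `openai-chat-options.ts` module is that chat-specific settings (`logitBias`, `logprobs`, `parallelToolCalls`, `user`, `reasoningEffort`, `maxCompletionTokens`, `store`, `metadata`, `prediction`) move off the model settings object and into `providerOptions.openai`, where `parseProviderOptions` validates them against `openaiProviderOptions`. A sketch under that assumption, using the AI SDK's `generateText`:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

// Sketch of the new provider-options flow: these values are parsed against
// openaiProviderOptions and mapped onto the request body fields
// (reasoning_effort, max_completion_tokens, store, ...).
const { text } = await generateText({
  model: openai("o3-mini"),
  prompt: "Outline a migration plan for this API change.",
  providerOptions: {
    openai: {
      reasoningEffort: "low",
      maxCompletionTokens: 1024,
      user: "user-1234",
      store: false,
    },
  },
});
```
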
@@ -264,7 +300,6 @@ import {
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -272,48 +307,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new UnsupportedFunctionalityError2({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
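
With the legacy branch gone, `prepareTools` always emits the standard `tools`/`tool_choice` request fields; the deprecated top-level `functions`/`function_call` fields are no longer produced, which is breaking for OpenAI-compatible endpoints that only understand the old functions API. Ordinary SDK tool definitions are unaffected, e.g. (a sketch assuming the AI SDK `tool` helper as of this canary, where tools still carry `parameters`):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText, tool } from "ai";
import { z } from "zod";

// A standard function tool; prepareTools now always maps this to
// { tools: [{ type: "function", function: { name, description, parameters } }] }.
const result = await generateText({
  model: openai("gpt-4o"),
  prompt: "What is the weather in Berlin?",
  tools: {
    weather: tool({
      description: "Get the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 18 }), // stub result
    }),
  },
});
```
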
@@ -385,7 +378,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -398,8 +391,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -413,21 +411,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -436,13 +422,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -453,19 +439,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -529,26 +515,30 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
@@ -593,14 +583,7 @@ var OpenAIChatLanguageModel = class {
  }
  return {
  text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: generateId(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
@@ -611,8 +594,8 @@ var OpenAIChatLanguageModel = class {
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
  },
  request: { body },
  response: {
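
Token accounting is renamed across every model class in this release: `usage.promptTokens`/`usage.completionTokens` become `usage.inputTokens`/`usage.outputTokens`, and missing counts are now reported as `undefined` instead of `NaN`. Downstream code should guard accordingly; a small defensive sketch:

```ts
// Consumer-side handling of the renamed usage fields. Counts may be
// undefined (the NaN placeholders are gone), so default before doing math.
function logUsage(usage: { inputTokens?: number; outputTokens?: number }) {
  const input = usage.inputTokens ?? 0;
  const output = usage.outputTokens ?? 0;
  console.log(`tokens in=${input} out=${output} total=${input + output}`);
}
```
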
@@ -626,48 +609,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
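
The `simulateStreaming` setting is removed outright: `doStream` now always issues a real streaming request. Callers that depended on it can reproduce the behavior in application code; the deleted block above is the template. A condensed userland sketch (stream-part shapes follow the removed code and may differ in later canaries):

```ts
// Generate once, then replay the result as a short synthetic stream,
// mirroring the removed simulateStreaming branch.
async function simulateStream(
  model: { doGenerate(options: unknown): Promise<any> },
  options: unknown,
) {
  const result = await model.doGenerate(options);
  const stream = new ReadableStream({
    start(controller) {
      controller.enqueue({ type: "response-metadata", ...result.response });
      if (result.text) {
        controller.enqueue({ type: "text-delta", textDelta: result.text });
      }
      controller.enqueue({
        type: "finish",
        finishReason: result.finishReason,
        usage: result.usage,
      });
      controller.close();
    },
  });
  return { stream, response: result.response, warnings: result.warnings };
}
```
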
@@ -692,13 +633,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -730,10 +670,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -768,16 +706,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: generateId(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -859,15 +789,11 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
@@ -879,104 +805,96 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- arguments: z2.string(),
- name: z2.string()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- name: z2.string().optional(),
- arguments: z2.string().optional()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1024,12 +942,12 @@ import {
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";

  // src/convert-to-openai-completion-prompt.ts
  import {
  InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
@@ -1076,7 +994,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool-call messages"
  });
  }
@@ -1089,7 +1007,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool messages"
  });
  }
@@ -1137,7 +1055,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1179,7 +1097,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1217,8 +1135,8 @@ var OpenAICompletionLanguageModel = class {
  return {
  text: choice.text,
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1254,9 +1172,9 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1283,10 +1201,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1322,46 +1238,46 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionResponseSchema = z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  })
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionChunkSchema = z4.union([
+ z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string().nullish(),
+ index: z4.number(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1376,7 +1292,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z5 } from "zod";
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v1";
@@ -1435,9 +1351,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z5.object({
+ data: z5.array(z5.object({ embedding: z5.array(z5.number()) })),
+ usage: z5.object({ prompt_tokens: z5.number() }).nullish()
  });

  // src/openai-image-model.ts
@@ -1446,7 +1362,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1524,8 +1440,8 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiImageResponseSchema = z6.object({
+ data: z6.array(z6.object({ b64_json: z6.string() }))
  });

  // src/openai-transcription-model.ts
@@ -1533,22 +1449,16 @@ import {
  combineHeaders as combineHeaders5,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions2,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
- var OpenAIProviderOptionsSchema = z6.object({
- include: z6.array(z6.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: z6.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: z6.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: z6.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: z6.array(z6.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ import { z as z7 } from "zod";
+ var OpenAIProviderOptionsSchema = z7.object({
+ include: z7.array(z7.string()).nullish(),
+ language: z7.string().nullish(),
+ prompt: z7.string().nullish(),
+ temperature: z7.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z7.array(z7.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
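
The transcription options drop their `.describe()` metadata and switch from `.optional()` to `.nullish()`, so explicit `null` is now accepted alongside `undefined`; the defaults for `temperature` (0) and `timestampGranularities` (`["segment"]`) are unchanged. A sketch of passing them, assuming the AI SDK's experimental transcription API:

```ts
import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";
import { readFile } from "node:fs/promises";

// Options are validated against OpenAIProviderOptionsSchema; nullish fields
// are dropped before the multipart form is built, and remaining values are
// stringified via String(value) per the formData.append change in the hunks
// that follow.
const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("clip.wav"),
  providerOptions: {
    openai: {
      language: "en",
      temperature: 0,
      timestampGranularities: ["word"],
    },
  },
});
```
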
@@ -1623,8 +1533,9 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = parseProviderOptions({
+ const openAIOptions = parseProviderOptions2({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -1635,16 +1546,16 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
@@ -1695,15 +1606,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z6.object({
- text: z6.string(),
- language: z6.string().nullish(),
- duration: z6.number().nullish(),
- words: z6.array(
- z6.object({
- word: z6.string(),
- start: z6.number(),
- end: z6.number()
+ var openaiTranscriptionResponseSchema = z7.object({
+ text: z7.string(),
+ language: z7.string().nullish(),
+ duration: z7.number().nullish(),
+ words: z7.array(
+ z7.object({
+ word: z7.string(),
+ start: z7.number(),
+ end: z7.number()
  })
  ).nullish()
  });
@@ -1714,14 +1625,14 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions2,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z8 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  function convertToOpenAIResponsesMessages({
  prompt,
@@ -1777,7 +1688,7 @@ function convertToOpenAIResponsesMessages({
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: "PDF file parts with URLs"
  });
  }
@@ -1787,7 +1698,7 @@ function convertToOpenAIResponsesMessages({
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: `file part media type ${part.mediaType}`
  });
  }
@@ -1859,7 +1770,7 @@ function mapOpenAIResponseFinishReason({

  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
  tools,
@@ -1919,7 +1830,7 @@ function prepareResponsesTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError6({
+ throw new UnsupportedFunctionalityError5({
  functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -1938,7 +1849,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1981,7 +1892,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions2({
+ const openaiOptions = parseProviderOptions3({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1992,7 +1903,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2071,49 +1982,49 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string(),
- output: z7.array(
- z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message"),
- role: z7.literal("assistant"),
- content: z7.array(
- z7.object({
- type: z7.literal("output_text"),
- text: z7.string(),
- annotations: z7.array(
- z7.object({
- type: z7.literal("url_citation"),
- start_index: z7.number(),
- end_index: z7.number(),
- url: z7.string(),
- title: z7.string()
+ z8.object({
+ id: z8.string(),
+ created_at: z8.number(),
+ model: z8.string(),
+ output: z8.array(
+ z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message"),
+ role: z8.literal("assistant"),
+ content: z8.array(
+ z8.object({
+ type: z8.literal("output_text"),
+ text: z8.string(),
+ annotations: z8.array(
+ z8.object({
+ type: z8.literal("url_citation"),
+ start_index: z8.number(),
+ end_index: z8.number(),
+ url: z8.string(),
+ title: z8.string()
  })
  )
  })
  )
  }),
- z7.object({
- type: z7.literal("function_call"),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z8.object({
+ type: z8.literal("function_call"),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string()
  }),
- z7.object({
- type: z7.literal("web_search_call")
+ z8.object({
+ type: z8.literal("web_search_call")
  }),
- z7.object({
- type: z7.literal("computer_call")
+ z8.object({
+ type: z8.literal("computer_call")
  }),
- z7.object({
- type: z7.literal("reasoning")
+ z8.object({
+ type: z8.literal("reasoning")
  })
  ])
  ),
- incomplete_details: z7.object({ reason: z7.string() }).nullable(),
+ incomplete_details: z8.object({ reason: z8.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2146,8 +2057,8 @@ var OpenAIResponsesLanguageModel = class {
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
  request: { body },
  response: {
@@ -2188,8 +2099,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2259,8 +2172,8 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
@@ -2279,7 +2192,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2299,79 +2212,79 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = z7.object({
- input_tokens: z7.number(),
- input_tokens_details: z7.object({ cached_tokens: z7.number().nullish() }).nullish(),
- output_tokens: z7.number(),
- output_tokens_details: z7.object({ reasoning_tokens: z7.number().nullish() }).nullish()
+ var usageSchema = z8.object({
+ input_tokens: z8.number(),
+ input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
+ output_tokens: z8.number(),
+ output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z7.object({
- type: z7.literal("response.output_text.delta"),
- delta: z7.string()
+ var textDeltaChunkSchema = z8.object({
+ type: z8.literal("response.output_text.delta"),
+ delta: z8.string()
  });
- var responseFinishedChunkSchema = z7.object({
- type: z7.enum(["response.completed", "response.incomplete"]),
- response: z7.object({
- incomplete_details: z7.object({ reason: z7.string() }).nullish(),
+ var responseFinishedChunkSchema = z8.object({
+ type: z8.enum(["response.completed", "response.incomplete"]),
+ response: z8.object({
+ incomplete_details: z8.object({ reason: z8.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z7.object({
- type: z7.literal("response.created"),
- response: z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string()
+ var responseCreatedChunkSchema = z8.object({
+ type: z8.literal("response.created"),
+ response: z8.object({
+ id: z8.string(),
+ created_at: z8.number(),
+ model: z8.string()
  })
  });
- var responseOutputItemDoneSchema = z7.object({
- type: z7.literal("response.output_item.done"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemDoneSchema = z8.object({
+ type: z8.literal("response.output_item.done"),
+ output_index: z8.number(),
+ item: z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string(),
- status: z7.literal("completed")
+ z8.object({
+ type: z8.literal("function_call"),
+ id: z8.string(),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string(),
+ status: z8.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z7.object({
- type: z7.literal("response.function_call_arguments.delta"),
- item_id: z7.string(),
- output_index: z7.number(),
- delta: z7.string()
+ var responseFunctionCallArgumentsDeltaSchema = z8.object({
+ type: z8.literal("response.function_call_arguments.delta"),
+ item_id: z8.string(),
+ output_index: z8.number(),
+ delta: z8.string()
  });
- var responseOutputItemAddedSchema = z7.object({
- type: z7.literal("response.output_item.added"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemAddedSchema = z8.object({
+ type: z8.literal("response.output_item.added"),
+ output_index: z8.number(),
+ item: z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z8.object({
+ type: z8.literal("function_call"),
+ id: z8.string(),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z7.object({
- type: z7.literal("response.output_text.annotation.added"),
- annotation: z7.object({
- type: z7.literal("url_citation"),
- url: z7.string(),
- title: z7.string()
+ var responseAnnotationAddedSchema = z8.object({
+ type: z8.literal("response.output_text.annotation.added"),
+ annotation: z8.object({
+ type: z8.literal("url_citation"),
+ url: z8.string(),
+ title: z8.string()
  })
  });
- var openaiResponsesChunkSchema = z7.union([
+ var openaiResponsesChunkSchema = z8.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2379,7 +2292,7 @@ var openaiResponsesChunkSchema = z7.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z7.object({ type: z7.string() }).passthrough()
+ z8.object({ type: z8.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2424,15 +2337,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z7.object({
- metadata: z7.any().nullish(),
- parallelToolCalls: z7.boolean().nullish(),
- previousResponseId: z7.string().nullish(),
- store: z7.boolean().nullish(),
- user: z7.string().nullish(),
- reasoningEffort: z7.string().nullish(),
- strictSchemas: z7.boolean().nullish(),
- instructions: z7.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z8.object({
+ metadata: z8.any().nullish(),
+ parallelToolCalls: z8.boolean().nullish(),
+ previousResponseId: z8.string().nullish(),
+ store: z8.boolean().nullish(),
+ user: z8.string().nullish(),
+ reasoningEffort: z8.string().nullish(),
+ strictSchemas: z8.boolean().nullish(),
+ instructions: z8.string().nullish()
  });
  export {
  OpenAIChatLanguageModel,
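
The Responses API options themselves are untouched in this release (only the zod alias changes), but they remain the place for responses-specific knobs such as `previousResponseId`. A sketch assuming `generateText` with the responses model factory:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

// Options validated by openaiResponsesProviderOptionsSchema; with store: true,
// previousResponseId threads server-side conversation state across calls.
const { text } = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "Continue the previous answer.",
  providerOptions: {
    openai: {
      store: true,
      previousResponseId: "resp_123", // hypothetical id from an earlier call
      strictSchemas: true,
    },
  },
});
```
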
@@ -2441,6 +2354,7 @@ export {
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel,
- modelMaxImagesPerCall
+ modelMaxImagesPerCall,
+ openaiProviderOptions
  };
  //# sourceMappingURL=index.mjs.map