@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -6,8 +6,7 @@ import {
 
 // src/openai-chat-language-model.ts
 import {
-  InvalidResponseDataError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  InvalidResponseDataError
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -15,17 +14,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
-  postJsonToApi
+  postJsonToApi,
+  parseProviderOptions
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
+import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -77,7 +77,7 @@ function convertToOpenAIChatMessages({
             return {
               type: "image_url",
               image_url: {
-                url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+                url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                 // OpenAI specific extension: image detail
                 detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
               }
@@ -92,14 +92,20 @@ function convertToOpenAIChatMessages({
             case "audio/wav": {
               return {
                 type: "input_audio",
-                input_audio: { data: part.data, format: "wav" }
+                input_audio: {
+                  data: convertToBase64(part.data),
+                  format: "wav"
+                }
               };
             }
             case "audio/mp3":
             case "audio/mpeg": {
               return {
                 type: "input_audio",
-                input_audio: { data: part.data, format: "mp3" }
+                input_audio: {
+                  data: convertToBase64(part.data),
+                  format: "mp3"
+                }
               };
             }
             default: {
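
The audio branches above now run `part.data` through `convertToBase64`, so file parts can carry raw bytes instead of pre-encoded base64 strings. A minimal sketch of a prompt file part that relies on this, assuming the AI SDK file-part shape (`type`, `mediaType`, `data`) seen elsewhere in this diff:

    // Hypothetical file part: after this change the provider base64-encodes
    // Uint8Array audio data itself instead of expecting a base64 string.
    import { readFileSync } from "node:fs";

    const audioPart = {
      type: "file" as const,
      mediaType: "audio/wav",
      data: new Uint8Array(readFileSync("./clip.wav")),
    };
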
@@ -154,41 +160,20 @@ function convertToOpenAIChatMessages({
             }
           }
         }
-        if (useLegacyFunctionCalling) {
-          if (toolCalls.length > 1) {
-            throw new UnsupportedFunctionalityError({
-              functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
-            });
-          }
-          messages.push({
-            role: "assistant",
-            content: text,
-            function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-          });
-        } else {
-          messages.push({
-            role: "assistant",
-            content: text,
-            tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-          });
-        }
+        messages.push({
+          role: "assistant",
+          content: text,
+          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+        });
         break;
       }
       case "tool": {
         for (const toolResponse of content) {
-          if (useLegacyFunctionCalling) {
-            messages.push({
-              role: "function",
-              name: toolResponse.toolName,
-              content: JSON.stringify(toolResponse.result)
-            });
-          } else {
-            messages.push({
-              role: "tool",
-              tool_call_id: toolResponse.toolCallId,
-              content: JSON.stringify(toolResponse.result)
-            });
-          }
+          messages.push({
+            role: "tool",
+            tool_call_id: toolResponse.toolCallId,
+            content: JSON.stringify(toolResponse.result)
+          });
         }
         break;
       }
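
With the legacy function-calling paths deleted, assistant tool calls always map to `tool_calls` and tool results always map to `role: "tool"` messages. A sketch of the chat message the converter now produces for a tool result (field values are illustrative, mirroring the `messages.push` above):

    const toolResultMessage = {
      role: "tool" as const,
      tool_call_id: "call_abc123", // hypothetical id from the assistant's tool_calls
      content: JSON.stringify({ temperatureC: 21 }), // results are JSON-stringified
    };
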
@@ -231,18 +216,69 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }
 
-// src/openai-error.ts
+// src/openai-chat-options.ts
 import { z } from "zod";
+var openaiProviderOptions = z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: z.record(z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: z.record(z.any()).optional()
+});
+
+// src/openai-error.ts
+import { z as z2 } from "zod";
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-var openaiErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = createJsonErrorResponseHandler({
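
The new `openaiProviderOptions` schema moves chat-specific knobs (logitBias, logprobs, user, reasoningEffort, and friends) out of model settings and into per-call provider options, validated through `parseProviderOptions`. A sketch of passing them from application code, assuming the `generateText` entry point of the `ai` package and the `providerOptions.openai` namespace this diff parses:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai("gpt-4o"),
      prompt: "Summarize the release notes.",
      providerOptions: {
        openai: {
          user: "user-1234",       // previously a model setting
          logprobs: 5,             // top-5 logprobs, per the schema above
          maxCompletionTokens: 1024,
        },
      },
    });
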
@@ -270,7 +306,6 @@ import {
 function prepareTools({
   tools,
   toolChoice,
-  useLegacyFunctionCalling = false,
   structuredOutputs
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -278,48 +313,6 @@ function prepareTools({
   if (tools == null) {
     return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new UnsupportedFunctionalityError2({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
-  }
   const openaiTools2 = [];
   for (const tool of tools) {
     if (tool.type === "provider-defined") {
@@ -391,7 +384,7 @@ var OpenAIChatLanguageModel = class {
   }
   getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -404,8 +397,13 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b;
     const warnings = [];
+    const openaiOptions = (_a = parseProviderOptions({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
@@ -419,21 +417,9 @@ var OpenAIChatLanguageModel = class {
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        useLegacyFunctionCalling,
         systemMessageMode: getSystemMessageMode(this.modelId)
       }
     );
@@ -442,13 +428,13 @@ var OpenAIChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      logit_bias: this.settings.logitBias,
-      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      user: this.settings.user,
-      parallel_tool_calls: this.settings.parallelToolCalls,
+      logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
@@ -459,19 +445,19 @@ var OpenAIChatLanguageModel = class {
         json_schema: {
           schema: responseFormat.schema,
           strict: true,
-          name: (_a = responseFormat.name) != null ? _a : "response",
+          name: (_b = responseFormat.name) != null ? _b : "response",
           description: responseFormat.description
         }
       } : { type: "json_object" } : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
-      // TODO remove in next major version; we auto-map maxTokens now
-      max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
-      store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
-      metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
-      prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
-      reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
       // messages:
       messages
     };
@@ -535,26 +521,30 @@ var OpenAIChatLanguageModel = class {
         }
         baseArgs.max_tokens = void 0;
       }
+    } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+        });
+      }
     }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
-      functions,
-      function_call,
       toolWarnings
     } = prepareTools({
       tools,
       toolChoice,
-      useLegacyFunctionCalling,
       structuredOutputs: this.supportsStructuredOutputs
     });
     return {
       args: {
         ...baseArgs,
         tools: openaiTools2,
-        tool_choice: openaiToolChoice,
-        functions,
-        function_call
+        tool_choice: openaiToolChoice
       },
       warnings: [...warnings, ...toolWarnings]
     };
@@ -599,14 +589,7 @@ var OpenAIChatLanguageModel = class {
     }
     return {
       text: (_c = choice.message.content) != null ? _c : void 0,
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: generateId(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+      toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
@@ -617,8 +600,8 @@ var OpenAIChatLanguageModel = class {
       }),
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
-        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+        inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+        outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
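
Usage reporting changes shape here: `promptTokens`/`completionTokens` become `inputTokens`/`outputTokens`, and missing counts are now `undefined` instead of `NaN`. Consumer code should guard accordingly; a sketch (the usage shape is taken from the hunk above):

    // Missing counts no longer propagate as NaN, so check explicitly.
    function totalTokens(usage: { inputTokens?: number; outputTokens?: number }) {
      return usage.inputTokens != null && usage.outputTokens != null
        ? usage.inputTokens + usage.outputTokens
        : undefined;
    }
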
@@ -632,48 +615,6 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doStream(options) {
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call-delta",
-                toolCallType: "function",
-                toolCallId: toolCall.toolCallId,
-                toolName: toolCall.toolName,
-                argsTextDelta: toolCall.args
-              });
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        response: result.response,
-        warnings: result.warnings
-      };
-    }
     const { args, warnings } = this.getArgs(options);
     const body = {
       ...args,
@@ -698,13 +639,12 @@ var OpenAIChatLanguageModel = class {
     const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: void 0,
-      completionTokens: void 0
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     let logprobs;
     let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
     const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
@@ -736,10 +676,8 @@ var OpenAIChatLanguageModel = class {
               prompt_tokens_details,
               completion_tokens_details
             } = value.usage;
-            usage = {
-              promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-              completionTokens: completion_tokens != null ? completion_tokens : void 0
-            };
+            usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+            usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
               providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
             }
@@ -774,16 +712,8 @@ var OpenAIChatLanguageModel = class {
               if (logprobs === void 0) logprobs = [];
               logprobs.push(...mappedLogprobs);
             }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: generateId(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                 const index = toolCallDelta.index;
                 if (toolCalls[index] == null) {
                   if (toolCallDelta.type !== "function") {
@@ -865,15 +795,11 @@ var OpenAIChatLanguageModel = class {
           }
         },
         flush(controller) {
-          var _a, _b;
           controller.enqueue({
             type: "finish",
             finishReason,
             logprobs,
-            usage: {
-              promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-              completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-            },
+            usage,
             ...providerMetadata != null ? { providerMetadata } : {}
           });
         }
@@ -885,104 +811,96 @@ var OpenAIChatLanguageModel = class {
     };
   }
 };
-var openaiTokenUsageSchema = z2.object({
-  prompt_tokens: z2.number().nullish(),
-  completion_tokens: z2.number().nullish(),
-  prompt_tokens_details: z2.object({
-    cached_tokens: z2.number().nullish()
+var openaiTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
   }).nullish(),
-  completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish(),
-    accepted_prediction_tokens: z2.number().nullish(),
-    rejected_prediction_tokens: z2.number().nullish()
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant").nullish(),
-        content: z2.string().nullish(),
-        function_call: z2.object({
-          arguments: z2.string(),
-          name: z2.string()
-        }).nullish(),
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string().nullish(),
-            type: z2.literal("function"),
-            function: z2.object({
-              name: z2.string(),
-              arguments: z2.string()
+var openaiChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
             })
           })
         ).nullish()
       }),
-      index: z2.number(),
-      logprobs: z2.object({
-        content: z2.array(
-          z2.object({
-            token: z2.string(),
-            logprob: z2.number(),
-            top_logprobs: z2.array(
-              z2.object({
-                token: z2.string(),
-                logprob: z2.number()
+      index: z3.number(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
               })
             )
           })
         ).nullable()
       }).nullish(),
-      finish_reason: z2.string().nullish()
+      finish_reason: z3.string().nullish()
     })
   ),
   usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema = z2.union([
-  z2.object({
-    id: z2.string().nullish(),
-    created: z2.number().nullish(),
-    model: z2.string().nullish(),
-    choices: z2.array(
-      z2.object({
-        delta: z2.object({
-          role: z2.enum(["assistant"]).nullish(),
-          content: z2.string().nullish(),
-          function_call: z2.object({
-            name: z2.string().optional(),
-            arguments: z2.string().optional()
-          }).nullish(),
-          tool_calls: z2.array(
-            z2.object({
-              index: z2.number(),
-              id: z2.string().nullish(),
-              type: z2.literal("function").optional(),
-              function: z2.object({
-                name: z2.string().nullish(),
-                arguments: z2.string().nullish()
+var openaiChatChunkSchema = z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").optional(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
               })
             })
           ).nullish()
         }).nullish(),
-        logprobs: z2.object({
-          content: z2.array(
-            z2.object({
-              token: z2.string(),
-              logprob: z2.number(),
-              top_logprobs: z2.array(
-                z2.object({
-                  token: z2.string(),
-                  logprob: z2.number()
+        logprobs: z3.object({
+          content: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number(),
+              top_logprobs: z3.array(
+                z3.object({
+                  token: z3.string(),
+                  logprob: z3.number()
                 })
              )
            })
          ).nullable()
        }).nullish(),
-        finish_reason: z2.string().nullable().optional(),
-        index: z2.number()
+        finish_reason: z3.string().nullable().optional(),
+        index: z3.number()
       })
     ),
     usage: openaiTokenUsageSchema
@@ -1030,12 +948,12 @@ import {
   createJsonResponseHandler as createJsonResponseHandler2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z4 } from "zod";
 
 // src/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
 } from "@ai-sdk/provider";
 function convertToOpenAICompletionPrompt({
   prompt,
@@ -1082,7 +1000,7 @@ ${userMessage}
           return part.text;
         }
         case "tool-call": {
-          throw new UnsupportedFunctionalityError4({
+          throw new UnsupportedFunctionalityError3({
             functionality: "tool-call messages"
           });
         }
@@ -1095,7 +1013,7 @@ ${assistantMessage}
         break;
       }
       case "tool": {
-        throw new UnsupportedFunctionalityError4({
+        throw new UnsupportedFunctionalityError3({
           functionality: "tool messages"
         });
       }
@@ -1143,7 +1061,7 @@ var OpenAICompletionLanguageModel = class {
   getArgs({
     inputFormat,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -1185,7 +1103,7 @@ var OpenAICompletionLanguageModel = class {
       suffix: this.settings.suffix,
       user: this.settings.user,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
@@ -1223,8 +1141,8 @@ var OpenAICompletionLanguageModel = class {
     return {
       text: choice.text,
       usage: {
-        promptTokens: response.usage.prompt_tokens,
-        completionTokens: response.usage.completion_tokens
+        inputTokens: response.usage.prompt_tokens,
+        outputTokens: response.usage.completion_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1260,9 +1178,9 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     let logprobs;
     let isFirstChunk = true;
@@ -1289,10 +1207,8 @@ var OpenAICompletionLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = value.usage.prompt_tokens;
+              usage.outputTokens = value.usage.completion_tokens;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1328,46 +1244,46 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
-var openaiCompletionResponseSchema = z3.object({
-  id: z3.string().nullish(),
-  created: z3.number().nullish(),
-  model: z3.string().nullish(),
-  choices: z3.array(
-    z3.object({
-      text: z3.string(),
-      finish_reason: z3.string(),
-      logprobs: z3.object({
-        tokens: z3.array(z3.string()),
-        token_logprobs: z3.array(z3.number()),
-        top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+var openaiCompletionResponseSchema = z4.object({
+  id: z4.string().nullish(),
+  created: z4.number().nullish(),
+  model: z4.string().nullish(),
+  choices: z4.array(
+    z4.object({
+      text: z4.string(),
+      finish_reason: z4.string(),
+      logprobs: z4.object({
+        tokens: z4.array(z4.string()),
+        token_logprobs: z4.array(z4.number()),
+        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
       }).nullish()
     })
   ),
-  usage: z3.object({
-    prompt_tokens: z3.number(),
-    completion_tokens: z3.number()
+  usage: z4.object({
+    prompt_tokens: z4.number(),
+    completion_tokens: z4.number()
   })
 });
-var openaiCompletionChunkSchema = z3.union([
-  z3.object({
-    id: z3.string().nullish(),
-    created: z3.number().nullish(),
-    model: z3.string().nullish(),
-    choices: z3.array(
-      z3.object({
-        text: z3.string(),
-        finish_reason: z3.string().nullish(),
-        index: z3.number(),
-        logprobs: z3.object({
-          tokens: z3.array(z3.string()),
-          token_logprobs: z3.array(z3.number()),
-          top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+var openaiCompletionChunkSchema = z4.union([
+  z4.object({
+    id: z4.string().nullish(),
+    created: z4.number().nullish(),
+    model: z4.string().nullish(),
+    choices: z4.array(
+      z4.object({
+        text: z4.string(),
+        finish_reason: z4.string().nullish(),
+        index: z4.number(),
+        logprobs: z4.object({
+          tokens: z4.array(z4.string()),
+          token_logprobs: z4.array(z4.number()),
+          top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
        }).nullish()
      })
    ),
-    usage: z3.object({
-      prompt_tokens: z3.number(),
-      completion_tokens: z3.number()
+    usage: z4.object({
+      prompt_tokens: z4.number(),
+      completion_tokens: z4.number()
     }).nullish()
   }),
   openaiErrorDataSchema
@@ -1382,7 +1298,7 @@ import {
   createJsonResponseHandler as createJsonResponseHandler3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z4 } from "zod";
+import { z as z5 } from "zod";
 var OpenAIEmbeddingModel = class {
   constructor(modelId, settings, config) {
     this.specificationVersion = "v1";
@@ -1441,9 +1357,9 @@ var OpenAIEmbeddingModel = class {
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z4.object({
-  data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
-  usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z5.object({
+  data: z5.array(z5.object({ embedding: z5.array(z5.number()) })),
+  usage: z5.object({ prompt_tokens: z5.number() }).nullish()
 });
 
 // src/openai-image-model.ts
@@ -1452,7 +1368,7 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z6 } from "zod";
 
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
@@ -1530,13 +1446,13 @@ var OpenAIImageModel = class {
     };
   }
 };
-var openaiImageResponseSchema = z5.object({
-  data: z5.array(z5.object({ b64_json: z5.string() }))
+var openaiImageResponseSchema = z6.object({
+  data: z6.array(z6.object({ b64_json: z6.string() }))
 });
 
 // src/openai-tools.ts
-import { z as z6 } from "zod";
-var WebSearchPreviewParameters = z6.object({});
+import { z as z7 } from "zod";
+var WebSearchPreviewParameters = z7.object({});
 function webSearchPreviewTool({
   searchContextSize,
   userLocation
1560
1476
  combineHeaders as combineHeaders5,
1561
1477
  convertBase64ToUint8Array,
1562
1478
  createJsonResponseHandler as createJsonResponseHandler5,
1563
- parseProviderOptions,
1479
+ parseProviderOptions as parseProviderOptions2,
1564
1480
  postFormDataToApi
1565
1481
  } from "@ai-sdk/provider-utils";
1566
- import { z as z7 } from "zod";
1567
- var OpenAIProviderOptionsSchema = z7.object({
1568
- include: z7.array(z7.string()).optional().describe(
1569
- "Additional information to include in the transcription response."
1570
- ),
1571
- language: z7.string().optional().describe("The language of the input audio in ISO-639-1 format."),
1572
- prompt: z7.string().optional().describe(
1573
- "An optional text to guide the model's style or continue a previous audio segment."
1574
- ),
1575
- temperature: z7.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
1576
- timestampGranularities: z7.array(z7.enum(["word", "segment"])).optional().default(["segment"]).describe(
1577
- "The timestamp granularities to populate for this transcription."
1578
- )
1482
+ import { z as z8 } from "zod";
1483
+ var OpenAIProviderOptionsSchema = z8.object({
1484
+ include: z8.array(z8.string()).nullish(),
1485
+ language: z8.string().nullish(),
1486
+ prompt: z8.string().nullish(),
1487
+ temperature: z8.number().min(0).max(1).nullish().default(0),
1488
+ timestampGranularities: z8.array(z8.enum(["word", "segment"])).nullish().default(["segment"])
1579
1489
  });
1580
1490
  var languageMap = {
1581
1491
  afrikaans: "af",
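
The transcription provider options lose their `.describe()` annotations and switch from `.optional()` to `.nullish()`, and non-string values are now stringified before being appended to the form data. A sketch of supplying these options, assuming the experimental `transcribe` entry point of the `ai` package:

    import { experimental_transcribe as transcribe } from "ai";
    import { openai } from "@ai-sdk/openai";
    import { readFileSync } from "node:fs";

    const { text } = await transcribe({
      model: openai.transcription("whisper-1"),
      audio: readFileSync("./clip.wav"),
      providerOptions: {
        openai: {
          language: "en",                    // ISO-639-1, per the schema above
          timestampGranularities: ["word"],
        },
      },
    });
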
@@ -1650,8 +1560,9 @@ var OpenAITranscriptionModel = class {
     mediaType,
     providerOptions
   }) {
+    var _a, _b, _c, _d, _e;
     const warnings = [];
-    const openAIOptions = parseProviderOptions({
+    const openAIOptions = parseProviderOptions2({
       provider: "openai",
       providerOptions,
       schema: OpenAIProviderOptionsSchema
@@ -1662,16 +1573,16 @@ var OpenAITranscriptionModel = class {
     formData.append("file", new File([blob], "audio", { type: mediaType }));
     if (openAIOptions) {
       const transcriptionModelOptions = {
-        include: openAIOptions.include,
-        language: openAIOptions.language,
-        prompt: openAIOptions.prompt,
-        temperature: openAIOptions.temperature,
-        timestamp_granularities: openAIOptions.timestampGranularities
+        include: (_a = openAIOptions.include) != null ? _a : void 0,
+        language: (_b = openAIOptions.language) != null ? _b : void 0,
+        prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+        temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+        timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
       };
       for (const key in transcriptionModelOptions) {
         const value = transcriptionModelOptions[key];
         if (value !== void 0) {
-          formData.append(key, value);
+          formData.append(key, String(value));
         }
       }
     }
@@ -1722,15 +1633,15 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = z7.object({
-  text: z7.string(),
-  language: z7.string().nullish(),
-  duration: z7.number().nullish(),
-  words: z7.array(
-    z7.object({
-      word: z7.string(),
-      start: z7.number(),
-      end: z7.number()
+var openaiTranscriptionResponseSchema = z8.object({
+  text: z8.string(),
+  language: z8.string().nullish(),
+  duration: z8.number().nullish(),
+  words: z8.array(
+    z8.object({
+      word: z8.string(),
+      start: z8.number(),
+      end: z8.number()
     })
   ).nullish()
 });
@@ -1741,14 +1652,14 @@ import {
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as parseProviderOptions2,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as z8 } from "zod";
+import { z as z9 } from "zod";
 
 // src/responses/convert-to-openai-responses-messages.ts
 import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
 function convertToOpenAIResponsesMessages({
   prompt,
@@ -1804,7 +1715,7 @@ function convertToOpenAIResponsesMessages({
             };
           } else if (part.mediaType === "application/pdf") {
             if (part.data instanceof URL) {
-              throw new UnsupportedFunctionalityError5({
+              throw new UnsupportedFunctionalityError4({
                 functionality: "PDF file parts with URLs"
               });
             }
@@ -1814,7 +1725,7 @@ function convertToOpenAIResponsesMessages({
               file_data: `data:application/pdf;base64,${part.data}`
             };
           } else {
-            throw new UnsupportedFunctionalityError5({
+            throw new UnsupportedFunctionalityError4({
               functionality: `file part media type ${part.mediaType}`
             });
           }
@@ -1886,7 +1797,7 @@ function mapOpenAIResponseFinishReason({
 
 // src/responses/openai-responses-prepare-tools.ts
 import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
 } from "@ai-sdk/provider";
 function prepareResponsesTools({
   tools,
@@ -1946,7 +1857,7 @@ function prepareResponsesTools({
     };
     default: {
       const _exhaustiveCheck = type;
-      throw new UnsupportedFunctionalityError6({
+      throw new UnsupportedFunctionalityError5({
         functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
@@ -1965,7 +1876,7 @@ var OpenAIResponsesLanguageModel = class {
     return this.config.provider;
   }
   getArgs({
-    maxTokens,
+    maxOutputTokens,
     temperature,
     stopSequences,
     topP,
@@ -2008,7 +1919,7 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = parseProviderOptions2({
+    const openaiOptions = parseProviderOptions3({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -2019,7 +1930,7 @@ var OpenAIResponsesLanguageModel = class {
       input: messages,
       temperature,
       top_p: topP,
-      max_output_tokens: maxTokens,
+      max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         text: {
           format: responseFormat.schema != null ? {
2098
2009
  body,
2099
2010
  failedResponseHandler: openaiFailedResponseHandler,
2100
2011
  successfulResponseHandler: createJsonResponseHandler6(
2101
- z8.object({
2102
- id: z8.string(),
2103
- created_at: z8.number(),
2104
- model: z8.string(),
2105
- output: z8.array(
2106
- z8.discriminatedUnion("type", [
2107
- z8.object({
2108
- type: z8.literal("message"),
2109
- role: z8.literal("assistant"),
2110
- content: z8.array(
2111
- z8.object({
2112
- type: z8.literal("output_text"),
2113
- text: z8.string(),
2114
- annotations: z8.array(
2115
- z8.object({
2116
- type: z8.literal("url_citation"),
2117
- start_index: z8.number(),
2118
- end_index: z8.number(),
2119
- url: z8.string(),
2120
- title: z8.string()
2012
+ z9.object({
2013
+ id: z9.string(),
2014
+ created_at: z9.number(),
2015
+ model: z9.string(),
2016
+ output: z9.array(
2017
+ z9.discriminatedUnion("type", [
2018
+ z9.object({
2019
+ type: z9.literal("message"),
2020
+ role: z9.literal("assistant"),
2021
+ content: z9.array(
2022
+ z9.object({
2023
+ type: z9.literal("output_text"),
2024
+ text: z9.string(),
2025
+ annotations: z9.array(
2026
+ z9.object({
2027
+ type: z9.literal("url_citation"),
2028
+ start_index: z9.number(),
2029
+ end_index: z9.number(),
2030
+ url: z9.string(),
2031
+ title: z9.string()
2121
2032
  })
2122
2033
  )
2123
2034
  })
2124
2035
  )
2125
2036
  }),
2126
- z8.object({
2127
- type: z8.literal("function_call"),
2128
- call_id: z8.string(),
2129
- name: z8.string(),
2130
- arguments: z8.string()
2037
+ z9.object({
2038
+ type: z9.literal("function_call"),
2039
+ call_id: z9.string(),
2040
+ name: z9.string(),
2041
+ arguments: z9.string()
2131
2042
  }),
2132
- z8.object({
2133
- type: z8.literal("web_search_call")
2043
+ z9.object({
2044
+ type: z9.literal("web_search_call")
2134
2045
  }),
2135
- z8.object({
2136
- type: z8.literal("computer_call")
2046
+ z9.object({
2047
+ type: z9.literal("computer_call")
2137
2048
  }),
2138
- z8.object({
2139
- type: z8.literal("reasoning")
2049
+ z9.object({
2050
+ type: z9.literal("reasoning")
2140
2051
  })
2141
2052
  ])
2142
2053
  ),
2143
- incomplete_details: z8.object({ reason: z8.string() }).nullable(),
2054
+ incomplete_details: z9.object({ reason: z9.string() }).nullable(),
2144
2055
  usage: usageSchema
2145
2056
  })
2146
2057
  ),
@@ -2173,8 +2084,8 @@ var OpenAIResponsesLanguageModel = class {
2173
2084
  }),
2174
2085
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2175
2086
  usage: {
2176
- promptTokens: response.usage.input_tokens,
2177
- completionTokens: response.usage.output_tokens
2087
+ inputTokens: response.usage.input_tokens,
2088
+ outputTokens: response.usage.output_tokens
2178
2089
  },
2179
2090
  request: { body },
2180
2091
  response: {
@@ -2215,8 +2126,10 @@ var OpenAIResponsesLanguageModel = class {
2215
2126
  });
2216
2127
  const self = this;
2217
2128
  let finishReason = "unknown";
2218
- let promptTokens = NaN;
2219
- let completionTokens = NaN;
2129
+ const usage = {
2130
+ inputTokens: void 0,
2131
+ outputTokens: void 0
2132
+ };
2220
2133
  let cachedPromptTokens = null;
2221
2134
  let reasoningTokens = null;
2222
2135
  let responseId = null;
@@ -2286,8 +2199,8 @@ var OpenAIResponsesLanguageModel = class {
               finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
               hasToolCalls
             });
-            promptTokens = value.response.usage.input_tokens;
-            completionTokens = value.response.usage.output_tokens;
+            usage.inputTokens = value.response.usage.input_tokens;
+            usage.outputTokens = value.response.usage.output_tokens;
             cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
             reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
           } else if (isResponseAnnotationAddedChunk(value)) {
@@ -2306,7 +2219,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            usage: { promptTokens, completionTokens },
+            usage,
             ...(cachedPromptTokens != null || reasoningTokens != null) && {
               providerMetadata: {
                 openai: {
2326
2239
  };
2327
2240
  }
2328
2241
  };
2329
- var usageSchema = z8.object({
2330
- input_tokens: z8.number(),
2331
- input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
2332
- output_tokens: z8.number(),
2333
- output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
2242
+ var usageSchema = z9.object({
2243
+ input_tokens: z9.number(),
2244
+ input_tokens_details: z9.object({ cached_tokens: z9.number().nullish() }).nullish(),
2245
+ output_tokens: z9.number(),
2246
+ output_tokens_details: z9.object({ reasoning_tokens: z9.number().nullish() }).nullish()
2334
2247
  });
2335
- var textDeltaChunkSchema = z8.object({
2336
- type: z8.literal("response.output_text.delta"),
2337
- delta: z8.string()
2248
+ var textDeltaChunkSchema = z9.object({
2249
+ type: z9.literal("response.output_text.delta"),
2250
+ delta: z9.string()
2338
2251
  });
2339
- var responseFinishedChunkSchema = z8.object({
2340
- type: z8.enum(["response.completed", "response.incomplete"]),
2341
- response: z8.object({
2342
- incomplete_details: z8.object({ reason: z8.string() }).nullish(),
2252
+ var responseFinishedChunkSchema = z9.object({
2253
+ type: z9.enum(["response.completed", "response.incomplete"]),
2254
+ response: z9.object({
2255
+ incomplete_details: z9.object({ reason: z9.string() }).nullish(),
2343
2256
  usage: usageSchema
2344
2257
  })
2345
2258
  });
2346
- var responseCreatedChunkSchema = z8.object({
2347
- type: z8.literal("response.created"),
2348
- response: z8.object({
2349
- id: z8.string(),
2350
- created_at: z8.number(),
2351
- model: z8.string()
2259
+ var responseCreatedChunkSchema = z9.object({
2260
+ type: z9.literal("response.created"),
2261
+ response: z9.object({
2262
+ id: z9.string(),
2263
+ created_at: z9.number(),
2264
+ model: z9.string()
2352
2265
  })
2353
2266
  });
2354
- var responseOutputItemDoneSchema = z8.object({
2355
- type: z8.literal("response.output_item.done"),
2356
- output_index: z8.number(),
2357
- item: z8.discriminatedUnion("type", [
2358
- z8.object({
2359
- type: z8.literal("message")
2267
+ var responseOutputItemDoneSchema = z9.object({
2268
+ type: z9.literal("response.output_item.done"),
2269
+ output_index: z9.number(),
2270
+ item: z9.discriminatedUnion("type", [
2271
+ z9.object({
2272
+ type: z9.literal("message")
2360
2273
  }),
2361
- z8.object({
2362
- type: z8.literal("function_call"),
2363
- id: z8.string(),
2364
- call_id: z8.string(),
2365
- name: z8.string(),
2366
- arguments: z8.string(),
2367
- status: z8.literal("completed")
2274
+ z9.object({
2275
+ type: z9.literal("function_call"),
2276
+ id: z9.string(),
2277
+ call_id: z9.string(),
2278
+ name: z9.string(),
2279
+ arguments: z9.string(),
2280
+ status: z9.literal("completed")
2368
2281
  })
2369
2282
  ])
2370
2283
  });
2371
- var responseFunctionCallArgumentsDeltaSchema = z8.object({
2372
- type: z8.literal("response.function_call_arguments.delta"),
2373
- item_id: z8.string(),
2374
- output_index: z8.number(),
2375
- delta: z8.string()
2284
+ var responseFunctionCallArgumentsDeltaSchema = z9.object({
2285
+ type: z9.literal("response.function_call_arguments.delta"),
2286
+ item_id: z9.string(),
2287
+ output_index: z9.number(),
2288
+ delta: z9.string()
2376
2289
  });
2377
- var responseOutputItemAddedSchema = z8.object({
2378
- type: z8.literal("response.output_item.added"),
2379
- output_index: z8.number(),
2380
- item: z8.discriminatedUnion("type", [
2381
- z8.object({
2382
- type: z8.literal("message")
2290
+ var responseOutputItemAddedSchema = z9.object({
2291
+ type: z9.literal("response.output_item.added"),
2292
+ output_index: z9.number(),
2293
+ item: z9.discriminatedUnion("type", [
2294
+ z9.object({
2295
+ type: z9.literal("message")
2383
2296
  }),
2384
- z8.object({
2385
- type: z8.literal("function_call"),
2386
- id: z8.string(),
2387
- call_id: z8.string(),
2388
- name: z8.string(),
2389
- arguments: z8.string()
2297
+ z9.object({
2298
+ type: z9.literal("function_call"),
2299
+ id: z9.string(),
2300
+ call_id: z9.string(),
2301
+ name: z9.string(),
2302
+ arguments: z9.string()
2390
2303
  })
2391
2304
  ])
2392
2305
  });
2393
- var responseAnnotationAddedSchema = z8.object({
2394
- type: z8.literal("response.output_text.annotation.added"),
2395
- annotation: z8.object({
2396
- type: z8.literal("url_citation"),
2397
- url: z8.string(),
2398
- title: z8.string()
2306
+ var responseAnnotationAddedSchema = z9.object({
2307
+ type: z9.literal("response.output_text.annotation.added"),
2308
+ annotation: z9.object({
2309
+ type: z9.literal("url_citation"),
2310
+ url: z9.string(),
2311
+ title: z9.string()
2399
2312
  })
2400
2313
  });
2401
- var openaiResponsesChunkSchema = z8.union([
2314
+ var openaiResponsesChunkSchema = z9.union([
2402
2315
  textDeltaChunkSchema,
2403
2316
  responseFinishedChunkSchema,
2404
2317
  responseCreatedChunkSchema,
@@ -2406,7 +2319,7 @@ var openaiResponsesChunkSchema = z8.union([
2406
2319
  responseFunctionCallArgumentsDeltaSchema,
2407
2320
  responseOutputItemAddedSchema,
2408
2321
  responseAnnotationAddedSchema,
2409
- z8.object({ type: z8.string() }).passthrough()
2322
+ z9.object({ type: z9.string() }).passthrough()
2410
2323
  // fallback for unknown chunks
2411
2324
  ]);
2412
2325
  function isTextDeltaChunk(chunk) {
@@ -2451,15 +2364,15 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = z8.object({
-  metadata: z8.any().nullish(),
-  parallelToolCalls: z8.boolean().nullish(),
-  previousResponseId: z8.string().nullish(),
-  store: z8.boolean().nullish(),
-  user: z8.string().nullish(),
-  reasoningEffort: z8.string().nullish(),
-  strictSchemas: z8.boolean().nullish(),
-  instructions: z8.string().nullish()
+var openaiResponsesProviderOptionsSchema = z9.object({
+  metadata: z9.any().nullish(),
+  parallelToolCalls: z9.boolean().nullish(),
+  previousResponseId: z9.string().nullish(),
+  store: z9.boolean().nullish(),
+  user: z9.string().nullish(),
+  reasoningEffort: z9.string().nullish(),
+  strictSchemas: z9.boolean().nullish(),
+  instructions: z9.string().nullish()
 });
 
 // src/openai-provider.ts
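
For the Responses API models, the provider options schema above stays `nullish` throughout. A closing sketch of driving those options from application code, assuming the `openai.responses` factory exported by this package and the `generateText` entry point of the `ai` package:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai.responses("gpt-4o-mini"),
      prompt: "Continue the conversation.",
      providerOptions: {
        openai: {
          store: true,                    // persist the response server-side
          previousResponseId: "resp_123", // hypothetical id of a stored response
        },
      },
    });
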