@ai-sdk/openai 2.0.0-canary.4 → 2.0.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -6,8 +6,7 @@ import {
 
  // src/openai-chat-language-model.ts
  import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ InvalidResponseDataError
  } from "@ai-sdk/provider";
  import {
  combineHeaders,
@@ -15,17 +14,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
- postJsonToApi
+ postJsonToApi,
+ parseProviderOptions
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";
 
  // src/convert-to-openai-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -77,7 +77,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -92,14 +92,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
@@ -154,41 +160,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
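Note: with useLegacyFunctionCalling removed, convertToOpenAIChatMessages always emits the current tool-calling message shapes. A minimal sketch of the two forms involved; identifiers and values here are illustrative, not part of the package:

// Assistant turn that carries tool calls (replaces the legacy function_call field):
const assistantMessage = {
  role: "assistant",
  content: "",
  tool_calls: [
    {
      id: "call_123", // hypothetical tool call id
      type: "function",
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

// Tool result turn, correlated by tool_call_id (replaces role: "function"):
const toolMessage = {
  role: "tool",
  tool_call_id: "call_123",
  content: JSON.stringify({ temperature: 21 }),
};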
@@ -231,18 +216,69 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }
 
- // src/openai-error.ts
+ // src/openai-chat-options.ts
  import { z } from "zod";
+ var openaiProviderOptions = z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: z.record(z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: z.record(z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var openaiErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
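Note: the settings validated by openaiProviderOptions above (logitBias, logprobs, parallelToolCalls, user, reasoningEffort, maxCompletionTokens, store, metadata, prediction) were previously model settings and now arrive through providerOptions.openai, parsed via parseProviderOptions. A hedged call-site sketch, assuming the AI SDK's generateText; the exact signature depends on the matching canary of the ai package:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o"), // model id is illustrative
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      user: "user-1234",        // end-user identifier for abuse monitoring
      parallelToolCalls: false, // disable parallel function calling
      store: true,              // enable response persistence
    },
  },
});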
@@ -270,7 +306,6 @@ import {
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -278,48 +313,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new UnsupportedFunctionalityError2({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools2 = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -391,7 +384,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -404,8 +397,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -419,21 +417,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -442,13 +428,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -459,19 +445,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -535,26 +521,30 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools2,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
@@ -599,14 +589,7 @@ var OpenAIChatLanguageModel = class {
  }
  return {
  text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: generateId(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
@@ -617,11 +600,10 @@ var OpenAIChatLanguageModel = class {
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(body) },
+ request: { body },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
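Note: across doGenerate and doStream, usage is now reported as inputTokens/outputTokens instead of promptTokens/completionTokens, and missing counts become undefined rather than NaN. A sketch of consumer-side handling; the type name is illustrative, not exported by the package:

interface UsageV2 {
  inputTokens: number | undefined;  // was promptTokens (NaN when missing)
  outputTokens: number | undefined; // was completionTokens (NaN when missing)
}

function totalTokens(usage: UsageV2): number | undefined {
  // undefined now propagates instead of NaN, so guard explicitly:
  if (usage.inputTokens == null || usage.outputTokens == null) return undefined;
  return usage.inputTokens + usage.outputTokens;
}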
@@ -633,49 +615,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
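Note: the simulateStreaming model setting is removed here without an in-provider replacement. If the same behavior is still needed, one option is middleware at the SDK level; a hedged sketch, assuming the ai package exports wrapLanguageModel and simulateStreamingMiddleware as in recent 4.x releases:

import { simulateStreamingMiddleware, wrapLanguageModel } from "ai";
import { openai } from "@ai-sdk/openai";

// Runs a single non-streaming generation and replays the result as a
// simulated stream, which is what the removed setting did inside doStream.
const model = wrapLanguageModel({
  model: openai("gpt-4o"), // model id is illustrative
  middleware: simulateStreamingMiddleware(),
});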
@@ -700,13 +639,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -738,10 +676,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -776,16 +712,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: generateId(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -867,125 +795,112 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
+ request: { body },
  response: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
  warnings
  };
  }
  };
- var openaiTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- arguments: z2.string(),
- name: z2.string()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- name: z2.string().optional(),
- arguments: z2.string().optional()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1033,12 +948,12 @@ import {
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";
 
  // src/convert-to-openai-completion-prompt.ts
  import {
  InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
@@ -1085,7 +1000,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool-call messages"
  });
  }
@@ -1098,7 +1013,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool messages"
  });
  }
@@ -1146,7 +1061,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1188,7 +1103,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1222,18 +1137,16 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
  text: choice.text,
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- request: { body: JSON.stringify(args) },
+ request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
@@ -1264,11 +1177,10 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1295,10 +1207,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1328,53 +1238,52 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
  response: { headers: responseHeaders },
  warnings,
  request: { body: JSON.stringify(body) }
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionResponseSchema = z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  })
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionChunkSchema = z4.union([
+ z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string().nullish(),
+ index: z4.number(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1389,7 +1298,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z5 } from "zod";
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v1";
@@ -1448,9 +1357,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z5.object({
+ data: z5.array(z5.object({ embedding: z5.array(z5.number()) })),
+ usage: z5.object({ prompt_tokens: z5.number() }).nullish()
  });
 
  // src/openai-image-model.ts
@@ -1459,7 +1368,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1537,13 +1446,13 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiImageResponseSchema = z6.object({
+ data: z6.array(z6.object({ b64_json: z6.string() }))
  });
 
  // src/openai-tools.ts
- import { z as z6 } from "zod";
- var WebSearchPreviewParameters = z6.object({});
+ import { z as z7 } from "zod";
+ var WebSearchPreviewParameters = z7.object({});
  function webSearchPreviewTool({
  searchContextSize,
  userLocation
@@ -1567,22 +1476,16 @@ import {
  combineHeaders as combineHeaders5,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions2,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
- var OpenAIProviderOptionsSchema = z7.object({
- include: z7.array(z7.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: z7.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: z7.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: z7.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: z7.array(z7.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ import { z as z8 } from "zod";
+ var OpenAIProviderOptionsSchema = z8.object({
+ include: z8.array(z8.string()).nullish(),
+ language: z8.string().nullish(),
+ prompt: z8.string().nullish(),
+ temperature: z8.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z8.array(z8.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1657,8 +1560,9 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = parseProviderOptions({
+ const openAIOptions = parseProviderOptions2({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -1669,16 +1573,16 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
@@ -1729,15 +1633,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z7.object({
- text: z7.string(),
- language: z7.string().nullish(),
- duration: z7.number().nullish(),
- words: z7.array(
- z7.object({
- word: z7.string(),
- start: z7.number(),
- end: z7.number()
+ var openaiTranscriptionResponseSchema = z8.object({
+ text: z8.string(),
+ language: z8.string().nullish(),
+ duration: z8.number().nullish(),
+ words: z8.array(
+ z8.object({
+ word: z8.string(),
+ start: z8.number(),
+ end: z8.number()
  })
  ).nullish()
  });
@@ -1748,14 +1652,14 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions2,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z8 } from "zod";
+ import { z as z9 } from "zod";
 
  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  function convertToOpenAIResponsesMessages({
  prompt,
@@ -1811,7 +1715,7 @@ function convertToOpenAIResponsesMessages({
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: "PDF file parts with URLs"
  });
  }
@@ -1821,7 +1725,7 @@ function convertToOpenAIResponsesMessages({
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: `file part media type ${part.mediaType}`
  });
  }
@@ -1893,7 +1797,7 @@ function mapOpenAIResponseFinishReason({
 
  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
  tools,
@@ -1953,7 +1857,7 @@ function prepareResponsesTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError6({
+ throw new UnsupportedFunctionalityError5({
  functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -1972,7 +1876,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -2015,7 +1919,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions2({
+ const openaiOptions = parseProviderOptions3({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -2026,7 +1930,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2105,49 +2009,49 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z8.object({
- id: z8.string(),
- created_at: z8.number(),
- model: z8.string(),
- output: z8.array(
- z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message"),
- role: z8.literal("assistant"),
- content: z8.array(
- z8.object({
- type: z8.literal("output_text"),
- text: z8.string(),
- annotations: z8.array(
- z8.object({
- type: z8.literal("url_citation"),
- start_index: z8.number(),
- end_index: z8.number(),
- url: z8.string(),
- title: z8.string()
+ z9.object({
+ id: z9.string(),
+ created_at: z9.number(),
+ model: z9.string(),
+ output: z9.array(
+ z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message"),
+ role: z9.literal("assistant"),
+ content: z9.array(
+ z9.object({
+ type: z9.literal("output_text"),
+ text: z9.string(),
+ annotations: z9.array(
+ z9.object({
+ type: z9.literal("url_citation"),
+ start_index: z9.number(),
+ end_index: z9.number(),
+ url: z9.string(),
+ title: z9.string()
  })
  )
  })
  )
  }),
- z8.object({
- type: z8.literal("function_call"),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string()
+ z9.object({
+ type: z9.literal("function_call"),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string()
  }),
- z8.object({
- type: z8.literal("web_search_call")
+ z9.object({
+ type: z9.literal("web_search_call")
  }),
- z8.object({
- type: z8.literal("computer_call")
+ z9.object({
+ type: z9.literal("computer_call")
  }),
- z8.object({
- type: z8.literal("reasoning")
+ z9.object({
+ type: z9.literal("reasoning")
  })
  ])
  ),
- incomplete_details: z8.object({ reason: z8.string() }).nullable(),
+ incomplete_details: z9.object({ reason: z9.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2180,16 +2084,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
@@ -2228,8 +2126,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2299,8 +2199,8 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
@@ -2319,7 +2219,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2333,89 +2233,85 @@ var OpenAIResponsesLanguageModel = class {
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- request: { body: JSON.stringify(body) },
+ request: { body },
  response: { headers: responseHeaders },
  warnings
  };
  }
  };
- var usageSchema = z8.object({
- input_tokens: z8.number(),
- input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
- output_tokens: z8.number(),
- output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
+ var usageSchema = z9.object({
+ input_tokens: z9.number(),
+ input_tokens_details: z9.object({ cached_tokens: z9.number().nullish() }).nullish(),
+ output_tokens: z9.number(),
+ output_tokens_details: z9.object({ reasoning_tokens: z9.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z8.object({
- type: z8.literal("response.output_text.delta"),
- delta: z8.string()
+ var textDeltaChunkSchema = z9.object({
+ type: z9.literal("response.output_text.delta"),
+ delta: z9.string()
  });
- var responseFinishedChunkSchema = z8.object({
- type: z8.enum(["response.completed", "response.incomplete"]),
- response: z8.object({
- incomplete_details: z8.object({ reason: z8.string() }).nullish(),
+ var responseFinishedChunkSchema = z9.object({
+ type: z9.enum(["response.completed", "response.incomplete"]),
+ response: z9.object({
+ incomplete_details: z9.object({ reason: z9.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z8.object({
- type: z8.literal("response.created"),
- response: z8.object({
- id: z8.string(),
- created_at: z8.number(),
- model: z8.string()
+ var responseCreatedChunkSchema = z9.object({
+ type: z9.literal("response.created"),
+ response: z9.object({
+ id: z9.string(),
+ created_at: z9.number(),
+ model: z9.string()
  })
  });
- var responseOutputItemDoneSchema = z8.object({
- type: z8.literal("response.output_item.done"),
- output_index: z8.number(),
- item: z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message")
+ var responseOutputItemDoneSchema = z9.object({
+ type: z9.literal("response.output_item.done"),
+ output_index: z9.number(),
+ item: z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message")
  }),
- z8.object({
- type: z8.literal("function_call"),
- id: z8.string(),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string(),
- status: z8.literal("completed")
+ z9.object({
+ type: z9.literal("function_call"),
+ id: z9.string(),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string(),
+ status: z9.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z8.object({
- type: z8.literal("response.function_call_arguments.delta"),
- item_id: z8.string(),
- output_index: z8.number(),
- delta: z8.string()
+ var responseFunctionCallArgumentsDeltaSchema = z9.object({
+ type: z9.literal("response.function_call_arguments.delta"),
+ item_id: z9.string(),
+ output_index: z9.number(),
+ delta: z9.string()
  });
- var responseOutputItemAddedSchema = z8.object({
- type: z8.literal("response.output_item.added"),
- output_index: z8.number(),
- item: z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message")
+ var responseOutputItemAddedSchema = z9.object({
+ type: z9.literal("response.output_item.added"),
+ output_index: z9.number(),
+ item: z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message")
  }),
- z8.object({
- type: z8.literal("function_call"),
- id: z8.string(),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string()
+ z9.object({
+ type: z9.literal("function_call"),
+ id: z9.string(),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z8.object({
- type: z8.literal("response.output_text.annotation.added"),
- annotation: z8.object({
- type: z8.literal("url_citation"),
- url: z8.string(),
- title: z8.string()
+ var responseAnnotationAddedSchema = z9.object({
+ type: z9.literal("response.output_text.annotation.added"),
+ annotation: z9.object({
+ type: z9.literal("url_citation"),
+ url: z9.string(),
+ title: z9.string()
  })
  });
- var openaiResponsesChunkSchema = z8.union([
+ var openaiResponsesChunkSchema = z9.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2423,7 +2319,7 @@ var openaiResponsesChunkSchema = z8.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z8.object({ type: z8.string() }).passthrough()
+ z9.object({ type: z9.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2468,15 +2364,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z8.object({
- metadata: z8.any().nullish(),
- parallelToolCalls: z8.boolean().nullish(),
- previousResponseId: z8.string().nullish(),
- store: z8.boolean().nullish(),
- user: z8.string().nullish(),
- reasoningEffort: z8.string().nullish(),
- strictSchemas: z8.boolean().nullish(),
- instructions: z8.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z9.object({
+ metadata: z9.any().nullish(),
+ parallelToolCalls: z9.boolean().nullish(),
+ previousResponseId: z9.string().nullish(),
+ store: z9.boolean().nullish(),
+ user: z9.string().nullish(),
+ reasoningEffort: z9.string().nullish(),
+ strictSchemas: z9.boolean().nullish(),
+ instructions: z9.string().nullish()
  });
 
  // src/openai-provider.ts