@ai-sdk/openai 2.0.0-canary.5 → 2.0.0-canary.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,6 @@
  // src/openai-chat-language-model.ts
  import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ InvalidResponseDataError
  } from "@ai-sdk/provider";
  import {
  combineHeaders,
@@ -9,17 +8,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
- postJsonToApi
+ postJsonToApi,
+ parseProviderOptions
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";
 
  // src/convert-to-openai-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -71,7 +71,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
  // OpenAI specific extension: image detail
  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
  }
@@ -86,14 +86,20 @@ function convertToOpenAIChatMessages({
  case "audio/wav": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "wav"
+ }
  };
  }
  case "audio/mp3":
  case "audio/mpeg": {
  return {
  type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "mp3"
+ }
  };
  }
  default: {
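Net effect of the two hunks above: image and audio file parts are now explicitly base64-encoded instead of assuming `part.data` is already a base64 string. A minimal sketch of what `convertToBase64` from `@ai-sdk/provider-utils` is assumed to do (pass strings through, encode `Uint8Array` data):

```ts
// Sketch only; the real convertToBase64 lives in @ai-sdk/provider-utils.
function convertToBase64Sketch(data: string | Uint8Array): string {
  if (typeof data === "string") {
    return data; // assumed to already be base64-encoded
  }
  let binary = "";
  for (const byte of data) {
    binary += String.fromCharCode(byte);
  }
  return btoa(binary); // Buffer.from(data).toString("base64") in Node
}
```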
@@ -148,41 +154,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
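With `useLegacyFunctionCalling` gone, the converter always emits the modern tool-calling wire format. A sketch of the two message shapes it now produces (all values hypothetical):

```ts
// Assistant turn carrying a tool call (tool_calls replaces function_call):
const assistantMessage = {
  role: "assistant",
  content: "Let me check the weather.",
  tool_calls: [
    {
      id: "call_1", // hypothetical id
      type: "function",
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

// Tool result turn (role "tool" replaces the legacy role "function"):
const toolMessage = {
  role: "tool",
  tool_call_id: "call_1",
  content: '{"temperatureCelsius":21}',
};
```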
@@ -225,18 +210,69 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }
 
- // src/openai-error.ts
+ // src/openai-chat-options.ts
  import { z } from "zod";
+ var openaiProviderOptions = z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: z.record(z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: z.record(z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var openaiErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
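The new `openaiProviderOptions` schema moves chat settings such as `logitBias`, `logprobs`, `user`, and `reasoningEffort` out of model settings and into `providerOptions.openai`, validated via `parseProviderOptions`. A hedged usage sketch, assuming the `ai` package's `generateText` API:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Say hello.",
  providerOptions: {
    openai: {
      user: "user-1234", // end-user identifier for abuse monitoring
      parallelToolCalls: false, // disable parallel function calling
      logprobs: 3, // top-3 log probabilities per token
    },
  },
});
```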
@@ -264,7 +300,6 @@ import {
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -272,48 +307,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new UnsupportedFunctionalityError2({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -385,7 +378,7 @@ var OpenAIChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -398,8 +391,13 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const warnings = [];
+ const openaiOptions = (_a = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -413,21 +411,9 @@ var OpenAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -436,13 +422,13 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -453,19 +439,19 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -529,32 +515,36 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
  structuredOutputs: this.supportsStructuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -592,17 +582,11 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: generateId(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ text: choice.message.content != null ? { type: "text", text: choice.message.content } : void 0,
+ toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
  var _a2;
  return {
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
  toolName: toolCall.function.name,
@@ -611,8 +595,8 @@ var OpenAIChatLanguageModel = class {
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : void 0,
+ outputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : void 0
  },
  request: { body },
  response: {
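Usage reporting changes shape here: `promptTokens`/`completionTokens` become `inputTokens`/`outputTokens`, and missing values are now `undefined` rather than `NaN`. A consumer-side sketch, assuming the `ai` package surfaces these fields unchanged:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { usage } = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "Hi",
});
// Fields may be undefined if the API response omits usage.
console.log(`in: ${usage.inputTokens ?? "?"}, out: ${usage.outputTokens ?? "?"}`);
```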
@@ -626,48 +610,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
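The `simulateStreaming` setting is removed from `doStream`. A hedged migration sketch for callers that relied on it: run a non-streaming call and wrap the result as a single-chunk stream (the helper name is hypothetical, not part of this package):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical call-site helper that emits one "delta" with the whole text.
async function* simulateStream(call: () => Promise<{ text: string }>) {
  const { text } = await call();
  yield text;
}

for await (const chunk of simulateStream(() =>
  generateText({ model: openai("gpt-4o"), prompt: "Hello" })
)) {
  process.stdout.write(chunk);
}
```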
@@ -692,13 +634,12 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -730,10 +671,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -757,8 +696,8 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  const mappedLogprobs = mapOpenAIChatLogProbsOutput(
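Text stream parts change shape from `{ type: 'text-delta', textDelta }` to `{ type: 'text', text }`. A sketch of consuming the raw part stream under the new shape (stream acquisition elided; async iteration over `ReadableStream` assumes Node >= 18):

```ts
// `stream` is assumed to be the stream returned from doStream().
async function printText(stream: AsyncIterable<{ type: string; text?: string }>) {
  for await (const part of stream) {
    if (part.type === "text") {
      process.stdout.write(part.text ?? ""); // was part.textDelta before this change
    }
  }
}
```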
@@ -768,16 +707,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: generateId(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -859,15 +790,11 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
@@ -879,104 +806,96 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- arguments: z2.string(),
- name: z2.string()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- name: z2.string().optional(),
- arguments: z2.string().optional()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1024,12 +943,12 @@ import {
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";
 
  // src/convert-to-openai-completion-prompt.ts
  import {
  InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
@@ -1076,7 +995,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool-call messages"
  });
  }
@@ -1089,7 +1008,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool messages"
  });
  }
@@ -1137,7 +1056,7 @@ var OpenAICompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1179,7 +1098,7 @@ var OpenAICompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1215,10 +1134,10 @@ var OpenAICompletionLanguageModel = class {
  });
  const choice = response.choices[0];
  return {
- text: choice.text,
+ text: { type: "text", text: choice.text },
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
@@ -1254,9 +1173,9 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
@@ -1283,10 +1202,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1294,8 +1211,8 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1322,46 +1239,46 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionResponseSchema = z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  })
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionChunkSchema = z4.union([
+ z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string().nullish(),
+ index: z4.number(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1376,10 +1293,10 @@ import {
  createJsonResponseHandler as createJsonResponseHandler3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z5 } from "zod";
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1408,7 +1325,11 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1431,13 +1352,13 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z5.object({
+ data: z5.array(z5.object({ embedding: z5.array(z5.number()) })),
+ usage: z5.object({ prompt_tokens: z5.number() }).nullish()
  });
 
  // src/openai-image-model.ts
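The embedding model is bumped to specification v2: `rawResponse: { headers }` becomes `response: { headers, body }`, with the raw parsed JSON exposed via `rawValue` from `postJsonToApi`. A consumer-side sketch, assuming direct use of the model's `doEmbed` method:

```ts
import { openai } from "@ai-sdk/openai";

const model = openai.embedding("text-embedding-3-small");
const { embeddings, response } = await model.doEmbed({
  values: ["hello world"],
});
console.log(response?.headers); // HTTP response headers
console.log(response?.body); // raw parsed response body (new in v2)
```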
@@ -1446,7 +1367,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1524,8 +1445,8 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiImageResponseSchema = z6.object({
+ data: z6.array(z6.object({ b64_json: z6.string() }))
  });
 
  // src/openai-transcription-model.ts
@@ -1533,22 +1454,16 @@ import {
  combineHeaders as combineHeaders5,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions2,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
- var OpenAIProviderOptionsSchema = z6.object({
- include: z6.array(z6.string()).optional().describe(
- "Additional information to include in the transcription response."
- ),
- language: z6.string().optional().describe("The language of the input audio in ISO-639-1 format."),
- prompt: z6.string().optional().describe(
- "An optional text to guide the model's style or continue a previous audio segment."
- ),
- temperature: z6.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
- timestampGranularities: z6.array(z6.enum(["word", "segment"])).optional().default(["segment"]).describe(
- "The timestamp granularities to populate for this transcription."
- )
+ import { z as z7 } from "zod";
+ var openAIProviderOptionsSchema = z7.object({
+ include: z7.array(z7.string()).nullish(),
+ language: z7.string().nullish(),
+ prompt: z7.string().nullish(),
+ temperature: z7.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z7.array(z7.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1623,11 +1538,12 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
+ var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = parseProviderOptions({
+ const openAIOptions = parseProviderOptions2({
  provider: "openai",
  providerOptions,
- schema: OpenAIProviderOptionsSchema
+ schema: openAIProviderOptionsSchema
  });
  const formData = new FormData();
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
@@ -1635,16 +1551,16 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
  };
  for (const key in transcriptionModelOptions) {
  const value = transcriptionModelOptions[key];
  if (value !== void 0) {
- formData.append(key, value);
+ formData.append(key, String(value));
  }
  }
  }
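Transcription provider options switch from `.optional().describe(...)` to `.nullish()`, null values are normalized to `undefined`, and form-data values are stringified explicitly. A hedged usage sketch, assuming the `ai` package's experimental transcription API:

```ts
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";

const audioData = new Uint8Array([/* ...audio bytes... */]);

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: audioData, // Uint8Array or base64 string
  providerOptions: {
    openai: {
      language: "en", // ISO-639-1; null and undefined are now both accepted
      temperature: 0,
      timestampGranularities: ["word"],
    },
  },
});
```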
@@ -1695,15 +1611,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z6.object({
- text: z6.string(),
- language: z6.string().nullish(),
- duration: z6.number().nullish(),
- words: z6.array(
- z6.object({
- word: z6.string(),
- start: z6.number(),
- end: z6.number()
+ var openaiTranscriptionResponseSchema = z7.object({
+ text: z7.string(),
+ language: z7.string().nullish(),
+ duration: z7.number().nullish(),
+ words: z7.array(
+ z7.object({
+ word: z7.string(),
+ start: z7.number(),
+ end: z7.number()
  })
  ).nullish()
  });
@@ -1714,14 +1630,14 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions2,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z8 } from "zod";
 
  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  function convertToOpenAIResponsesMessages({
  prompt,
@@ -1777,7 +1693,7 @@ function convertToOpenAIResponsesMessages({
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: "PDF file parts with URLs"
  });
  }
@@ -1787,7 +1703,7 @@ function convertToOpenAIResponsesMessages({
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError4({
  functionality: `file part media type ${part.mediaType}`
  });
  }
@@ -1859,7 +1775,7 @@ function mapOpenAIResponseFinishReason({
 
  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
  tools,
@@ -1919,7 +1835,7 @@ function prepareResponsesTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError6({
+ throw new UnsupportedFunctionalityError5({
  functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -1938,7 +1854,7 @@ var OpenAIResponsesLanguageModel = class {
  return this.config.provider;
  }
  getArgs({
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1981,7 +1897,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions2({
+ const openaiOptions = parseProviderOptions3({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1992,7 +1908,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2071,49 +1987,49 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string(),
- output: z7.array(
- z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message"),
- role: z7.literal("assistant"),
- content: z7.array(
- z7.object({
- type: z7.literal("output_text"),
- text: z7.string(),
- annotations: z7.array(
- z7.object({
- type: z7.literal("url_citation"),
- start_index: z7.number(),
- end_index: z7.number(),
- url: z7.string(),
- title: z7.string()
+ z8.object({
+ id: z8.string(),
+ created_at: z8.number(),
+ model: z8.string(),
+ output: z8.array(
+ z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message"),
+ role: z8.literal("assistant"),
+ content: z8.array(
+ z8.object({
+ type: z8.literal("output_text"),
+ text: z8.string(),
+ annotations: z8.array(
+ z8.object({
+ type: z8.literal("url_citation"),
+ start_index: z8.number(),
+ end_index: z8.number(),
+ url: z8.string(),
+ title: z8.string()
  })
  )
  })
  )
  }),
- z7.object({
- type: z7.literal("function_call"),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z8.object({
+ type: z8.literal("function_call"),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string()
  }),
- z7.object({
- type: z7.literal("web_search_call")
+ z8.object({
+ type: z8.literal("web_search_call")
  }),
- z7.object({
- type: z7.literal("computer_call")
+ z8.object({
+ type: z8.literal("computer_call")
  }),
- z7.object({
- type: z7.literal("reasoning")
+ z8.object({
+ type: z8.literal("reasoning")
  })
  ])
  ),
- incomplete_details: z7.object({ reason: z7.string() }).nullable(),
+ incomplete_details: z8.object({ reason: z8.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2122,17 +2038,22 @@ var OpenAIResponsesLanguageModel = class {
  });
  const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
  const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: output.call_id,
  toolName: output.name,
  args: output.arguments
  }));
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
+ text: {
+ type: "text",
+ text: outputTextElements.map((content) => content.text).join("\n")
+ },
  sources: outputTextElements.flatMap(
  (content) => content.annotations.map((annotation) => {
  var _a2, _b2, _c2;
  return {
+ type: "source",
  sourceType: "url",
  id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
  url: annotation.url,
@@ -2146,8 +2067,8 @@ var OpenAIResponsesLanguageModel = class {
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
  request: { body },
  response: {
@@ -2188,8 +2109,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2241,8 +2164,8 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2259,19 +2182,17 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
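Source stream parts are flattened here: the nested `source: { ... }` wrapper is dropped and its fields move onto the part itself. A shape sketch for consumers matching on these parts:

```ts
// Sketch of the flattened part (was { type: 'source', source: { ... } }):
type SourcePart = {
  type: "source";
  sourceType: "url";
  id: string;
  url: string;
  title: string;
};

function logSource(part: SourcePart) {
  console.log(part.url, part.title); // previously part.source.url / part.source.title
}
```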
@@ -2279,7 +2200,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2299,79 +2220,79 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = z7.object({
- input_tokens: z7.number(),
- input_tokens_details: z7.object({ cached_tokens: z7.number().nullish() }).nullish(),
- output_tokens: z7.number(),
- output_tokens_details: z7.object({ reasoning_tokens: z7.number().nullish() }).nullish()
+ var usageSchema = z8.object({
+ input_tokens: z8.number(),
+ input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
+ output_tokens: z8.number(),
+ output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z7.object({
- type: z7.literal("response.output_text.delta"),
- delta: z7.string()
+ var textDeltaChunkSchema = z8.object({
+ type: z8.literal("response.output_text.delta"),
+ delta: z8.string()
  });
- var responseFinishedChunkSchema = z7.object({
- type: z7.enum(["response.completed", "response.incomplete"]),
- response: z7.object({
- incomplete_details: z7.object({ reason: z7.string() }).nullish(),
+ var responseFinishedChunkSchema = z8.object({
+ type: z8.enum(["response.completed", "response.incomplete"]),
+ response: z8.object({
+ incomplete_details: z8.object({ reason: z8.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z7.object({
- type: z7.literal("response.created"),
- response: z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string()
+ var responseCreatedChunkSchema = z8.object({
+ type: z8.literal("response.created"),
+ response: z8.object({
+ id: z8.string(),
+ created_at: z8.number(),
+ model: z8.string()
  })
  });
- var responseOutputItemDoneSchema = z7.object({
- type: z7.literal("response.output_item.done"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemDoneSchema = z8.object({
+ type: z8.literal("response.output_item.done"),
+ output_index: z8.number(),
+ item: z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string(),
- status: z7.literal("completed")
+ z8.object({
+ type: z8.literal("function_call"),
+ id: z8.string(),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string(),
+ status: z8.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z7.object({
- type: z7.literal("response.function_call_arguments.delta"),
- item_id: z7.string(),
- output_index: z7.number(),
- delta: z7.string()
+ var responseFunctionCallArgumentsDeltaSchema = z8.object({
+ type: z8.literal("response.function_call_arguments.delta"),
+ item_id: z8.string(),
+ output_index: z8.number(),
+ delta: z8.string()
  });
- var responseOutputItemAddedSchema = z7.object({
- type: z7.literal("response.output_item.added"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemAddedSchema = z8.object({
+ type: z8.literal("response.output_item.added"),
+ output_index: z8.number(),
+ item: z8.discriminatedUnion("type", [
+ z8.object({
+ type: z8.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z8.object({
+ type: z8.literal("function_call"),
+ id: z8.string(),
+ call_id: z8.string(),
+ name: z8.string(),
+ arguments: z8.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z7.object({
- type: z7.literal("response.output_text.annotation.added"),
- annotation: z7.object({
- type: z7.literal("url_citation"),
- url: z7.string(),
- title: z7.string()
+ var responseAnnotationAddedSchema = z8.object({
+ type: z8.literal("response.output_text.annotation.added"),
+ annotation: z8.object({
+ type: z8.literal("url_citation"),
+ url: z8.string(),
+ title: z8.string()
  })
  });
- var openaiResponsesChunkSchema = z7.union([
+ var openaiResponsesChunkSchema = z8.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2379,7 +2300,7 @@ var openaiResponsesChunkSchema = z7.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z7.object({ type: z7.string() }).passthrough()
+ z8.object({ type: z8.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2424,15 +2345,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z7.object({
- metadata: z7.any().nullish(),
- parallelToolCalls: z7.boolean().nullish(),
- previousResponseId: z7.string().nullish(),
- store: z7.boolean().nullish(),
- user: z7.string().nullish(),
- reasoningEffort: z7.string().nullish(),
- strictSchemas: z7.boolean().nullish(),
- instructions: z7.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z8.object({
+ metadata: z8.any().nullish(),
+ parallelToolCalls: z8.boolean().nullish(),
+ previousResponseId: z8.string().nullish(),
+ store: z8.boolean().nullish(),
+ user: z8.string().nullish(),
+ reasoningEffort: z8.string().nullish(),
+ strictSchemas: z8.boolean().nullish(),
+ instructions: z8.string().nullish()
  });
  export {
  OpenAIChatLanguageModel,
@@ -2441,6 +2362,7 @@ export {
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAITranscriptionModel,
- modelMaxImagesPerCall
+ modelMaxImagesPerCall,
+ openaiProviderOptions
  };
  //# sourceMappingURL=index.mjs.map
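`openaiProviderOptions` is now exported from this bundle, so the chat options schema can be reused to validate options up front. A hedged sketch (the `internal` entry point is an assumption about where this bundle is exposed):

```ts
// Assumed import path; adjust to wherever the package exposes this bundle.
import { openaiProviderOptions } from "@ai-sdk/openai/internal";

// Throws a ZodError if an option has the wrong shape.
const options = openaiProviderOptions.parse({
  reasoningEffort: "high",
  maxCompletionTokens: 2048,
});
```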