@ai-sdk/openai 2.0.0-alpha.8 → 2.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -17,7 +17,7 @@ import {
  parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z5 } from "zod";

  // src/convert-to-openai-chat-messages.ts
  import {
@@ -153,7 +153,7 @@ function convertToOpenAIChatMessages({
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -169,10 +169,23 @@ function convertToOpenAIChatMessages({
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
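
Tool results are no longer serialized wholesale with JSON.stringify(toolResponse.result); the conversion above switches on a tagged output union. A rough TypeScript sketch of the union this code implies (the actual type lives in @ai-sdk/provider; this shape is inferred from the switch, not copied from the package):

  type ToolResultOutput =
    | { type: 'text'; value: string }         // forwarded verbatim
    | { type: 'error-text'; value: string }   // forwarded verbatim
    | { type: 'content'; value: unknown }     // JSON.stringify'd
    | { type: 'json'; value: unknown }        // JSON.stringify'd
    | { type: 'error-json'; value: unknown }; // JSON.stringify'd
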
@@ -270,7 +283,14 @@ var openaiProviderOptions = z.object({
  *
  * @default true
  */
- structuredOutputs: z.boolean().optional()
+ structuredOutputs: z.boolean().optional(),
+ /**
+ * Service tier for the request. Set to 'flex' for 50% cheaper processing
+ * at the cost of increased latency. Only available for o3 and o4-mini models.
+ *
+ * @default 'auto'
+ */
+ serviceTier: z.enum(["auto", "flex"]).optional()
  });

  // src/openai-error.ts
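
The new serviceTier option reaches this schema through per-provider options. A minimal usage sketch, assuming the AI SDK v5 beta generateText API (model choice illustrative; flex is limited to o3/o4-mini per the doc comment above):

  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const { text } = await generateText({
    model: openai('o4-mini'),
    prompt: 'Summarize this changelog.',
    providerOptions: {
      // 'flex' trades latency for ~50% cheaper processing
      openai: { serviceTier: 'flex' },
    },
  });
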
@@ -296,6 +316,76 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
+
+ // src/tool/file-search.ts
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod";
+ var fileSearchArgsSchema = z3.object({
+ /**
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
+ */
+ vectorStoreIds: z3.array(z3.string()).optional(),
+ /**
+ * Maximum number of search results to return. Defaults to 10.
+ */
+ maxResults: z3.number().optional(),
+ /**
+ * Type of search to perform. Defaults to 'auto'.
+ */
+ searchType: z3.enum(["auto", "keyword", "semantic"]).optional()
+ });
+ var fileSearch = createProviderDefinedToolFactory({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: z3.object({
+ query: z3.string()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod";
+ var webSearchPreviewArgsSchema = z4.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: z4.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: z4.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: z4.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: z4.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: z4.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: z4.string().optional()
+ }).optional()
+ });
+ var webSearchPreview = createProviderDefinedToolFactory2({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
+ inputSchema: z4.object({})
+ });
+
+ // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
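
These two factories replace the old webSearchPreviewTool function and are re-exported through the openaiTools object (see the src/openai-tools.ts hunk further down). A sketch of how they might be attached, assuming they surface as openai.tools in the public API (vector store ID is a placeholder):

  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = await generateText({
    model: openai.responses('gpt-4o'),
    prompt: 'What does the handbook say about travel?',
    tools: {
      // provider-executed file search over a vector store (placeholder ID)
      file_search: openai.tools.fileSearch({ vectorStoreIds: ['vs_123'], maxResults: 5 }),
      // provider-executed web search with a small context window
      web_search_preview: openai.tools.webSearchPreview({ searchContextSize: 'low' }),
    },
  });
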
@@ -308,18 +398,47 @@ function prepareTools({
  }
  const openaiTools2 = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiTools2.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters,
- strict: structuredOutputs ? true : void 0
+ switch (tool.type) {
+ case "function":
+ openaiTools2.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema,
+ strict: structuredOutputs ? true : void 0
+ }
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "web_search_preview",
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
- });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -443,6 +562,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -516,6 +636,14 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
@@ -564,10 +692,9 @@ var OpenAIChatLanguageModel = class {
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
@@ -633,6 +760,7 @@ var OpenAIChatLanguageModel = class {
  totalTokens: void 0
  };
  let isFirstChunk = true;
+ let isActiveText = false;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -642,6 +770,9 @@ var OpenAIChatLanguageModel = class {
  },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
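
Raw chunk forwarding is gated on the new includeRawChunks call option and mirrors each provider chunk before any parsing. Assuming the option is surfaced under the same name on streamText in the v5 beta, a consumer sketch:

  import { streamText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = streamText({
    model: openai('gpt-4o'),
    prompt: 'Hello',
    includeRawChunks: true, // assumed public option name, mirrors the spec field
  });

  for await (const part of result.fullStream) {
    if (part.type === 'raw') {
      console.log('raw OpenAI chunk:', part.rawValue);
    }
  }
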
@@ -685,9 +816,14 @@ var OpenAIChatLanguageModel = class {
  }
  const delta = choice.delta;
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "0",
+ delta: delta.content
  });
  }
  if (delta.tool_calls != null) {
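
Text now streams as a delimited block: a single text-start (id "0"), one text-delta per content fragment, and a matching text-end in flush, replacing the alpha's bare { type: "text" } parts. Illustrative part sequence for a two-chunk completion (values invented):

  { type: 'text-start', id: '0' }
  { type: 'text-delta', id: '0', delta: 'Hello, ' }
  { type: 'text-delta', id: '0', delta: 'world!' }
  { type: 'text-end', id: '0' }
  { type: 'finish', finishReason: 'stop', usage: { /* ... */ } }
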
@@ -712,6 +848,11 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
@@ -725,20 +866,21 @@ var OpenAIChatLanguageModel = class {
  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
  if (isParsableJson(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -753,19 +895,20 @@ var OpenAIChatLanguageModel = class {
  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
  if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
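
Tool calls follow the same delimited pattern, replacing tool-call-delta: tool-input-start carries the tool name, tool-input-delta streams raw argument text keyed only by id, tool-input-end fires once the accumulated arguments parse as JSON, and the final tool-call carries the complete input string. Illustrative sequence for one call (ids and arguments invented):

  { type: 'tool-input-start', id: 'call_1', toolName: 'getWeather' }
  { type: 'tool-input-delta', id: 'call_1', delta: '{"city":' }
  { type: 'tool-input-delta', id: 'call_1', delta: '"Berlin"}' }
  { type: 'tool-input-end', id: 'call_1' }
  { type: 'tool-call', toolCallId: 'call_1', toolName: 'getWeather', input: '{"city":"Berlin"}' }
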
@@ -773,6 +916,9 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -787,97 +933,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z3.object({
- prompt_tokens: z3.number().nullish(),
- completion_tokens: z3.number().nullish(),
- total_tokens: z3.number().nullish(),
- prompt_tokens_details: z3.object({
- cached_tokens: z3.number().nullish()
+ var openaiTokenUsageSchema = z5.object({
+ prompt_tokens: z5.number().nullish(),
+ completion_tokens: z5.number().nullish(),
+ total_tokens: z5.number().nullish(),
+ prompt_tokens_details: z5.object({
+ cached_tokens: z5.number().nullish()
  }).nullish(),
- completion_tokens_details: z3.object({
- reasoning_tokens: z3.number().nullish(),
- accepted_prediction_tokens: z3.number().nullish(),
- rejected_prediction_tokens: z3.number().nullish()
+ completion_tokens_details: z5.object({
+ reasoning_tokens: z5.number().nullish(),
+ accepted_prediction_tokens: z5.number().nullish(),
+ rejected_prediction_tokens: z5.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- message: z3.object({
- role: z3.literal("assistant").nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- id: z3.string().nullish(),
- type: z3.literal("function"),
- function: z3.object({
- name: z3.string(),
- arguments: z3.string()
+ var openaiChatResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ message: z5.object({
+ role: z5.literal("assistant").nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ id: z5.string().nullish(),
+ type: z5.literal("function"),
+ function: z5.object({
+ name: z5.string(),
+ arguments: z5.string()
  })
  })
  ).nullish()
  }),
- index: z3.number(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ index: z5.number(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish()
+ finish_reason: z5.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- delta: z3.object({
- role: z3.enum(["assistant"]).nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- index: z3.number(),
- id: z3.string().nullish(),
- type: z3.literal("function").nullish(),
- function: z3.object({
- name: z3.string().nullish(),
- arguments: z3.string().nullish()
+ var openaiChatChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ delta: z5.object({
+ role: z5.enum(["assistant"]).nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ index: z5.number(),
+ id: z5.string().nullish(),
+ type: z5.literal("function").nullish(),
+ function: z5.object({
+ name: z5.string().nullish(),
+ arguments: z5.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish(),
- index: z3.number()
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -887,6 +1033,9 @@ var openaiChatChunkSchema = z3.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
+ function supportsFlexProcessing(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
  function getSystemMessageMode(modelId) {
  var _a, _b;
  if (!isReasoningModel(modelId)) {
@@ -935,7 +1084,7 @@ import {
  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z7 } from "zod";

  // src/convert-to-openai-completion-prompt.ts
  import {
@@ -1016,12 +1165,12 @@ ${user}:`]
  }

  // src/openai-completion-options.ts
- import { z as z4 } from "zod";
- var openaiCompletionProviderOptions = z4.object({
+ import { z as z6 } from "zod";
+ var openaiCompletionProviderOptions = z6.object({
  /**
  Echo back the prompt in addition to the completion.
  */
- echo: z4.boolean().optional(),
+ echo: z6.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -1036,16 +1185,16 @@ var openaiCompletionProviderOptions = z4.object({
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
  */
- logitBias: z4.record(z4.string(), z4.number()).optional(),
+ logitBias: z6.record(z6.string(), z6.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
  */
- suffix: z4.string().optional(),
+ suffix: z6.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z4.string().optional(),
+ user: z6.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
@@ -1055,7 +1204,7 @@ var openaiCompletionProviderOptions = z4.object({
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
  */
- logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+ logprobs: z6.union([z6.boolean(), z6.number()]).optional()
  });

  // src/openai-completion-language-model.ts
@@ -1227,6 +1376,9 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1244,6 +1396,7 @@ var OpenAICompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
@@ -1257,14 +1410,18 @@ var OpenAICompletionLanguageModel = class {
  if ((choice == null ? void 0 : choice.logprobs) != null) {
  providerMetadata.openai.logprobs = choice.logprobs;
  }
- if ((choice == null ? void 0 : choice.text) != null) {
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -1279,42 +1436,42 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number(),
- total_tokens: z5.number()
+ var usageSchema = z7.object({
+ prompt_tokens: z7.number(),
+ completion_tokens: z7.number(),
+ total_tokens: z7.number()
  });
- var openaiCompletionResponseSchema = z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string(),
- logprobs: z5.object({
- tokens: z5.array(z5.string()),
- token_logprobs: z5.array(z5.number()),
- top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ var openaiCompletionResponseSchema = z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
  usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = z5.union([
- z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string().nullish(),
- index: z5.number(),
- logprobs: z5.object({
- tokens: z5.array(z5.string()),
- token_logprobs: z5.array(z5.number()),
- top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ var openaiCompletionChunkSchema = z7.union([
+ z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string().nullish(),
+ index: z7.number(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
@@ -1333,21 +1490,21 @@ import {
  parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z9 } from "zod";

  // src/openai-embedding-options.ts
- import { z as z6 } from "zod";
- var openaiEmbeddingProviderOptions = z6.object({
+ import { z as z8 } from "zod";
+ var openaiEmbeddingProviderOptions = z8.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: z6.number().optional(),
+ dimensions: z8.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z6.string().optional()
+ user: z8.string().optional()
  });

  // src/openai-embedding-model.ts
@@ -1413,9 +1570,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z7.object({
- data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
- usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z9.object({
+ data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
+ usage: z9.object({ prompt_tokens: z9.number() }).nullish()
  });

  // src/openai-image-model.ts
@@ -1424,7 +1581,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z8 } from "zod";
+ import { z as z10 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1512,31 +1669,16 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z8.object({
- data: z8.array(
- z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ var openaiImageResponseSchema = z10.object({
+ data: z10.array(
+ z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
  )
  });

  // src/openai-tools.ts
- import { z as z9 } from "zod";
- var WebSearchPreviewParameters = z9.object({});
- function webSearchPreviewTool({
- searchContextSize,
- userLocation
- } = {}) {
- return {
- type: "provider-defined",
- id: "openai.web_search_preview",
- args: {
- searchContextSize,
- userLocation
- },
- parameters: WebSearchPreviewParameters
- };
- }
  var openaiTools = {
- webSearchPreview: webSearchPreviewTool
+ fileSearch,
+ webSearchPreview
  };

  // src/openai-transcription-model.ts
@@ -1547,33 +1689,33 @@ import {
  parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z11 } from "zod";
+ import { z as z12 } from "zod";

  // src/openai-transcription-options.ts
- import { z as z10 } from "zod";
- var openAITranscriptionProviderOptions = z10.object({
+ import { z as z11 } from "zod";
+ var openAITranscriptionProviderOptions = z11.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z10.array(z10.string()).optional(),
+ include: z11.array(z11.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z10.string().optional(),
+ language: z11.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z10.string().optional(),
+ prompt: z11.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z10.number().min(0).max(1).default(0).optional(),
+ temperature: z11.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/openai-transcription-model.ts
@@ -1640,7 +1782,7 @@ var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
@@ -1721,15 +1863,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z11.object({
- text: z11.string(),
- language: z11.string().nullish(),
- duration: z11.number().nullish(),
- words: z11.array(
- z11.object({
- word: z11.string(),
- start: z11.number(),
- end: z11.number()
+ var openaiTranscriptionResponseSchema = z12.object({
+ text: z12.string(),
+ language: z12.string().nullish(),
+ duration: z12.number().nullish(),
+ words: z12.array(
+ z12.object({
+ word: z12.string(),
+ start: z12.number(),
+ end: z12.number()
  })
  ).nullish()
  });
@@ -1743,7 +1885,7 @@ import {
  parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod";
+ import { z as z13 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
@@ -1834,11 +1976,21 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "tool-call": {
+ if (part.providerExecuted) {
+ break;
+ }
  messages.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
+ });
+ break;
+ }
+ case "tool-result": {
+ warnings.push({
+ type: "other",
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
  });
  break;
  }
@@ -1848,10 +2000,23 @@ function convertToOpenAIResponsesMessages({
  }
  case "tool": {
  for (const part of content) {
+ const output = part.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  type: "function_call_output",
  call_id: part.toolCallId,
- output: JSON.stringify(part.result)
+ output: contentValue
  });
  }
  break;
@@ -1905,7 +2070,7 @@ function prepareResponsesTools({
  type: "function",
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters,
+ parameters: tool.inputSchema,
  strict: strict ? true : void 0
  });
  break;
@@ -2039,6 +2204,7 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -2072,6 +2238,14 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ delete baseArgs.service_tier;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
@@ -2106,55 +2280,59 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z12.object({
- id: z12.string(),
- created_at: z12.number(),
- model: z12.string(),
- output: z12.array(
- z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message"),
- role: z12.literal("assistant"),
- content: z12.array(
- z12.object({
- type: z12.literal("output_text"),
- text: z12.string(),
- annotations: z12.array(
- z12.object({
- type: z12.literal("url_citation"),
- start_index: z12.number(),
- end_index: z12.number(),
- url: z12.string(),
- title: z12.string()
+ z13.object({
+ id: z13.string(),
+ created_at: z13.number(),
+ model: z13.string(),
+ output: z13.array(
+ z13.discriminatedUnion("type", [
+ z13.object({
+ type: z13.literal("message"),
+ role: z13.literal("assistant"),
+ content: z13.array(
+ z13.object({
+ type: z13.literal("output_text"),
+ text: z13.string(),
+ annotations: z13.array(
+ z13.object({
+ type: z13.literal("url_citation"),
+ start_index: z13.number(),
+ end_index: z13.number(),
+ url: z13.string(),
+ title: z13.string()
  })
  )
  })
  )
  }),
- z12.object({
- type: z12.literal("function_call"),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string()
+ z13.object({
+ type: z13.literal("function_call"),
+ call_id: z13.string(),
+ name: z13.string(),
+ arguments: z13.string()
  }),
- z12.object({
- type: z12.literal("web_search_call")
+ z13.object({
+ type: z13.literal("web_search_call"),
+ id: z13.string(),
+ status: z13.string().optional()
  }),
- z12.object({
- type: z12.literal("computer_call")
+ z13.object({
+ type: z13.literal("computer_call"),
+ id: z13.string(),
+ status: z13.string().optional()
  }),
- z12.object({
- type: z12.literal("reasoning"),
- summary: z12.array(
- z12.object({
- type: z12.literal("summary_text"),
- text: z12.string()
+ z13.object({
+ type: z13.literal("reasoning"),
+ summary: z13.array(
+ z13.object({
+ type: z13.literal("summary_text"),
+ text: z13.string()
  })
  )
  })
  ])
  ),
- incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+ incomplete_details: z13.object({ reason: z13.string() }).nullable(),
  usage: usageSchema2
  })
  ),
@@ -2192,10 +2370,46 @@ var OpenAIResponsesLanguageModel = class {
  case "function_call": {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: part.call_id,
  toolName: part.name,
- args: part.arguments
+ input: part.arguments
+ });
+ break;
+ }
+ case "web_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ result: { status: part.status || "completed" },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "computer_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
  });
  break;
  }
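
Built-in web search and computer use are now surfaced as provider-executed tools: a tool-call with providerExecuted: true paired immediately with a tool-result. This is also why the message converter above drops such parts when replaying history; OpenAI runs these tools server-side, so nothing is sent back. Sketch of the resulting content parts for one web search (ids and status invented):

  [
    { type: 'tool-call', toolCallId: 'ws_1', toolName: 'web_search_preview',
      input: '', providerExecuted: true },
    { type: 'tool-result', toolCallId: 'ws_1', toolName: 'web_search_preview',
      result: { status: 'completed' }, providerExecuted: true },
    // ...assistant text citing the search results follows
  ]
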
@@ -2267,6 +2481,9 @@ var OpenAIResponsesLanguageModel = class {
  },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2280,22 +2497,121 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id
+ });
+ } else if (value.item.type === "reasoning") {
+ controller.enqueue({
+ type: "reasoning-start",
+ id: value.item.id
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- argsTextDelta: value.item.arguments
+ input: value.item.arguments
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
+ });
+ } else if (value.item.type === "reasoning") {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: value.item.id
  });
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
@@ -2308,23 +2624,15 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
- text: value.delta
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "reasoning",
- text: value.delta
- });
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
+ type: "reasoning-delta",
+ delta: value.delta,
+ id: value.item_id
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
@@ -2365,95 +2673,134 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z12.object({
- input_tokens: z12.number(),
- input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
- output_tokens: z12.number(),
- output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
+ var usageSchema2 = z13.object({
+ input_tokens: z13.number(),
+ input_tokens_details: z13.object({ cached_tokens: z13.number().nullish() }).nullish(),
+ output_tokens: z13.number(),
+ output_tokens_details: z13.object({ reasoning_tokens: z13.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z12.object({
- type: z12.literal("response.output_text.delta"),
- delta: z12.string()
+ var textDeltaChunkSchema = z13.object({
+ type: z13.literal("response.output_text.delta"),
+ item_id: z13.string(),
+ delta: z13.string()
  });
- var responseFinishedChunkSchema = z12.object({
- type: z12.enum(["response.completed", "response.incomplete"]),
- response: z12.object({
- incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+ var responseFinishedChunkSchema = z13.object({
+ type: z13.enum(["response.completed", "response.incomplete"]),
+ response: z13.object({
+ incomplete_details: z13.object({ reason: z13.string() }).nullish(),
  usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = z12.object({
- type: z12.literal("response.created"),
- response: z12.object({
- id: z12.string(),
- created_at: z12.number(),
- model: z12.string()
+ var responseCreatedChunkSchema = z13.object({
+ type: z13.literal("response.created"),
+ response: z13.object({
+ id: z13.string(),
+ created_at: z13.number(),
+ model: z13.string()
  })
  });
- var responseOutputItemDoneSchema = z12.object({
- type: z12.literal("response.output_item.done"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemAddedSchema = z13.object({
+ type: z13.literal("response.output_item.added"),
+ output_index: z13.number(),
+ item: z13.discriminatedUnion("type", [
+ z13.object({
+ type: z13.literal("message"),
+ id: z13.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string(),
- status: z12.literal("completed")
+ z13.object({
+ type: z13.literal("reasoning"),
+ id: z13.string()
+ }),
+ z13.object({
+ type: z13.literal("function_call"),
+ id: z13.string(),
+ call_id: z13.string(),
+ name: z13.string(),
+ arguments: z13.string()
+ }),
+ z13.object({
+ type: z13.literal("web_search_call"),
+ id: z13.string(),
+ status: z13.string()
+ }),
+ z13.object({
+ type: z13.literal("computer_call"),
+ id: z13.string(),
+ status: z13.string()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z12.object({
- type: z12.literal("response.function_call_arguments.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- delta: z12.string()
- });
- var responseOutputItemAddedSchema = z12.object({
- type: z12.literal("response.output_item.added"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemDoneSchema = z13.object({
+ type: z13.literal("response.output_item.done"),
+ output_index: z13.number(),
+ item: z13.discriminatedUnion("type", [
+ z13.object({
+ type: z13.literal("message"),
+ id: z13.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string()
+ z13.object({
+ type: z13.literal("reasoning"),
+ id: z13.string()
+ }),
+ z13.object({
+ type: z13.literal("function_call"),
+ id: z13.string(),
+ call_id: z13.string(),
+ name: z13.string(),
+ arguments: z13.string(),
+ status: z13.literal("completed")
+ }),
+ z13.object({
+ type: z13.literal("web_search_call"),
+ id: z13.string(),
+ status: z13.literal("completed")
+ }),
+ z13.object({
+ type: z13.literal("computer_call"),
+ id: z13.string(),
+ status: z13.literal("completed")
  })
  ])
  });
- var responseAnnotationAddedSchema = z12.object({
- type: z12.literal("response.output_text.annotation.added"),
- annotation: z12.object({
- type: z12.literal("url_citation"),
- url: z12.string(),
- title: z12.string()
+ var responseFunctionCallArgumentsDeltaSchema = z13.object({
+ type: z13.literal("response.function_call_arguments.delta"),
+ item_id: z13.string(),
+ output_index: z13.number(),
+ delta: z13.string()
+ });
+ var responseAnnotationAddedSchema = z13.object({
+ type: z13.literal("response.output_text.annotation.added"),
+ annotation: z13.object({
+ type: z13.literal("url_citation"),
+ url: z13.string(),
+ title: z13.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = z12.object({
- type: z12.literal("response.reasoning_summary_text.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- summary_index: z12.number(),
- delta: z12.string()
+ var responseReasoningSummaryTextDeltaSchema = z13.object({
+ type: z13.literal("response.reasoning_summary_text.delta"),
+ item_id: z13.string(),
+ output_index: z13.number(),
+ summary_index: z13.number(),
+ delta: z13.string()
  });
- var openaiResponsesChunkSchema = z12.union([
+ var responseReasoningSummaryPartDoneSchema = z13.object({
+ type: z13.literal("response.reasoning_summary_part.done"),
+ item_id: z13.string(),
+ output_index: z13.number(),
+ summary_index: z13.number(),
+ part: z13.unknown().nullish()
+ });
+ var openaiResponsesChunkSchema = z13.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
+ responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
- responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- z12.object({ type: z12.string() }).passthrough()
+ responseReasoningSummaryPartDoneSchema,
+ z13.object({ type: z13.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2501,16 +2848,20 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z12.object({
- metadata: z12.any().nullish(),
- parallelToolCalls: z12.boolean().nullish(),
- previousResponseId: z12.string().nullish(),
- store: z12.boolean().nullish(),
- user: z12.string().nullish(),
- reasoningEffort: z12.string().nullish(),
- strictSchemas: z12.boolean().nullish(),
- instructions: z12.string().nullish(),
- reasoningSummary: z12.string().nullish()
+ function supportsFlexProcessing2(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ var openaiResponsesProviderOptionsSchema = z13.object({
+ metadata: z13.any().nullish(),
+ parallelToolCalls: z13.boolean().nullish(),
+ previousResponseId: z13.string().nullish(),
+ store: z13.boolean().nullish(),
+ user: z13.string().nullish(),
+ reasoningEffort: z13.string().nullish(),
+ strictSchemas: z13.boolean().nullish(),
+ instructions: z13.string().nullish(),
+ reasoningSummary: z13.string().nullish(),
+ serviceTier: z13.enum(["auto", "flex"]).nullish()
  });

  // src/openai-speech-model.ts
@@ -2520,16 +2871,16 @@ import {
  parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod";
- var OpenAIProviderOptionsSchema = z13.object({
- instructions: z13.string().nullish(),
- speed: z13.number().min(0.25).max(4).default(1).nullish()
+ import { z as z14 } from "zod";
+ var OpenAIProviderOptionsSchema = z14.object({
+ instructions: z14.string().nullish(),
+ speed: z14.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
@@ -2540,6 +2891,7 @@ var OpenAISpeechModel = class {
  outputFormat = "mp3",
  speed,
  instructions,
+ language,
  providerOptions
  }) {
  const warnings = [];
@@ -2576,6 +2928,13 @@ var OpenAISpeechModel = class {
  }
  }
  }
+ if (language) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "language",
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+ });
+ }
  return {
  requestBody,
  warnings
@@ -2674,10 +3033,7 @@ function createOpenAI(options = {}) {
  "The OpenAI model function cannot be called with the new keyword."
  );
  }
- if (modelId === "gpt-3.5-turbo-instruct") {
- return createCompletionModel(modelId);
- }
- return createChatModel(modelId);
+ return createResponsesModel(modelId);
  };
  const createResponsesModel = (modelId) => {
  return new OpenAIResponsesLanguageModel(modelId, {
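
This is a behavioral change: the bare provider call now always constructs a Responses API model, where the alpha routed gpt-3.5-turbo-instruct to completions and everything else to chat. Callers that depend on the older endpoints must select them explicitly, e.g.:

  import { openai } from '@ai-sdk/openai';

  const responses = openai('gpt-4o');                           // Responses API (new default)
  const chat = openai.chat('gpt-4o');                           // Chat Completions API
  const instruct = openai.completion('gpt-3.5-turbo-instruct'); // Completions API
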